mirror of https://github.com/nosqlbench/nosqlbench.git
synced 2025-02-25 18:55:28 -06:00

commit ed4be4ee4b
Merge remote-tracking branch 'origin/main' into nosqlbench-948-testcontainers

484  .all-contributorsrc  (new file)
@@ -0,0 +1,484 @@
{
  "projectOwner": "nosqlbench",
  "projectName": "nosqlbench",
  "repoType": "github",
  "repoHost": "https://github.com",
  "files": [
    "CONTRIBUTING.md"
  ],
  "imageSize": 50,
  "commit": false,
  "commitConvention": "angular",
  "contributorsPerLine": 6,
  "contributorsSortAlphabetically": false,
  "linkToUsage": false,
  "skipCi": true,
  "contributors": [
    {
      "login": "jshook",
      "name": "Jonathan Shook",
      "avatar_url": "https://avatars.githubusercontent.com/u/2148847?v=4",
      "profile": "https://github.com/jshook",
      "contributions": ["review", "tool", "bug", "business", "code", "content", "data", "doc", "design", "example", "ideas", "infra", "maintenance", "mentoring", "platform", "plugin", "projectManagement", "research", "security", "test", "userTesting"]
    },
    {
      "login": "MikeYaacoubStax",
      "name": "MikeYaacoubStax",
      "avatar_url": "https://avatars.githubusercontent.com/u/117678633?v=4",
      "profile": "https://github.com/MikeYaacoubStax",
      "contributions": ["review", "tool", "bug", "business", "code", "content", "data", "doc", "design", "example", "ideas", "infra", "maintenance", "mentoring", "platform", "plugin", "projectManagement", "research", "security", "test", "userTesting"]
    },
    {
      "login": "jeffbanks",
      "name": "Jeff Banks",
      "avatar_url": "https://avatars.githubusercontent.com/u/4078933?v=4",
      "profile": "http://jjbanks.com",
      "contributions": ["code", "mentoring", "test", "bug", "business", "content", "data", "doc", "design", "example", "ideas", "infra", "maintenance", "platform", "plugin", "projectManagement", "research", "review", "security", "tool", "userTesting"]
    },
    {
      "login": "msmygit",
      "name": "Madhavan",
      "avatar_url": "https://avatars.githubusercontent.com/u/19366623?v=4",
      "profile": "https://github.com/msmygit",
      "contributions": ["code", "bug", "doc", "ideas", "question", "research", "review", "tool", "userTesting", "talk", "tutorial"]
    },
    {
      "login": "hemidactylus",
      "name": "Stefano Lottini",
      "avatar_url": "https://avatars.githubusercontent.com/u/14221764?v=4",
      "profile": "https://github.com/hemidactylus",
      "contributions": ["bug", "blog", "code", "content", "data", "doc", "example", "eventOrganizing", "mentoring", "promotion", "research", "tutorial", "userTesting", "video"]
    },
    {
      "login": "phact",
      "name": "Sebastián Estévez",
      "avatar_url": "https://avatars.githubusercontent.com/u/1313220?v=4",
      "profile": "https://github.com/phact",
      "contributions": ["bug", "design", "business", "code", "content", "data", "doc", "ideas", "promotion", "research"]
    },
    {
      "login": "smccarthy788",
      "name": "Sean McCarthy",
      "avatar_url": "https://avatars.githubusercontent.com/u/6601281?v=4",
      "profile": "https://github.com/smccarthy788",
      "contributions": ["mentoring", "ideas", "code", "test"]
    },
    {
      "login": "yabinmeng",
      "name": "yabinmeng",
      "avatar_url": "https://avatars.githubusercontent.com/u/16789452?v=4",
      "profile": "https://github.com/yabinmeng",
      "contributions": ["bug", "test"]
    },
    {
      "login": "eolivelli",
      "name": "Enrico Olivelli",
      "avatar_url": "https://avatars.githubusercontent.com/u/9469110?v=4",
      "profile": "http://eolivelli.blogspot.it/",
      "contributions": ["test", "code", "review"]
    },
    {
      "login": "lhotari",
      "name": "Lari Hotari",
      "avatar_url": "https://avatars.githubusercontent.com/u/66864?v=4",
      "profile": "https://github.com/lhotari",
      "contributions": ["bug", "code", "review"]
    },
    {
      "login": "mfleming",
      "name": "Matt Fleming",
      "avatar_url": "https://avatars.githubusercontent.com/u/94254?v=4",
      "profile": "http://www.codeblueprint.co.uk",
      "contributions": ["bug", "design"]
    },
    {
      "login": "tjake",
      "name": "Jake Luciani",
      "avatar_url": "https://avatars.githubusercontent.com/u/44456?v=4",
      "profile": "https://github.com/tjake",
      "contributions": ["bug", "ideas"]
    },
    {
      "login": "lakshmi-M18",
      "name": "Lakshmi Manjunatha",
      "avatar_url": "https://avatars.githubusercontent.com/u/89935678?v=4",
      "profile": "https://github.com/lakshmi-M18",
      "contributions": ["bug"]
    },
    {
      "login": "pingtimeout",
      "name": "Pierre Laporte",
      "avatar_url": "https://avatars.githubusercontent.com/u/1159578?v=4",
      "profile": "http://www.pingtimeout.fr",
      "contributions": ["ideas", "bug"]
    },
    {
      "login": "tatu-at-datastax",
      "name": "Tatu Saloranta",
      "avatar_url": "https://avatars.githubusercontent.com/u/87213665?v=4",
      "profile": "https://github.com/tatu-at-datastax",
      "contributions": ["doc"]
    },
    {
      "login": "alexott",
      "name": "Alex Ott",
      "avatar_url": "https://avatars.githubusercontent.com/u/30342?v=4",
      "profile": "http://alexott.net",
      "contributions": ["platform", "bug", "code"]
    },
    {
      "login": "jeffreyscarpenter",
      "name": "Jeffrey Carpenter",
      "avatar_url": "https://avatars.githubusercontent.com/u/12115970?v=4",
      "profile": "https://github.com/jeffreyscarpenter",
      "contributions": ["bug", "test", "maintenance"]
    },
    {
      "login": "yassermohamed81",
      "name": "yassermohamed81",
      "avatar_url": "https://avatars.githubusercontent.com/u/53837411?v=4",
      "profile": "https://github.com/yassermohamed81",
      "contributions": ["code"]
    },
    {
      "login": "Pierrotws",
      "name": "Pierre Sauvage",
      "avatar_url": "https://avatars.githubusercontent.com/u/6002161?v=4",
      "profile": "https://github.com/Pierrotws",
      "contributions": ["code"]
    },
    {
      "login": "dougwettlaufer",
      "name": "Doug Wettlaufer",
      "avatar_url": "https://avatars.githubusercontent.com/u/45750136?v=4",
      "profile": "https://github.com/dougwettlaufer",
      "contributions": ["test"]
    },
    {
      "login": "jeromatron",
      "name": "Jeremy Hanna",
      "avatar_url": "https://avatars.githubusercontent.com/u/254887?v=4",
      "profile": "http://jeromatron.blogspot.com",
      "contributions": ["test", "ideas"]
    },
    {
      "login": "alicel",
      "name": "Alice Lottini",
      "avatar_url": "https://avatars.githubusercontent.com/u/2972347?v=4",
      "profile": "https://github.com/alicel",
      "contributions": ["bug", "ideas", "test"]
    },
    {
      "login": "EricBorczuk",
      "name": "Eric Borczuk",
      "avatar_url": "https://avatars.githubusercontent.com/u/4205492?v=4",
      "profile": "https://github.com/EricBorczuk",
      "contributions": ["code", "review", "test"]
    },
    {
      "login": "weideng1",
      "name": "weideng1",
      "avatar_url": "https://avatars.githubusercontent.com/u/5520525?v=4",
      "profile": "https://github.com/weideng1",
      "contributions": ["test", "ideas", "code"]
    },
    {
      "login": "ivansenic",
      "name": "Ivan Senic",
      "avatar_url": "https://avatars.githubusercontent.com/u/10600041?v=4",
      "profile": "https://github.com/ivansenic",
      "contributions": ["test"]
    },
    {
      "login": "justinchuch",
      "name": "Justin Chu",
      "avatar_url": "https://avatars.githubusercontent.com/u/15710241?v=4",
      "profile": "https://justinchuch.wordpress.com/",
      "contributions": ["code", "test", "review"]
    },
    {
      "login": "ds-steven-matison",
      "name": "Steven Matison",
      "avatar_url": "https://avatars.githubusercontent.com/u/70520951?v=4",
      "profile": "https://ds-steven-matison.github.io/",
      "contributions": ["test", "ideas"]
    },
    {
      "login": "szimmer1",
      "name": "shahar z",
      "avatar_url": "https://avatars.githubusercontent.com/u/8455475?v=4",
      "profile": "https://github.com/szimmer1",
      "contributions": ["ideas", "code"]
    },
    {
      "login": "ncarvind",
      "name": "ncarvind",
      "avatar_url": "https://avatars.githubusercontent.com/u/70302571?v=4",
      "profile": "https://github.com/ncarvind",
      "contributions": ["code", "test"]
    },
    {
      "login": "MMirelli",
      "name": "Massimiliano Mirelli",
      "avatar_url": "https://avatars.githubusercontent.com/u/22191891?v=4",
      "profile": "https://github.com/MMirelli",
      "contributions": ["platform"]
    },
    {
      "login": "derrickCos",
      "name": "Derrick Cosmas",
      "avatar_url": "https://avatars.githubusercontent.com/u/25781387?v=4",
      "profile": "https://github.com/derrickCos",
      "contributions": ["code", "ideas"]
    },
    {
      "login": "grighetto",
      "name": "Gianluca Righetto",
      "avatar_url": "https://avatars.githubusercontent.com/u/413792?v=4",
      "profile": "https://github.com/grighetto",
      "contributions": ["platform"]
    },
    {
      "login": "BrynCooke",
      "name": "Bryn Cooke",
      "avatar_url": "https://avatars.githubusercontent.com/u/747836?v=4",
      "profile": "https://github.com/BrynCooke",
      "contributions": ["doc"]
    },
    {
      "login": "KatSarah",
      "name": "KatSarah",
      "avatar_url": "https://avatars.githubusercontent.com/u/658015?v=4",
      "profile": "https://github.com/KatSarah",
      "contributions": ["ideas"]
    },
    {
      "login": "peytoncasper",
      "name": "Peyton Casper",
      "avatar_url": "https://avatars.githubusercontent.com/u/8305883?v=4",
      "profile": "https://github.com/peytoncasper",
      "contributions": ["ideas", "code"]
    },
    {
      "login": "landim",
      "name": "Arthur Costa",
      "avatar_url": "https://avatars.githubusercontent.com/u/91446?v=4",
      "profile": "https://github.com/landim",
      "contributions": ["bug"]
    },
    {
      "login": "guyboltonking",
      "name": "Guy Bolton King",
      "avatar_url": "https://avatars.githubusercontent.com/u/98294?v=4",
      "profile": "https://github.com/guyboltonking",
      "contributions": ["bug", "code", "ideas"]
    },
    {
      "login": "XN137",
      "name": "Christopher Lambert",
      "avatar_url": "https://avatars.githubusercontent.com/u/1204398?v=4",
      "profile": "https://github.com/XN137",
      "contributions": ["code", "ideas"]
    }
  ]
}
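The file above configures the all-contributors tooling (note `"commit": false` and `"skipCi": true`, so the tool edits files without committing). As a rough sketch of how such a config is typically used (assuming the `all-contributors-cli` npm package; the username and contribution key below are illustrative, not part of this commit):

```sh
# Regenerate the contributors table in the files listed under "files" (here, CONTRIBUTING.md)
npx all-contributors generate

# Record an additional contribution for a contributor, then regenerate
npx all-contributors add someuser doc
```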
1  .github/workflows/build.yml  (vendored)
@@ -79,6 +79,7 @@ jobs:
  builddocs:
    needs: build
    runs-on: ubuntu-20.04
+    if: ${{ github.repository == 'nosqlbench/nosqlbench' && github.event_name == 'push' && github.ref_name == 'main' }}
    steps:

    - name: set git username
CONTRIBUTING.md
@@ -1,3 +1,5 @@
[All Contributors](#contributors)

NoSQLBench is an ambitious project. It aims to solve long-standing problems in distributed systems
testing. There are *many* ways you can contribute! Please take a moment to review this document
in order to make the contribution process easy and effective for everyone involved.

@@ -117,5 +119,73 @@ are eager to get it into the hands of users who need it.
[discord server](https://discord.gg/dBHRakusMN) and raise your hand!

## Contributors

Thanks to these contributors! :sparkle:

For recognizing contributions, please follow [this documentation](https://allcontributors.org/docs/en/bot/usage) and pick a key/contribution type from [here](https://allcontributors.org/docs/en/emoji-key).

<!-- ALL-CONTRIBUTORS-LIST:START - Do not remove or modify this section -->
<!-- prettier-ignore-start -->
<!-- markdownlint-disable -->
<table>
<tbody>
<tr>
<td align="center" valign="top" width="16.66%"><a href="https://github.com/jshook"><img src="https://avatars.githubusercontent.com/u/2148847?v=4?s=50" width="50px;" alt="Jonathan Shook"/><br /><sub><b>Jonathan Shook</b></sub></a><br /><a href="https://github.com/nosqlbench/nosqlbench/pulls?q=is%3Apr+reviewed-by%3Ajshook" title="Reviewed Pull Requests">👀</a> <a href="#tool-jshook" title="Tools">🔧</a> <a href="https://github.com/nosqlbench/nosqlbench/issues?q=author%3Ajshook" title="Bug reports">🐛</a> <a href="#business-jshook" title="Business development">💼</a> <a href="https://github.com/nosqlbench/nosqlbench/commits?author=jshook" title="Code">💻</a> <a href="#content-jshook" title="Content">🖋</a> <a href="#data-jshook" title="Data">🔣</a> <a href="https://github.com/nosqlbench/nosqlbench/commits?author=jshook" title="Documentation">📖</a> <a href="#design-jshook" title="Design">🎨</a> <a href="#example-jshook" title="Examples">💡</a> <a href="#ideas-jshook" title="Ideas, Planning, & Feedback">🤔</a> <a href="#infra-jshook" title="Infrastructure (Hosting, Build-Tools, etc)">🚇</a> <a href="#maintenance-jshook" title="Maintenance">🚧</a> <a href="#mentoring-jshook" title="Mentoring">🧑🏫</a> <a href="#platform-jshook" title="Packaging/porting to new platform">📦</a> <a href="#plugin-jshook" title="Plugin/utility libraries">🔌</a> <a href="#projectManagement-jshook" title="Project Management">📆</a> <a href="#research-jshook" title="Research">🔬</a> <a href="#security-jshook" title="Security">🛡️</a> <a href="https://github.com/nosqlbench/nosqlbench/commits?author=jshook" title="Tests">⚠️</a> <a href="#userTesting-jshook" title="User Testing">📓</a></td>
<td align="center" valign="top" width="16.66%"><a href="https://github.com/MikeYaacoubStax"><img src="https://avatars.githubusercontent.com/u/117678633?v=4?s=50" width="50px;" alt="MikeYaacoubStax"/><br /><sub><b>MikeYaacoubStax</b></sub></a><br /><a href="https://github.com/nosqlbench/nosqlbench/pulls?q=is%3Apr+reviewed-by%3AMikeYaacoubStax" title="Reviewed Pull Requests">👀</a> <a href="#tool-MikeYaacoubStax" title="Tools">🔧</a> <a href="https://github.com/nosqlbench/nosqlbench/issues?q=author%3AMikeYaacoubStax" title="Bug reports">🐛</a> <a href="#business-MikeYaacoubStax" title="Business development">💼</a> <a href="https://github.com/nosqlbench/nosqlbench/commits?author=MikeYaacoubStax" title="Code">💻</a> <a href="#content-MikeYaacoubStax" title="Content">🖋</a> <a href="#data-MikeYaacoubStax" title="Data">🔣</a> <a href="https://github.com/nosqlbench/nosqlbench/commits?author=MikeYaacoubStax" title="Documentation">📖</a> <a href="#design-MikeYaacoubStax" title="Design">🎨</a> <a href="#example-MikeYaacoubStax" title="Examples">💡</a> <a href="#ideas-MikeYaacoubStax" title="Ideas, Planning, & Feedback">🤔</a> <a href="#infra-MikeYaacoubStax" title="Infrastructure (Hosting, Build-Tools, etc)">🚇</a> <a href="#maintenance-MikeYaacoubStax" title="Maintenance">🚧</a> <a href="#mentoring-MikeYaacoubStax" title="Mentoring">🧑🏫</a> <a href="#platform-MikeYaacoubStax" title="Packaging/porting to new platform">📦</a> <a href="#plugin-MikeYaacoubStax" title="Plugin/utility libraries">🔌</a> <a href="#projectManagement-MikeYaacoubStax" title="Project Management">📆</a> <a href="#research-MikeYaacoubStax" title="Research">🔬</a> <a href="#security-MikeYaacoubStax" title="Security">🛡️</a> <a href="https://github.com/nosqlbench/nosqlbench/commits?author=MikeYaacoubStax" title="Tests">⚠️</a> <a href="#userTesting-MikeYaacoubStax" title="User Testing">📓</a></td>
<td align="center" valign="top" width="16.66%"><a href="http://jjbanks.com"><img src="https://avatars.githubusercontent.com/u/4078933?v=4?s=50" width="50px;" alt="Jeff Banks"/><br /><sub><b>Jeff Banks</b></sub></a><br /><a href="https://github.com/nosqlbench/nosqlbench/commits?author=jeffbanks" title="Code">💻</a> <a href="#mentoring-jeffbanks" title="Mentoring">🧑🏫</a> <a href="https://github.com/nosqlbench/nosqlbench/commits?author=jeffbanks" title="Tests">⚠️</a> <a href="https://github.com/nosqlbench/nosqlbench/issues?q=author%3Ajeffbanks" title="Bug reports">🐛</a> <a href="#business-jeffbanks" title="Business development">💼</a> <a href="#content-jeffbanks" title="Content">🖋</a> <a href="#data-jeffbanks" title="Data">🔣</a> <a href="https://github.com/nosqlbench/nosqlbench/commits?author=jeffbanks" title="Documentation">📖</a> <a href="#design-jeffbanks" title="Design">🎨</a> <a href="#example-jeffbanks" title="Examples">💡</a> <a href="#ideas-jeffbanks" title="Ideas, Planning, & Feedback">🤔</a> <a href="#infra-jeffbanks" title="Infrastructure (Hosting, Build-Tools, etc)">🚇</a> <a href="#maintenance-jeffbanks" title="Maintenance">🚧</a> <a href="#platform-jeffbanks" title="Packaging/porting to new platform">📦</a> <a href="#plugin-jeffbanks" title="Plugin/utility libraries">🔌</a> <a href="#projectManagement-jeffbanks" title="Project Management">📆</a> <a href="#research-jeffbanks" title="Research">🔬</a> <a href="https://github.com/nosqlbench/nosqlbench/pulls?q=is%3Apr+reviewed-by%3Ajeffbanks" title="Reviewed Pull Requests">👀</a> <a href="#security-jeffbanks" title="Security">🛡️</a> <a href="#tool-jeffbanks" title="Tools">🔧</a> <a href="#userTesting-jeffbanks" title="User Testing">📓</a></td>
<td align="center" valign="top" width="16.66%"><a href="https://github.com/msmygit"><img src="https://avatars.githubusercontent.com/u/19366623?v=4?s=50" width="50px;" alt="Madhavan"/><br /><sub><b>Madhavan</b></sub></a><br /><a href="https://github.com/nosqlbench/nosqlbench/commits?author=msmygit" title="Code">💻</a> <a href="https://github.com/nosqlbench/nosqlbench/issues?q=author%3Amsmygit" title="Bug reports">🐛</a> <a href="https://github.com/nosqlbench/nosqlbench/commits?author=msmygit" title="Documentation">📖</a> <a href="#ideas-msmygit" title="Ideas, Planning, & Feedback">🤔</a> <a href="#question-msmygit" title="Answering Questions">💬</a> <a href="#research-msmygit" title="Research">🔬</a> <a href="https://github.com/nosqlbench/nosqlbench/pulls?q=is%3Apr+reviewed-by%3Amsmygit" title="Reviewed Pull Requests">👀</a> <a href="#tool-msmygit" title="Tools">🔧</a> <a href="#userTesting-msmygit" title="User Testing">📓</a> <a href="#talk-msmygit" title="Talks">📢</a> <a href="#tutorial-msmygit" title="Tutorials">✅</a></td>
<td align="center" valign="top" width="16.66%"><a href="https://github.com/hemidactylus"><img src="https://avatars.githubusercontent.com/u/14221764?v=4?s=50" width="50px;" alt="Stefano Lottini"/><br /><sub><b>Stefano Lottini</b></sub></a><br /><a href="https://github.com/nosqlbench/nosqlbench/issues?q=author%3Ahemidactylus" title="Bug reports">🐛</a> <a href="#blog-hemidactylus" title="Blogposts">📝</a> <a href="https://github.com/nosqlbench/nosqlbench/commits?author=hemidactylus" title="Code">💻</a> <a href="#content-hemidactylus" title="Content">🖋</a> <a href="#data-hemidactylus" title="Data">🔣</a> <a href="https://github.com/nosqlbench/nosqlbench/commits?author=hemidactylus" title="Documentation">📖</a> <a href="#example-hemidactylus" title="Examples">💡</a> <a href="#eventOrganizing-hemidactylus" title="Event Organizing">📋</a> <a href="#mentoring-hemidactylus" title="Mentoring">🧑🏫</a> <a href="#promotion-hemidactylus" title="Promotion">📣</a> <a href="#research-hemidactylus" title="Research">🔬</a> <a href="#tutorial-hemidactylus" title="Tutorials">✅</a> <a href="#userTesting-hemidactylus" title="User Testing">📓</a> <a href="#video-hemidactylus" title="Videos">📹</a></td>
<td align="center" valign="top" width="16.66%"><a href="https://github.com/phact"><img src="https://avatars.githubusercontent.com/u/1313220?v=4?s=50" width="50px;" alt="Sebastián Estévez"/><br /><sub><b>Sebastián Estévez</b></sub></a><br /><a href="https://github.com/nosqlbench/nosqlbench/issues?q=author%3Aphact" title="Bug reports">🐛</a> <a href="#design-phact" title="Design">🎨</a> <a href="#business-phact" title="Business development">💼</a> <a href="https://github.com/nosqlbench/nosqlbench/commits?author=phact" title="Code">💻</a> <a href="#content-phact" title="Content">🖋</a> <a href="#data-phact" title="Data">🔣</a> <a href="https://github.com/nosqlbench/nosqlbench/commits?author=phact" title="Documentation">📖</a> <a href="#ideas-phact" title="Ideas, Planning, & Feedback">🤔</a> <a href="#promotion-phact" title="Promotion">📣</a> <a href="#research-phact" title="Research">🔬</a></td>
</tr>
<tr>
<td align="center" valign="top" width="16.66%"><a href="https://github.com/smccarthy788"><img src="https://avatars.githubusercontent.com/u/6601281?v=4?s=50" width="50px;" alt="Sean McCarthy"/><br /><sub><b>Sean McCarthy</b></sub></a><br /><a href="#mentoring-smccarthy788" title="Mentoring">🧑🏫</a> <a href="#ideas-smccarthy788" title="Ideas, Planning, & Feedback">🤔</a> <a href="https://github.com/nosqlbench/nosqlbench/commits?author=smccarthy788" title="Code">💻</a> <a href="https://github.com/nosqlbench/nosqlbench/commits?author=smccarthy788" title="Tests">⚠️</a></td>
<td align="center" valign="top" width="16.66%"><a href="https://github.com/yabinmeng"><img src="https://avatars.githubusercontent.com/u/16789452?v=4?s=50" width="50px;" alt="yabinmeng"/><br /><sub><b>yabinmeng</b></sub></a><br /><a href="https://github.com/nosqlbench/nosqlbench/issues?q=author%3Ayabinmeng" title="Bug reports">🐛</a> <a href="https://github.com/nosqlbench/nosqlbench/commits?author=yabinmeng" title="Tests">⚠️</a></td>
<td align="center" valign="top" width="16.66%"><a href="http://eolivelli.blogspot.it/"><img src="https://avatars.githubusercontent.com/u/9469110?v=4?s=50" width="50px;" alt="Enrico Olivelli"/><br /><sub><b>Enrico Olivelli</b></sub></a><br /><a href="https://github.com/nosqlbench/nosqlbench/commits?author=eolivelli" title="Tests">⚠️</a> <a href="https://github.com/nosqlbench/nosqlbench/commits?author=eolivelli" title="Code">💻</a> <a href="https://github.com/nosqlbench/nosqlbench/pulls?q=is%3Apr+reviewed-by%3Aeolivelli" title="Reviewed Pull Requests">👀</a></td>
<td align="center" valign="top" width="16.66%"><a href="https://github.com/lhotari"><img src="https://avatars.githubusercontent.com/u/66864?v=4?s=50" width="50px;" alt="Lari Hotari"/><br /><sub><b>Lari Hotari</b></sub></a><br /><a href="https://github.com/nosqlbench/nosqlbench/issues?q=author%3Alhotari" title="Bug reports">🐛</a> <a href="https://github.com/nosqlbench/nosqlbench/commits?author=lhotari" title="Code">💻</a> <a href="https://github.com/nosqlbench/nosqlbench/pulls?q=is%3Apr+reviewed-by%3Alhotari" title="Reviewed Pull Requests">👀</a></td>
<td align="center" valign="top" width="16.66%"><a href="http://www.codeblueprint.co.uk"><img src="https://avatars.githubusercontent.com/u/94254?v=4?s=50" width="50px;" alt="Matt Fleming"/><br /><sub><b>Matt Fleming</b></sub></a><br /><a href="https://github.com/nosqlbench/nosqlbench/issues?q=author%3Amfleming" title="Bug reports">🐛</a> <a href="#design-mfleming" title="Design">🎨</a></td>
<td align="center" valign="top" width="16.66%"><a href="https://github.com/tjake"><img src="https://avatars.githubusercontent.com/u/44456?v=4?s=50" width="50px;" alt="Jake Luciani"/><br /><sub><b>Jake Luciani</b></sub></a><br /><a href="https://github.com/nosqlbench/nosqlbench/issues?q=author%3Atjake" title="Bug reports">🐛</a> <a href="#ideas-tjake" title="Ideas, Planning, & Feedback">🤔</a></td>
</tr>
<tr>
<td align="center" valign="top" width="16.66%"><a href="https://github.com/lakshmi-M18"><img src="https://avatars.githubusercontent.com/u/89935678?v=4?s=50" width="50px;" alt="Lakshmi Manjunatha"/><br /><sub><b>Lakshmi Manjunatha</b></sub></a><br /><a href="https://github.com/nosqlbench/nosqlbench/issues?q=author%3Alakshmi-M18" title="Bug reports">🐛</a></td>
<td align="center" valign="top" width="16.66%"><a href="http://www.pingtimeout.fr"><img src="https://avatars.githubusercontent.com/u/1159578?v=4?s=50" width="50px;" alt="Pierre Laporte"/><br /><sub><b>Pierre Laporte</b></sub></a><br /><a href="#ideas-pingtimeout" title="Ideas, Planning, & Feedback">🤔</a> <a href="https://github.com/nosqlbench/nosqlbench/issues?q=author%3Apingtimeout" title="Bug reports">🐛</a></td>
<td align="center" valign="top" width="16.66%"><a href="https://github.com/tatu-at-datastax"><img src="https://avatars.githubusercontent.com/u/87213665?v=4?s=50" width="50px;" alt="Tatu Saloranta"/><br /><sub><b>Tatu Saloranta</b></sub></a><br /><a href="https://github.com/nosqlbench/nosqlbench/commits?author=tatu-at-datastax" title="Documentation">📖</a></td>
<td align="center" valign="top" width="16.66%"><a href="http://alexott.net"><img src="https://avatars.githubusercontent.com/u/30342?v=4?s=50" width="50px;" alt="Alex Ott"/><br /><sub><b>Alex Ott</b></sub></a><br /><a href="#platform-alexott" title="Packaging/porting to new platform">📦</a> <a href="https://github.com/nosqlbench/nosqlbench/issues?q=author%3Aalexott" title="Bug reports">🐛</a> <a href="https://github.com/nosqlbench/nosqlbench/commits?author=alexott" title="Code">💻</a></td>
<td align="center" valign="top" width="16.66%"><a href="https://github.com/jeffreyscarpenter"><img src="https://avatars.githubusercontent.com/u/12115970?v=4?s=50" width="50px;" alt="Jeffrey Carpenter"/><br /><sub><b>Jeffrey Carpenter</b></sub></a><br /><a href="https://github.com/nosqlbench/nosqlbench/issues?q=author%3Ajeffreyscarpenter" title="Bug reports">🐛</a> <a href="https://github.com/nosqlbench/nosqlbench/commits?author=jeffreyscarpenter" title="Tests">⚠️</a> <a href="#maintenance-jeffreyscarpenter" title="Maintenance">🚧</a></td>
<td align="center" valign="top" width="16.66%"><a href="https://github.com/yassermohamed81"><img src="https://avatars.githubusercontent.com/u/53837411?v=4?s=50" width="50px;" alt="yassermohamed81"/><br /><sub><b>yassermohamed81</b></sub></a><br /><a href="https://github.com/nosqlbench/nosqlbench/commits?author=yassermohamed81" title="Code">💻</a></td>
</tr>
<tr>
<td align="center" valign="top" width="16.66%"><a href="https://github.com/Pierrotws"><img src="https://avatars.githubusercontent.com/u/6002161?v=4?s=50" width="50px;" alt="Pierre Sauvage"/><br /><sub><b>Pierre Sauvage</b></sub></a><br /><a href="https://github.com/nosqlbench/nosqlbench/commits?author=Pierrotws" title="Code">💻</a></td>
<td align="center" valign="top" width="16.66%"><a href="https://github.com/dougwettlaufer"><img src="https://avatars.githubusercontent.com/u/45750136?v=4?s=50" width="50px;" alt="Doug Wettlaufer"/><br /><sub><b>Doug Wettlaufer</b></sub></a><br /><a href="https://github.com/nosqlbench/nosqlbench/commits?author=dougwettlaufer" title="Tests">⚠️</a></td>
<td align="center" valign="top" width="16.66%"><a href="http://jeromatron.blogspot.com"><img src="https://avatars.githubusercontent.com/u/254887?v=4?s=50" width="50px;" alt="Jeremy Hanna"/><br /><sub><b>Jeremy Hanna</b></sub></a><br /><a href="https://github.com/nosqlbench/nosqlbench/commits?author=jeromatron" title="Tests">⚠️</a> <a href="#ideas-jeromatron" title="Ideas, Planning, & Feedback">🤔</a></td>
<td align="center" valign="top" width="16.66%"><a href="https://github.com/alicel"><img src="https://avatars.githubusercontent.com/u/2972347?v=4?s=50" width="50px;" alt="Alice Lottini"/><br /><sub><b>Alice Lottini</b></sub></a><br /><a href="https://github.com/nosqlbench/nosqlbench/issues?q=author%3Aalicel" title="Bug reports">🐛</a> <a href="#ideas-alicel" title="Ideas, Planning, & Feedback">🤔</a> <a href="https://github.com/nosqlbench/nosqlbench/commits?author=alicel" title="Tests">⚠️</a></td>
<td align="center" valign="top" width="16.66%"><a href="https://github.com/EricBorczuk"><img src="https://avatars.githubusercontent.com/u/4205492?v=4?s=50" width="50px;" alt="Eric Borczuk"/><br /><sub><b>Eric Borczuk</b></sub></a><br /><a href="https://github.com/nosqlbench/nosqlbench/commits?author=EricBorczuk" title="Code">💻</a> <a href="https://github.com/nosqlbench/nosqlbench/pulls?q=is%3Apr+reviewed-by%3AEricBorczuk" title="Reviewed Pull Requests">👀</a> <a href="https://github.com/nosqlbench/nosqlbench/commits?author=EricBorczuk" title="Tests">⚠️</a></td>
<td align="center" valign="top" width="16.66%"><a href="https://github.com/weideng1"><img src="https://avatars.githubusercontent.com/u/5520525?v=4?s=50" width="50px;" alt="weideng1"/><br /><sub><b>weideng1</b></sub></a><br /><a href="https://github.com/nosqlbench/nosqlbench/commits?author=weideng1" title="Tests">⚠️</a> <a href="#ideas-weideng1" title="Ideas, Planning, & Feedback">🤔</a> <a href="https://github.com/nosqlbench/nosqlbench/commits?author=weideng1" title="Code">💻</a></td>
</tr>
<tr>
<td align="center" valign="top" width="16.66%"><a href="https://github.com/ivansenic"><img src="https://avatars.githubusercontent.com/u/10600041?v=4?s=50" width="50px;" alt="Ivan Senic"/><br /><sub><b>Ivan Senic</b></sub></a><br /><a href="https://github.com/nosqlbench/nosqlbench/commits?author=ivansenic" title="Tests">⚠️</a></td>
<td align="center" valign="top" width="16.66%"><a href="https://justinchuch.wordpress.com/"><img src="https://avatars.githubusercontent.com/u/15710241?v=4?s=50" width="50px;" alt="Justin Chu"/><br /><sub><b>Justin Chu</b></sub></a><br /><a href="https://github.com/nosqlbench/nosqlbench/commits?author=justinchuch" title="Code">💻</a> <a href="https://github.com/nosqlbench/nosqlbench/commits?author=justinchuch" title="Tests">⚠️</a> <a href="https://github.com/nosqlbench/nosqlbench/pulls?q=is%3Apr+reviewed-by%3Ajustinchuch" title="Reviewed Pull Requests">👀</a></td>
<td align="center" valign="top" width="16.66%"><a href="https://ds-steven-matison.github.io/"><img src="https://avatars.githubusercontent.com/u/70520951?v=4?s=50" width="50px;" alt="Steven Matison"/><br /><sub><b>Steven Matison</b></sub></a><br /><a href="https://github.com/nosqlbench/nosqlbench/commits?author=ds-steven-matison" title="Tests">⚠️</a> <a href="#ideas-ds-steven-matison" title="Ideas, Planning, & Feedback">🤔</a></td>
<td align="center" valign="top" width="16.66%"><a href="https://github.com/szimmer1"><img src="https://avatars.githubusercontent.com/u/8455475?v=4?s=50" width="50px;" alt="shahar z"/><br /><sub><b>shahar z</b></sub></a><br /><a href="#ideas-szimmer1" title="Ideas, Planning, & Feedback">🤔</a> <a href="https://github.com/nosqlbench/nosqlbench/commits?author=szimmer1" title="Code">💻</a></td>
<td align="center" valign="top" width="16.66%"><a href="https://github.com/ncarvind"><img src="https://avatars.githubusercontent.com/u/70302571?v=4?s=50" width="50px;" alt="ncarvind"/><br /><sub><b>ncarvind</b></sub></a><br /><a href="https://github.com/nosqlbench/nosqlbench/commits?author=ncarvind" title="Code">💻</a> <a href="https://github.com/nosqlbench/nosqlbench/commits?author=ncarvind" title="Tests">⚠️</a></td>
<td align="center" valign="top" width="16.66%"><a href="https://github.com/MMirelli"><img src="https://avatars.githubusercontent.com/u/22191891?v=4?s=50" width="50px;" alt="Massimiliano Mirelli"/><br /><sub><b>Massimiliano Mirelli</b></sub></a><br /><a href="#platform-MMirelli" title="Packaging/porting to new platform">📦</a></td>
</tr>
<tr>
<td align="center" valign="top" width="16.66%"><a href="https://github.com/derrickCos"><img src="https://avatars.githubusercontent.com/u/25781387?v=4?s=50" width="50px;" alt="Derrick Cosmas"/><br /><sub><b>Derrick Cosmas</b></sub></a><br /><a href="https://github.com/nosqlbench/nosqlbench/commits?author=derrickCos" title="Code">💻</a> <a href="#ideas-derrickCos" title="Ideas, Planning, & Feedback">🤔</a></td>
<td align="center" valign="top" width="16.66%"><a href="https://github.com/grighetto"><img src="https://avatars.githubusercontent.com/u/413792?v=4?s=50" width="50px;" alt="Gianluca Righetto"/><br /><sub><b>Gianluca Righetto</b></sub></a><br /><a href="#platform-grighetto" title="Packaging/porting to new platform">📦</a></td>
<td align="center" valign="top" width="16.66%"><a href="https://github.com/BrynCooke"><img src="https://avatars.githubusercontent.com/u/747836?v=4?s=50" width="50px;" alt="Bryn Cooke"/><br /><sub><b>Bryn Cooke</b></sub></a><br /><a href="https://github.com/nosqlbench/nosqlbench/commits?author=BrynCooke" title="Documentation">📖</a></td>
<td align="center" valign="top" width="16.66%"><a href="https://github.com/KatSarah"><img src="https://avatars.githubusercontent.com/u/658015?v=4?s=50" width="50px;" alt="KatSarah"/><br /><sub><b>KatSarah</b></sub></a><br /><a href="#ideas-KatSarah" title="Ideas, Planning, & Feedback">🤔</a></td>
<td align="center" valign="top" width="16.66%"><a href="https://github.com/peytoncasper"><img src="https://avatars.githubusercontent.com/u/8305883?v=4?s=50" width="50px;" alt="Peyton Casper"/><br /><sub><b>Peyton Casper</b></sub></a><br /><a href="#ideas-peytoncasper" title="Ideas, Planning, & Feedback">🤔</a> <a href="https://github.com/nosqlbench/nosqlbench/commits?author=peytoncasper" title="Code">💻</a></td>
<td align="center" valign="top" width="16.66%"><a href="https://github.com/landim"><img src="https://avatars.githubusercontent.com/u/91446?v=4?s=50" width="50px;" alt="Arthur Costa"/><br /><sub><b>Arthur Costa</b></sub></a><br /><a href="https://github.com/nosqlbench/nosqlbench/issues?q=author%3Alandim" title="Bug reports">🐛</a></td>
</tr>
<tr>
<td align="center" valign="top" width="16.66%"><a href="https://github.com/guyboltonking"><img src="https://avatars.githubusercontent.com/u/98294?v=4?s=50" width="50px;" alt="Guy Bolton King"/><br /><sub><b>Guy Bolton King</b></sub></a><br /><a href="https://github.com/nosqlbench/nosqlbench/issues?q=author%3Aguyboltonking" title="Bug reports">🐛</a> <a href="https://github.com/nosqlbench/nosqlbench/commits?author=guyboltonking" title="Code">💻</a> <a href="#ideas-guyboltonking" title="Ideas, Planning, & Feedback">🤔</a></td>
<td align="center" valign="top" width="16.66%"><a href="https://github.com/XN137"><img src="https://avatars.githubusercontent.com/u/1204398?v=4?s=50" width="50px;" alt="Christopher Lambert"/><br /><sub><b>Christopher Lambert</b></sub></a><br /><a href="https://github.com/nosqlbench/nosqlbench/commits?author=XN137" title="Code">💻</a> <a href="#ideas-XN137" title="Ideas, Planning, & Feedback">🤔</a></td>
</tr>
</tbody>
</table>

<!-- markdownlint-restore -->
<!-- prettier-ignore-end -->

<!-- ALL-CONTRIBUTORS-LIST:END -->

---
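The recognition flow linked above is usually driven through the all-contributors bot rather than by editing the table by hand. As an illustrative sketch (the handle and contribution keys are examples, not from this commit), a maintainer comments on an issue or pull request:

```
@all-contributors please add @someuser for code, doc
```

The bot then opens a pull request that updates both `.all-contributorsrc` and the generated table in CONTRIBUTING.md.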
10  README.md
@@ -1,5 +1,7 @@
[comment]: <  >

[Maven Central](https://maven-badges.herokuapp.com/maven-central/io.nosqlbench/nosqlbench)
[GitHub stars](https://github.com/nosqlbench/nosqlbench/stargazers)
[Discord](https://discord.gg/dBHRakusMN)

# NoSQLBench v5

@@ -104,3 +106,9 @@ available, but more work is needed to support them fully. Here is what is supported
  </td>
 </tr>
</table>

## Contributors

Check out all our wonderful contributors [here](./CONTRIBUTING.md#contributors).

---
@@ -73,7 +73,7 @@
    <dependency>
      <groupId>org.snakeyaml</groupId>
      <artifactId>snakeyaml-engine</artifactId>
-      <version>2.5</version>
+      <version>2.6</version>
    </dependency>

    <dependency>
@@ -18,16 +18,16 @@ package io.nosqlbench.cqlgen.core;

import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
+import io.nosqlbench.api.apps.BundledApp;
import io.nosqlbench.api.content.Content;
import io.nosqlbench.api.content.NBIO;
-import io.nosqlbench.api.apps.BundledApp;
+import io.nosqlbench.cqlgen.api.BindingsLibrary;
import io.nosqlbench.cqlgen.binders.Binding;
import io.nosqlbench.cqlgen.binders.BindingsAccumulator;
-import io.nosqlbench.cqlgen.api.BindingsLibrary;
import io.nosqlbench.cqlgen.binders.NamingFolio;
-import io.nosqlbench.cqlgen.transformers.CGModelTransformers;
import io.nosqlbench.cqlgen.model.*;
import io.nosqlbench.cqlgen.parser.CqlModelParser;
+import io.nosqlbench.cqlgen.transformers.CGModelTransformers;
import io.nosqlbench.nb.annotations.Service;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

@@ -75,14 +75,14 @@ public class CGWorkloadExporter implements BundledApp {
    private Map<String, List<String>> blockplan = Map.of();

    private final Map<String, Double> timeouts = new HashMap<String, Double>(Map.of(
        "create", 60.0,
        "truncate", 900.0,
        "drop", 900.0,
        "scan", 30.0,
        "select", 10.0,
        "insert", 10.0,
        "delete", 10.0,
        "update", 10.0
    ));

    public static void main(String[] args) {

@@ -166,7 +166,7 @@ public class CGWorkloadExporter implements BundledApp {

        this.model = CqlModelParser.parse(ddl, srcpath);
        List<String> errorlist = model.getReferenceErrors();
-        if (errorlist.size()>0) {
+        if (errorlist.size() > 0) {
            for (String error : errorlist) {
                logger.error(error);
            }

@@ -177,12 +177,12 @@ public class CGWorkloadExporter implements BundledApp {
        String workload = getWorkloadAsYaml();
        try {
            Files.writeString(
                target,
                workload,
                StandardOpenOption.CREATE, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING
            );
            logger.info("Wrote workload template as '" + target + "'. Bear in mind that this is simply one version " +
                "of a workload using this schema, and may not be representative of actual production usage patterns.");
        } catch (IOException e) {
            String errmsg = "There was an error writing '" + target + "'.";
            logger.error(errmsg);

@@ -218,7 +218,7 @@ public class CGWorkloadExporter implements BundledApp {
        workload.put("bindings", new LinkedHashMap<String, String>());
        Map<String, Object> blocks = new LinkedHashMap<>();
        workload.put("params", new LinkedHashMap<>(
            Map.of("cl", "LOCAL_QUORUM")
        ));
        workload.put("blocks", blocks);

@@ -227,7 +227,7 @@ public class CGWorkloadExporter implements BundledApp {
        List<String> components = blocknameAndComponents.getValue();

        LinkedHashMap<String, Object> block = new LinkedHashMap<>(
            Map.of("params", new LinkedHashMap<String, Object>())
        );
        for (String component : components) {
            Map<String, Object> additions = switch (component) {

@@ -319,11 +319,11 @@ public class CGWorkloadExporter implements BundledApp {
        return new LinkedHashMap<>() {{

            put("default",
                new LinkedHashMap<>() {{
-                    put("schema", "run driver=cql tags=block:schema.* threads===UNDEF cycles===UNDEF");
-                    put("rampup", "run driver=cql tags=block:rampup.* threads=auto cycles===TEMPLATE(rampup-cycles,10000)");
-                    put("main", "run driver=cql tags=block:main.* threads=auto cycles===TEMPLATE(main-cycles,10000)");
+                    put("schema", "run driver=cql tags=block:\"schema.*\" threads===UNDEF cycles===UNDEF");
+                    put("rampup", "run driver=cql tags=block:rampup threads=auto cycles===TEMPLATE(rampup-cycles,10000)");
+                    put("main", "run driver=cql tags=block:\"main.*\" threads=auto cycles===TEMPLATE(main-cycles,10000)");
                }});

            put("main-insert", "run driver=cql tags=block:main-insert threads=auto cycles===TEMPLATE(main-cycles,10000)");
            put("main-select", "run driver=cql tags=block:main-select threads=auto cycles===TEMPLATE(main-cycles,10000)");
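For context, the scenario defaults assembled in the hunk above render in the generated workload YAML roughly as follows (a sketch recomposed from the strings in this hunk, showing the new quoted tag patterns):

```yaml
scenarios:
  default:
    schema: run driver=cql tags=block:"schema.*" threads===UNDEF cycles===UNDEF
    rampup: run driver=cql tags=block:rampup threads=auto cycles===TEMPLATE(rampup-cycles,10000)
    main: run driver=cql tags=block:"main.*" threads=auto cycles===TEMPLATE(main-cycles,10000)
```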
@@ -351,12 +351,12 @@ public class CGWorkloadExporter implements BundledApp {
                logger.debug(() -> "skipping table " + table.getFullName() + " for scan since there are no clustering columns");
            }
            ops.put(
                namer.nameFor(table, "optype", "scan", "blockname", blockname),
                Map.of(
                    "prepared", genScanSyntax(table),
                    "timeout", timeouts.get("scan"),
                    "ratio", readRatioFor(table)
                )
            );
        }
        return blockdata;

@@ -364,14 +364,14 @@ public class CGWorkloadExporter implements BundledApp {

    private String genScanSyntax(CqlTable table) {
        return """
            select * from KEYSPACE.TABLE
            where PREDICATE
            LIMIT;
            """
            .replace("KEYSPACE", table.getKeyspace().getName())
            .replace("TABLE", table.getName())
            .replace("PREDICATE", genPredicateTemplate(table, -1))
            .replace("LIMIT", genLimitSyntax(table));
    }

@@ -381,12 +381,12 @@ public class CGWorkloadExporter implements BundledApp {
        blockdata.put("ops", ops);
        for (CqlTable table : model.getTableDefs()) {
            ops.put(
                namer.nameFor(table, "optype", "select", "blockname", blockname),
                Map.of(
                    "prepared", genSelectSyntax(table),
                    "timeout", timeouts.get("select"),
                    "ratio", readRatioFor(table)
                )
            );
        }
        return blockdata;

@@ -394,14 +394,14 @@ public class CGWorkloadExporter implements BundledApp {

    private String genSelectSyntax(CqlTable table) {
        return """
            select * from KEYSPACE.TABLE
            where PREDICATE
            LIMIT;
            """
            .replace("KEYSPACE", table.getKeyspace().getName())
            .replace("TABLE", table.getName())
            .replace("PREDICATE", genPredicateTemplate(table, 0))
            .replace("LIMIT", genLimitSyntax(table));
    }

    private String genLimitSyntax(CqlTable table) {

@@ -415,12 +415,12 @@ public class CGWorkloadExporter implements BundledApp {
        for (CqlTable table : model.getTableDefs()) {
            if (!isCounterTable(table)) {
                ops.put(
                    namer.nameFor(table, "optype", "insert", "blockname", blockname),
                    Map.of(
                        "prepared", genInsertSyntax(table),
                        "timeout", timeouts.get("insert"),
                        "ratio", writeRatioFor(table)
                    )
                );
            }
        }

@@ -433,22 +433,22 @@ public class CGWorkloadExporter implements BundledApp {
        }

        return """
            insert into KEYSPACE.TABLE
            ( FIELDNAMES )
            VALUES
            ( BINDINGS );
            """
            .replace("KEYSPACE", table.getKeyspace().getName())
            .replace("TABLE", table.getName())
            .replace("FIELDNAMES",
                String.join(", ",
                    table.getColumnDefs().stream()
                        .map(CqlTableColumn::getName).toList()))
            .replaceAll("BINDINGS",
                String.join(", ",
                    table.getColumnDefs().stream()
                        .map(c -> binder.forColumn(c))
                        .map(c -> "{" + c.getName() + "}").toList()));
    }

@@ -458,12 +458,12 @@ public class CGWorkloadExporter implements BundledApp {
        blockdata.put("ops", ops);
        for (CqlTable table : model.getTableDefs()) {
            ops.put(
                namer.nameFor(table, "optype", "update", "blockname", blockname),
                Map.of(
                    "prepared", genUpdateSyntax(table),
                    "timeout", timeouts.get("update"),
                    "ratio", writeRatioFor(table)
                )
            );
        }
        return blockdata;

@@ -472,7 +472,7 @@ public class CGWorkloadExporter implements BundledApp {

    private boolean isCounterTable(CqlTable table) {
        return table.getColumnDefs().stream()
            .anyMatch(cd -> cd.getTrimmedTypedef().equalsIgnoreCase("counter"));
    }

    private int totalRatioFor(CqlTable table) {

@@ -540,9 +540,9 @@ public class CGWorkloadExporter implements BundledApp {

        // TODO; constraints on predicates based on valid constructions
        pkeys.stream().map(this::genPredicatePart)
            .forEach(p -> {
                sb.append(p).append("\n AND ");
            });
        if (sb.length() > 0) {
            sb.setLength(sb.length() - "\n AND ".length());
        }
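The hunk above only reindents the predicate assembly, but the idiom is worth spelling out: each primary-key column contributes one clause, every clause appends the `"\n AND "` separator, and the final `setLength` call trims the trailing separator. For a hypothetical table keyed by `machine_id` and `time` (names illustrative, and assuming `genPredicatePart` emits `name={binding}` pairs), the resulting template would be:

```
machine_id={machine_id}
 AND time={time}
```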
@@ -557,14 +557,14 @@ public class CGWorkloadExporter implements BundledApp {

    private String genUpdateSyntax(CqlTable table) {
        return """
            update KEYSPACE.TABLE
            set ASSIGNMENTS
            where PREDICATES;
            """
            .replaceAll("KEYSPACE", table.getKeyspace().getName())
            .replaceAll("TABLE", table.getName())
            .replaceAll("PREDICATES", genPredicateTemplate(table, 0))
            .replaceAll("ASSIGNMENTS", genAssignments(table));
    }

    private String genAssignments(CqlTable table) {

@@ -572,12 +572,12 @@ public class CGWorkloadExporter implements BundledApp {
        for (CqlTableColumn coldef : table.getNonKeyColumnDefinitions()) {
            if (coldef.isCounter()) {
                sb.append(coldef.getName()).append("=")
                    .append(coldef.getName()).append("+").append("{").append(binder.forColumn(coldef).getName()).append("}")
                    .append(", ");
            } else {
                sb.append(coldef.getName()).append("=")
                    .append("{").append(binder.forColumn(coldef).getName()).append("}")
                    .append(", ");
            }
        }
        if (sb.length() > 0) {

@@ -602,16 +602,16 @@ public class CGWorkloadExporter implements BundledApp {
        ((Map<String, String>) workload.get("bindings")).putAll(bindingslib.getAccumulatedBindings());

        DumpSettings dumpSettings = DumpSettings.builder()
            .setDefaultFlowStyle(FlowStyle.BLOCK)
            .setIndent(2)
            .setDefaultScalarStyle(ScalarStyle.PLAIN)
            .setMaxSimpleKeyLength(1000)
            .setWidth(100)
            .setSplitLines(true)
            .setIndentWithIndicator(true)
            .setMultiLineFlow(true)
            .setNonPrintableStyle(NonPrintableStyle.ESCAPE)
            .build();
        BaseRepresenter r;
        Dump dump = new Dump(dumpSettings);

@@ -637,11 +637,11 @@ public class CGWorkloadExporter implements BundledApp {
        dropTablesBlock.put("ops", ops);
        for (CqlTable table : model.getTableDefs()) {
            ops.put(
                namer.nameFor(table, "optype", "drop", "blockname", blockname),
                Map.of(
                    "simple", "drop table if exists " + table.getFullName() + ";",
                    "timeout", timeouts.get("drop")
                )
            );
        }
        return dropTablesBlock;

@@ -653,11 +653,11 @@ public class CGWorkloadExporter implements BundledApp {
        dropTypesBlock.put("ops", ops);
        for (CqlType type : model.getTypeDefs()) {
            ops.put(
                namer.nameFor(type, "optype", "drop-type", "blockname", blockname),
                Map.of(
                    "simple", "drop type if exists " + type.getKeyspace() + "." + type.getName() + ";",
                    "timeout", timeouts.get("drop")
                )
            );
        }
        return dropTypesBlock;

@@ -669,11 +669,11 @@ public class CGWorkloadExporter implements BundledApp {
        dropTypesBlock.put("ops", ops);
        for (CqlType type : model.getTypeDefs()) {
            ops.put(
                namer.nameFor(type, "optype", "drop-keyspace", "blockname", blockname),
                Map.of(
                    "simple", "drop keyspace if exists " + type.getKeyspace() + ";",
                    "timeout", timeouts.get("drop")
                )
            );
        }
        return dropTypesBlock;

@@ -687,11 +687,11 @@ public class CGWorkloadExporter implements BundledApp {

        for (CqlTable table : model.getTableDefs()) {
            ops.put(
                namer.nameFor(table, "optype", "truncate", "blockname", blockname),
                Map.of(
                    "simple", "truncate " + table.getFullName() + ";",
                    "timeout", timeouts.get("truncate")
                )
            );
        }
        return truncateblock;

@@ -703,11 +703,11 @@ public class CGWorkloadExporter implements BundledApp {

        for (CqlKeyspaceDef ks : model.getKeyspaceDefs()) {
            ops.put(
                namer.nameFor(ks, "optype", "create", "blockname", blockname),
                Map.of(
                    "simple", genKeyspaceDDL(ks),
                    "timeout", timeouts.get("create")
                )
            );
        }

@@ -722,11 +722,11 @@ public class CGWorkloadExporter implements BundledApp {

        model.getTypeDefs().forEach(type -> {
            ops.put(
-                namer.nameFor(type,"optype","create","blockname",blockname),
-                Map.of(
-                    "simple",genTypeDDL(type),
-                    "timeout",timeouts.get("create")
-                )
+                namer.nameFor(type, "optype", "create", "blockname", blockname),
+                Map.of(
+                    "simple", genTypeDDL(type),
+                    "timeout", timeouts.get("create")
+                )
            );
        });

@@ -736,13 +736,13 @@ public class CGWorkloadExporter implements BundledApp {

    private String genKeyspaceDDL(CqlKeyspaceDef keyspace) {
        return """
            create keyspace KEYSPACE
            with replication = {REPLICATION}DURABLEWRITES?;
            """
            .replace("KEYSPACE", keyspace.getName())
            .replace("REPLICATION", keyspace.getReplicationData())
            .replace("DURABLEWRITES?", keyspace.isDurableWrites() ? "" : "\n and durable writes = false")
            ;
    }

    private Map<String, Object> genCreateTablesOpTemplates(CqlModel model, String blockname) {

@@ -751,11 +751,11 @@ public class CGWorkloadExporter implements BundledApp {

        model.getTableDefs().forEach(table -> {
            ops.put(
-                namer.nameFor(table, "optype","create","blockname",blockname),
-                Map.of(
-                    "simple",genTableDDL(table),
-                    "timeout",timeouts.get("create")
-                )
+                namer.nameFor(table, "optype", "create", "blockname", blockname),
+                Map.of(
+                    "simple", genTableDDL(table),
+                    "timeout", timeouts.get("create")
+                )
            );
        });

@@ -766,14 +766,14 @@ public class CGWorkloadExporter implements BundledApp {

    private String genTypeDDL(CqlType type) {
        return """
            create type KEYSPACE.TYPENAME (
            TYPEDEF
            );
            """
            .replace("KEYSPACE", type.getKeyspace().getName())
            .replace("TYPENAME", type.getName())
            .replace("TYPEDEF", type.getColumnDefs().stream()
                .map(def -> def.getName() + " " + def.getTypedef()).collect(Collectors.joining(",\n")));
    }

    private Object genTableDDL(CqlTable cqltable) {

@@ -782,16 +782,16 @@ public class CGWorkloadExporter implements BundledApp {
        }

        return """
            create table if not exists KEYSPACE.TABLE (
            COLUMN_DEFS,
            primary key (PRIMARYKEY)
            )CLUSTERING;
            """
            .replace("KEYSPACE", cqltable.getKeyspace().getName())
            .replace("TABLE", cqltable.getName())
            .replace("COLUMN_DEFS", genTableColumnDDL(cqltable))
            .replace("PRIMARYKEY", genPrimaryKeyDDL(cqltable))
            .replace("CLUSTERING", genTableClusteringOrderDDL(cqltable));

    }

@@ -829,8 +829,8 @@ public class CGWorkloadExporter implements BundledApp {

    private String genTableColumnDDL(CqlTable cqltable) {
        return cqltable.getColumnDefs().stream()
            .map(cd -> cd.getName() + " " + cd.getTrimmedTypedef())
            .collect(Collectors.joining(",\n"));
    }
@ -1,33 +1,29 @@
# nb -v run driver=cql yaml=cql-iot tags=phase:schema host=dsehost
description: An IOT workload with more optimal settings for DSE

scenarios:
default:
schema: run driver=cql tags==phase:schema threads==1 cycles==UNDEF
rampup: run driver=cql tags==phase:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
main: run driver=cql tags==phase:main cycles===TEMPLATE(main-cycles,10000000) threads=auto
schema: run driver=cql tags==block:schema threads==1 cycles==UNDEF
rampup: run driver=cql tags==block:rampup cycles===TEMPLATE(rampup-cycles,100) threads=auto
main: run driver=cql tags==block:"main.*" cycles===TEMPLATE(main-cycles,100) threads=auto

bindings:
machine_id: Mod(<<sources:10000>>); ToHashedUUID() -> java.util.UUID
sensor_name: HashedLineToString('data/variable_words.txt')
time: Mul(<<timespeed:100>>L); Div(<<sources:10000>>L); ToDate()
cell_timestamp: Mul(<<timespeed:100>>L); Div(<<sources:10000>>L); Mul(1000L)
time: Mul(<<timespeed:100>>L); Div(<<sources:10000>>L); ToJavaInstant()
cell_timestamp: Mul(<<timespeed:100>>L); Div(<<sources:10000>>L); Mul(1000L);
sensor_value: Normal(0.0,5.0); Add(100.0) -> double
station_id: Div(<<sources:10000>>);Mod(<<stations:100>>); ToHashedUUID() -> java.util.UUID
data: HashedFileExtractToString('data/lorem_ipsum_full.txt',800,1200)
blocks:
- tags:
phase: schema
schema:
params:
prepared: false
statements:
- create-keyspace: |
ops:
create-keyspace: |
create keyspace if not exists <<keyspace:baselines>>
WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '<<rf:1>>'}
AND durable_writes = true;
tags:
name: create-keyspace
- create-table : |

create-table: |
create table if not exists <<keyspace:baselines>>.<<table:iot>> (
machine_id UUID, // source machine
sensor_name text, // sensor name
@ -45,63 +41,46 @@ blocks:
'compaction_window_unit': 'MINUTES',
'split_during_flush': true
};
tags:
name: create-table
- truncate-table: |
truncate table <<keyspace:baselines>>.<<table:iot>>;
tags:
name: truncate-table
- tags:
phase: rampup

truncate-table: |
truncate table <<keyspace:baselines>>.<<table:iot>>;
rampup:
params:
cl: <<write_cl:LOCAL_QUORUM>>
statements:
- insert-rampup: |
insert into <<keyspace:baselines>>.<<table:iot>>
(machine_id, sensor_name, time, sensor_value, station_id, data)
values ({machine_id}, {sensor_name}, {time}, {sensor_value}, {station_id}, {data})
using timestamp {cell_timestamp}
idempotent: true
tags:
name: insert-rampup
- tags:
phase: verify
type: read
idempotent: true
ops:
rampup-insert: |
insert into <<keyspace:baselines>>.<<table:iot>> (machine_id, sensor_name, time, sensor_value, station_id, data)
values ({machine_id}, {sensor_name}, {time}, {sensor_value}, {station_id}, {data})
using timestamp {cell_timestamp};
verify:
params:
ratio: 1
cl: <<read_cl:LOCAL_QUORUM>>
statements:
- select-verify: |
select * from <<keyspace:baselines>>.<<table:iot>>
where machine_id={machine_id} and sensor_name={sensor_name} and time={time};
verify-fields: "*, -cell_timestamp"
tags:
name: select-verify
- tags:
phase: main
type: read
verify-fields: "*, -cell_timestamp"
ops:
select-verify: |
select * from <<keyspace:baselines>>.<<table:iot>> where machine_id={machine_id}
and sensor_name={sensor_name} and time={time};

main-read:
params:
ratio: <<read_ratio:1>>
cl: <<read_cl:LOCAL_QUORUM>>
statements:
- select-read: |
select * from <<keyspace:baselines>>.<<table:iot>>
where machine_id={machine_id} and sensor_name={sensor_name}
limit <<limit:10>>
tags:
name: select-read
- tags:
phase: main
type: write
ops:
select-read: |
select * from <<keyspace:baselines>>.<<table:iot>>
where machine_id={machine_id} and sensor_name={sensor_name}
limit <<limit:10>>;
main-write:
params:
ratio: <<write_ratio:9>>
cl: <<write_cl:LOCAL_QUORUM>>
statements:
- insert-main: |
insert into <<keyspace:baselines>>.<<table:iot>>
(machine_id, sensor_name, time, sensor_value, station_id, data)
values ({machine_id}, {sensor_name}, {time}, {sensor_value}, {station_id}, {data})
using timestamp {cell_timestamp}
idempotent: true
tags:
name: insert-main
idempotent: true
ops:
insert-main: |
insert into <<keyspace:baselines>>.<<table:iot>>
(machine_id, sensor_name, time, sensor_value, station_id, data)
values ({machine_id}, {sensor_name}, {time}, {sensor_value}, {station_id}, {data})
using timestamp {cell_timestamp};
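The recurring edit in the workload hunks above and below is a migration from phase-based tags to block-based tags, with regex-valued tag filters quoted. As a rough sketch of what that means at the command line (workload and host names are illustrative, following the comment-header style these files already use):

# before: selecting ops by phase tag
nb5 run driver=cql workload=cql-iot tags=phase:schema host=myhost
# after: selecting ops by block name, quoting regex values
nb5 run driver=cql workload=cql-iot tags=block:"schema.*" host=myhost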
@ -1,4 +1,3 @@
# nb -v run driver=cql yaml=cql-iot tags=phase:schema host=dsehost
description: |
Time-series data model and access patterns. (use cql-timeseries instead)
This is the same as cql-timeseries, which is the preferred name as it is
@ -8,11 +7,11 @@ scenarios:
default:
schema: run driver=cql tags==block:schema threads==1 cycles==UNDEF
rampup: run driver=cql tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
main: run driver=cql tags==block:"main-*.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto
main: run driver=cql tags==block:"main.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto
astra:
schema: run driver=cql tags==block:schema-astra threads==1 cycles==UNDEF
rampup: run driver=cql tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
main: run driver=cql tags==block:"main-*.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto
main: run driver=cql tags==block:"main.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto

params:
instrument: TEMPLATE(instrument,false)
@ -1,14 +1,15 @@
min_version: "5.17.1"
description: A workload with only text keys and text values

scenarios:
default:
schema: run driver=cql tags==phase:schema threads==1 cycles==UNDEF
rampup: run driver=cql tags==phase:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
main: run driver=cql tags==phase:main cycles===TEMPLATE(main-cycles,10000000) threads=auto
schema: run driver=cql tags==block:schema threads==1 cycles==UNDEF
rampup: run driver=cql tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
main: run driver=cql tags==block:"main.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto
astra:
schema: run driver=cql tags==phase:schema-astra threads==1 cycles==UNDEF
rampup: run driver=cql tags==phase:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
main: run driver=cql tags==phase:main cycles===TEMPLATE(main-cycles,10000000) threads=auto
schema: run driver=cql tags==block:schema-astra threads==1 cycles==UNDEF
rampup: run driver=cql tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
main: run driver=cql tags==block:"main.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto

bindings:
seq_key: Mod(<<keycount:1000000000>>); ToString() -> String
@ -17,80 +18,53 @@ bindings:
rw_value: Hash(); <<valdist:Uniform(0,1000000000)->int>>; ToString() -> String

blocks:
- name: schema
tags:
phase: schema
schema:
params:
prepared: false
statements:
- create-table: |
ops:
create-table: |
create table if not exists <<keyspace:baselines>>.<<table:keyvalue>> (
key text,
value text,
PRIMARY KEY (key)
);
tags:
name: create-table
- name: schema-astra
tags:
phase: schema-astra

schema-astra:
params:
prepared: false
statements:
- create-table: |
ops:
create-table: |
create table if not exists <<keyspace:baselines>>.<<table:keyvalue>> (
key text,
value text,
PRIMARY KEY (key)
);
tags:
name: create-table-astra
- name: rampup
tags:
phase: rampup
rampup:
params:
cl: <<write_cl:LOCAL_QUORUM>>
statements:
- rampup-insert: |
ops:
rampup-insert: |
insert into <<keyspace:baselines>>.<<table:keyvalue>>
(key, value)
values ({seq_key},{seq_value});
tags:
name: rampup-insert
- name: verify
tags:
phase: verify
type: read
verify:
params:
cl: <<read_cl:LOCAL_QUORUM>>
statements:
- verify-select: |
verify-fields: key->seq_key, value->seq_value
ops:
verify-select: |
select * from <<keyspace:baselines>>.<<table:keyvalue>> where key={seq_key};
verify-fields: key->seq_key, value->seq_value
tags:
name: verify
- name: main-read
tags:
phase: main
type: read
main-read:
params:
ratio: 5
cl: <<read_cl:LOCAL_QUORUM>>
statements:
- main-select: |
ops:
main-select: |
select * from <<keyspace:baselines>>.<<table:keyvalue>> where key={rw_key};
tags:
name: main-select
- name: main-write
tags:
phase: main
type: write
main-write:
params:
ratio: 5
cl: <<write_cl:LOCAL_QUORUM>>
statements:
- main-insert: |
insert into <<keyspace:baselines>>.<<table:keyvalue>>
(key, value) values ({rw_key}, {rw_value});
tags:
name: main-insert
ops:
main-insert: |
insert into <<keyspace:baselines>>.<<table:keyvalue>> (key, value) values ({rw_key}, {rw_value});
@ -0,0 +1,79 @@
description: |
A cql-starter workload primarily for:
* Cassandra: 3.x, 4.x.
* DataStax Enterprise: 6.8.x.
* DataStax Astra.

scenarios:
default:
schema: run driver=cql tags==block:schema threads==1 cycles==UNDEF
rampup: run driver=cql tags==block:rampup cycles===TEMPLATE(rampup-cycles,10) threads=auto
main: run driver=cql tags==block:"main.*" cycles===TEMPLATE(main-cycles,10) threads=auto
astra:
schema: run driver=cql tags==block:schema-astra threads==1 cycles==UNDEF
rampup: run driver=cql tags==block:rampup cycles===TEMPLATE(rampup-cycles,10) threads=auto
main: run driver=cql tags==block:"main.*" cycles===TEMPLATE(main-cycles,10) threads=auto

params:
x: y

bindings:
machine_id: Mod(<<sources:10000>>); ToHashedUUID() -> java.util.UUID
message: ToString(); TextOfFile('data/cql-starter-message.txt')
time: Mul(100L); Div(10000L); ToJavaInstant()
timestamp: Mul(<<timespeed:100>>L); Div(<<sources:10000>>L); Mul(1000L);

blocks:
schema:
params:
prepared: false
ops:
create-keyspace: |
create keyspace if not exists <<keyspace:starter>>
WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '<<rf:1>>'}
AND durable_writes = true;
create-table: |
create table if not exists <<keyspace:starter>>.<<table:cqlstarter>> (
machine_id UUID,
message text,
time timestamp,
PRIMARY KEY ((machine_id), time)
) WITH CLUSTERING ORDER BY (time DESC);
# truncate-table: |
# truncate table <<keyspace:starter>>.<<table:cqlstarter>>;
schema-astra:
params:
prepared: false
ops:
create-table-astra: |
create table if not exists <<keyspace:starter>>.<<table:cqlstarter>> (
machine_id UUID,
message text,
time timestamp,
PRIMARY KEY ((machine_id), time)
) WITH CLUSTERING ORDER BY (time DESC);
rampup:
params:
cl: <<write_cl:LOCAL_QUORUM>>
idempotent: true
ops:
insert-rampup: |
insert into <<keyspace:starter>>.<<table:cqlstarter>> (machine_id, message, time)
values ({machine_id}, {message}, {time}) using timestamp {timestamp};
main-read:
params:
ratio: <<read_ratio:1>>
cl: <<read_cl:LOCAL_QUORUM>>
ops:
select-read: |
select * from <<keyspace:starter>>.<<table:cqlstarter>>
where machine_id={machine_id};
main-write:
params:
ratio: <<write_ratio:9>>
cl: <<write_cl:LOCAL_QUORUM>>
idempotent: true
ops:
insert-main: |
insert into <<keyspace:starter>>.<<table:cqlstarter>>
(machine_id, message, time) values ({machine_id}, {message}, {time}) using timestamp {timestamp};
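Given the small default cycle counts above, this starter workload lends itself to a quick local smoke test. A sketch of such a run (the localhost target is an assumption; the invocation style mirrors the comment headers used elsewhere in these files):

nb5 run driver=cql workload=cql-starter tags=block:schema host=localhost
nb5 run driver=cql workload=cql-starter tags=block:"main.*" cycles=10 host=localhost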
@ -1,14 +1,15 @@
min_version: "5.17.1"
description: A tabular workload with partitions, clusters, and data fields

scenarios:
default:
schema: run driver=cql tags==phase:schema threads==1 cycles==UNDEF
rampup: run driver=cql tags==phase:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
main: run driver=cql tags==phase:main cycles===TEMPLATE(main-cycles,10000000) threads=auto
schema: run driver=cql tags==block:schema threads==1 cycles==UNDEF
rampup: run driver=cql tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
main: run driver=cql tags==block:main-*.* cycles===TEMPLATE(main-cycles,10000000) threads=auto
astra:
schema: run driver=cql tags==phase:schema-astra threads==1 cycles==UNDEF
rampup: run driver=cql tags==phase:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
main: run driver=cql tags==phase:main cycles===TEMPLATE(main-cycles,10000000) threads=auto
schema: run driver=cql tags==block:schema-astra threads==1 cycles==UNDEF
rampup: run driver=cql tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
main: run driver=cql tags==block:main-*.* cycles===TEMPLATE(main-cycles,10000000) threads=auto

bindings:
# for ramp-up and verify
@ -25,88 +26,60 @@ bindings:
data_write: Hash(); HashedFileExtractToString('data/lorem_ipsum_full.txt',50,150) -> String

blocks:
- name: schema
tags:
phase: schema
schema:
params:
prepared: false
statements:
- create-keyspace: |
ops:
create-keyspace: |
create keyspace if not exists <<keyspace:baselines>>
WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '<<rf:1>>'}
AND durable_writes = true;
tags:
name: create-keyspace
- create-table: |
create-table: |
create table if not exists <<keyspace:baselines>>.<<table:tabular>> (
part text,
clust text,
data text,
PRIMARY KEY (part,clust)
);
tags:
name: create-table
- name: schema-astra
tags:
phase: schema-astra
schema-astra:
params:
prepared: false
statements:
- create-table: |
ops:
create-table: |
create table if not exists <<keyspace:baselines>>.<<table:tabular>> (
part text,
clust text,
data text,
PRIMARY KEY (part,clust)
);
tags:
name: create-table-astra
- name: rampup
tags:
phase: rampup
rampup:
params:
cl: <<write_cl:LOCAL_QUORUM>>
statements:
- rampup-insert: |
ops:
rampup-insert: |
insert into <<keyspace:baselines>>.<<table:tabular>>
(part,clust,data)
values ({part_layout},{clust_layout},{data})
tags:
name: rampup-insert
- name: verify
tags:
phase: verify
type: read
verify:
params:
cl: <<read_cl:LOCAL_QUORUM>>
statements:
- verify-select: |
select * from <<keyspace:baselines>>.<<table:tabular>> where part={part_layout} and clust={clust_layout}
tags:
name: verify-select
- name: main-read
tags:
phase: main
type: read
ops:
verify-select: |
select * from <<keyspace:baselines>>.<<table:tabular>> where part={part_layout} and clust={clust_layout};

main-read:
params:
ratio: 5
cl: <<read_cl:LOCAL_QUORUM>>
statements:
- main-select: |
ops:
main-select: |
select * from <<keyspace:baselines>>.<<table:tabular>> where part={part_read} limit {limit};
tags:
name: main-select
- name: main-write
tags:
phase: main
type: write

main-write:
params:
ratio: 5
cl: <<write_cl:LOCAL_QUORUM>>
statements:
- main-write: |
ops:
main-write: |
insert into <<keyspace:baselines>>.<<table:tabular>>
(part, clust, data)
values ({part_write},{clust_write},{data_write})
tags:
name: main-write
(part, clust, data) values ({part_write},{clust_write},{data_write});
@ -1,13 +1,13 @@
min_version: "4.17.15"
min_version: "5.17.1"

description: creates local graphs which resemble a wagon-wheel topology, using
DSE Graph, version 6.8 or newer

scenarios:
default:
creategraph: run driver=cqld4 graphname=graph_wheels tags=phase:create-graph cycles===UNDEF
schema: run driver=cqld4 graphname=graph_wheels tags=phase:graph-schema cycles===UNDEF
rampup: run driver==cqld4 graphname=graph_wheels tags=phase:rampup cycles=1
creategraph: run driver=cqld4 graphname=graph_wheels tags=block:create-graph cycles===UNDEF
schema: run driver=cqld4 graphname=graph_wheels tags=block:graph-schema cycles===UNDEF
rampup: run driver==cqld4 graphname=graph_wheels tags=block:rampup cycles=1
drop-graph: run driver=cqld4 graphname=graph_wheels tags=block:drop-graph cycles===UNDEF
creategraph-classic: run driver=cqld4 graphname=graph_wheels tags=block:create-graph-classic cycles===UNDEF
fluent: run driver=cqld4 graphname=graph_wheels tags=block:fluent cycles=10
@ -40,16 +40,12 @@ blocks:
.classicEngine()
.create()
create-graph:
tags:
phase: create-graph
statements:
creategraph:
type: gremlin
script: >-
system.graph('<<graphname:graph_wheels>>').ifNotExists().create()
create-schema:
tags:
phase: graph-schema
statements:
graph-schema:
type: gremlin
@ -78,7 +74,7 @@ blocks:
.create()
dev-mode:
tags:
phase: dev-mode
block: dev-mode
statements:
dev-mode:
type: gremlin
@ -87,7 +83,7 @@ blocks:
schema.config().option('graph.schema_mode').set('Development');
prod-mode:
tags:
phase: prod-mode
block: prod-mode
statements:
prod-mode:
type: gremlin
@ -96,7 +92,7 @@ blocks:
schema.config().option('graph.schema_mode').set('Production');
rampup:
tags:
phase: rampup
block: rampup
statements:
main-add:
type: gremlin
@ -1,3 +1,4 @@
min_version: "5.17.1"
description: |
This is a workload which creates an incrementally growing dataset over cycles.

@ -26,15 +27,12 @@ description: |

scenarios:
default:
schema: run tags=phase:schema threads==1
# rampup: run tags=phase:rampup cycles===TEMPLATE(rampup-cycles,100000) threads=auto
main: run tags=block:"main.*" cycles===TEMPLATE(main-cycles,0) threads=auto
default-schema: run tags=block:schema threads==1
# default-rampup: run tags=phase:rampup cycles===TEMPLATE(rampup-cycles,100000) threads=auto
default-main: run tags=block:"main.* cycles===TEMPLATE(main-cycles,0) threads=auto
schema: run tags=block:schema.* threads==1
main: run tags=block:main-.*.* cycles===TEMPLATE(main-cycles,0) threads=auto
default-schema: run tags=block:"schema.*" threads==1
default-main: run tags=block:"main.*" cycles===TEMPLATE(main-cycles,0) threads=auto
astra:
schema: run tags=block:astra-schema threads==1
# rampup: run tags=phase:rampup cycles===TEMPLATE(rampup-cycles,0) threads=auto
main: run tags=block:"main.*" cycles===TEMPLATE(main-cycles,0) threads=auto

params:
@ -1,4 +1,4 @@
min_version: "4.17.15"
min_version: "5.17.1"

description: |
A workload with only text keys and text values which range in size from 50K to 150K.
@ -83,5 +83,4 @@ blocks:
cl: TEMPLATE(write_cl,LOCAL_QUORUM)
statements:
main-insert: |
insert into TEMPLATE(keyspace,baselines).TEMPLATE(table,keyvalue)
(key, value) values ({rw_key}, {rw_value});
insert into TEMPLATE(keyspace,baselines).TEMPLATE(table,keyvalue) (key, value) values ({rw_key}, {rw_value});
@ -1,4 +1,4 @@
min_version: "4.17.15"
min_version: "5.17.1"

description: |
A workload with only text keys and text values.
@ -1,4 +1,4 @@
min_version: "4.17.15"
min_version: "5.17.1"

description: |
A tabular workload with partitions, clusters, and data fields
@ -28,12 +28,12 @@ description: |
scenarios:
default:
schema: run driver=cql tags==block:schema threads==1 cycles==UNDEF
rampup: run driver=cql tags==block:rampup cycles===TEMPLATE(rampup-cycles,10B) threads=auto
main: run driver=cql tags==block:"main.*" cycles===TEMPLATE(main-cycles,100M) threads=auto
rampup: run driver=cql tags==block:rampup cycles===TEMPLATE(rampup-cycles,100) threads=auto
main: run driver=cql tags==block:"main.*" cycles===TEMPLATE(main-cycles,100) threads=auto
astra:
schema: run driver=cql tags==block:schema-astra threads==1 cycles==UNDEF
rampup: run driver=cql tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
main: run driver=cql tags==block:"main.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto
rampup: run driver=cql tags==block:rampup cycles===TEMPLATE(rampup-cycles,100) threads=auto
main: run driver=cql tags==block:"main.*" cycles===TEMPLATE(main-cycles,100) threads=auto

params:
instrument: true
@ -100,13 +100,13 @@ blocks:
rampup-insert: |
insert into TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular)
(part,clust,data0,data1,data2,data3,data4,data5,data6,data7)
values ({part_layout},{clust_layout},{data0},{data1},{data2},{data3},{data4},{data5},{data6},{data7})
values ({part_layout},{clust_layout},{data0},{data1},{data2},{data3},{data4},{data5},{data6},{data7});
verify:
params:
cl: TEMPLATE(read_cl,LOCAL_QUORUM)
ops:
verify-select: |
select * from TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular) where part={part_layout} and clust={clust_layout}
select * from TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular) where part={part_layout} and clust={clust_layout};
main-read:
params:
ratio: 1
@ -136,4 +136,4 @@ blocks:
main-write: |
insert into TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular)
(part, clust, data0,data1,data2,data3,data4,data5,data6,data7)
values ({part_write},{clust_write},{data0},{data1},{data2},{data3},{data4},{data5},{data6},{data7})
@ -1,4 +1,4 @@
min_version: "4.17.15"
min_version: "5.17.1"

description: |
This workload emulates a time-series data model and access patterns.
@ -82,11 +82,12 @@ blocks:
ratio: 1
cl: TEMPLATE(read_cl,LOCAL_QUORUM)
instrument: TEMPLATE(instrument-reads,TEMPLATE(instrument,false))
verify-fields: "*, -cell_timestamp"
ops:
select-verify: |
select * from TEMPLATE(keyspace,baselines).TEMPLATE(table,iot)
where machine_id={machine_id} and sensor_name={sensor_name} and time={time};
verify-fields: "*, -cell_timestamp"

main-read:
params:
ratio: TEMPLATE(read_ratio,1)
@ -96,7 +97,7 @@ blocks:
select-read: |
select * from TEMPLATE(keyspace,baselines).TEMPLATE(table,iot)
where machine_id={machine_id} and sensor_name={sensor_name}
limit TEMPLATE(limit,10)
limit TEMPLATE(limit,10);
main-write:
params:
ratio: TEMPLATE(write_ratio,9)
@ -108,4 +109,4 @@ blocks:
insert into TEMPLATE(keyspace,baselines).TEMPLATE(table,iot)
(machine_id, sensor_name, time, sensor_value, station_id, data)
values ({machine_id}, {sensor_name}, {time}, {sensor_value}, {station_id}, {data})
using timestamp {cell_timestamp}
using timestamp {cell_timestamp};
@ -1,9 +1,10 @@
min_version: "4.17.15"
min_version: "5.17.1"

scenarios:
default:
schema: run driver=cql tags==phase:schema cycles==UNDEF threads==1
rampup: run driver=cql tags==phase:rampup cycles=TEMPLATE(rampup-cycles,100K) threads=auto
schema: run driver=cql tags==block:schema cycles==UNDEF threads==1
rampup: run driver=cql tags==block:rampup cycles=TEMPLATE(rampup-cycles,100K) threads=auto
main: run driver=cql tags==block:"main" cycles===TEMPLATE(main-cycles,100K) threads=auto

bindings:
userid: Template('user-{}',ToString()); SaveString('userid');
@ -126,7 +126,7 @@ timeouts:

blockplan:
# not needed when tags=block:'schema.*'
# not needed when tags=block:"schema.*"
# schema: schema-keyspaces, schema-tables, schema-types
schema-keyspaces: schema-keyspaces
schema-tables: schema-tables
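As the commented mapping above suggests, one blockplan step can also expand to several blocks, run in order. A hedged sketch of that form (step and block names taken from the comment above):

blockplan:
  # one named step expanding to three blocks, run in order
  schema: schema-keyspaces, schema-tables, schema-types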
@ -160,7 +160,7 @@ activity types.
- **ssl** - specifies the type of the SSL implementation.
Disabled by default, possible values are `jdk` and `openssl`.

[Additional parameters may need to be provided](ssl.md).
See the ssl help topic with `nb5 help ssl` for more details.

- **jmxreporting** - enable JMX reporting if needed.
Examples:
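To make the ssl option above concrete, enabling the OpenSSL implementation for a CQL activity might look like the following (a sketch only; the workload and host values are placeholders):

nb5 run driver=cql workload=cql-keyvalue host=myhost ssl=openssl tags=block:"main.*"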
@ -0,0 +1 @@
Welcome to cql-starter!
@ -1,18 +1,18 @@
description: Auto-generated workload from source schema.
scenarios:
default:
schema: run driver=cql tags=block:schema.* threads===UNDEF cycles===UNDEF
rampup: run driver=cql tags=block:rampup.* threads=auto cycles===TEMPLATE(rampup-cycles,10000)
main: run driver=cql tags=block:main.* threads=auto cycles===TEMPLATE(main-cycles,10000)
schema: run driver=cql tags=block:"schema.*" threads===UNDEF cycles===UNDEF
rampup: run driver=cql tags=block:"rampup.*" threads=auto cycles===TEMPLATE(rampup-cycles,10000)
main: run driver=cql tags=block:"main.*" threads=auto cycles===TEMPLATE(main-cycles,10000)
main-insert: run driver=cql tags=block:main-insert threads=auto cycles===TEMPLATE(main-cycles,10000)
main-select: run driver=cql tags=block:main-select threads=auto cycles===TEMPLATE(main-cycles,10000)
main-scan: run driver=cql tags=block:main-scan threads=auto cycles===TEMPLATE(main-cycles,10000)
main-update: run driver=cql tags=block:main-update threads=auto cycles===TEMPLATE(main-cycles,10000)
truncate: run driver=cql tags=block:truncate.* threads===UNDEF cycles===UNDEF
truncate: run driver=cql tags=block:'truncate.*' threads===UNDEF cycles===UNDEF
schema-keyspaces: run driver=cql tags=block:schema-keyspaces threads===UNDEF cycles===UNDEF
schema-types: run driver=cql tags=block:schema-types threads===UNDEF cycles===UNDEF
schema-tables: run driver=cql tags=block:schema-tables threads===UNDEF cycles===UNDEF
drop: run driver=cql tags=block:drop.* threads===UNDEF cycles===UNDEF
drop: run driver=cql tags=block:'drop.*' threads===UNDEF cycles===UNDEF
drop-tables: run driver=cql tags=block:drop-tables threads===UNDEF cycles===UNDEF
drop-types: run driver=cql tags=block:drop-types threads===UNDEF cycles===UNDEF
drop-keyspaces: run driver=cql tags=block:drop-keyspaces threads===UNDEF cycles===UNDEF
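Note the quoting being added to the regex-valued tag filters above: left unquoted, characters such as `*` can be mangled by the shell or split by the parameter parser before they reach the tag matcher. A sketch of the safe form at the command line (the workload name is illustrative):

nb5 run driver=cql workload=cqlgen tags=block:'truncate.*'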
@ -1,4 +1,4 @@
min_version: "4.17.15"
min_version: "5.17.1"

description: |
A workload with only text keys and text values. This is based on the CQL keyvalue workloads as found
@ -6,9 +6,9 @@ description: |

scenarios:
default:
schema: run driver=dynamodb tags==block:schema threads==1 cycles==UNDEF
schema: run driver=dynamodb tags==block:"schema.*" threads==1 cycles==UNDEF
rampup: run driver=dynamodb tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
main: run driver=dynamodb tags=="block:main.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto
main: run driver=dynamodb tags==block:"main.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto
read: run driver=dynamodb tags==block:main-read cycles===TEMPLATE(main-cycles,10000000) threads=auto
write: run driver=dynamodb tags==block:main-write cycles===TEMPLATE(main-cycles,10000000) threads=auto

@ -1,10 +1,10 @@
min_version: "4.17.15"
min_version: "5.17.1"

description: |
Run a read/write workload against DynamoDB with varying field sizes and query patterns

scenarios:
schema: run driver=dynamodb tags=block:schema region=us-east-1
schema: run driver=dynamodb tags=block:"schema.*" region=us-east-1
rampup: run driver=dynamodb tags=block:rampup region=us-east-1
read: run driver=dynamodb tags=block:read region=us-east-1
main: run driver=dynamodb tags=block:"main.*" region=us-east-1
@ -1,4 +1,4 @@
min_version: "4.17.15"
min_version: "5.17.1"

description: |
This workload emulates a time-series data model and access patterns. This is based on the
@ -11,7 +11,7 @@ description: |

scenarios:
default:
schema: run driver=dynamodb tags==block:schema threads==1 cycles==UNDEF
schema: run driver=dynamodb tags==block:"schema.*" threads==1 cycles==UNDEF
rampup: run driver=dynamodb tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
main: run driver=dynamodb tags==block:main cycles===TEMPLATE(main-cycles,10000000) threads=auto
delete:
@ -1,4 +1,4 @@
min_version: "4.17.15"
min_version: "5.17.1"

description: |
This workload emulates a key-value data model and access patterns.
@ -9,8 +9,8 @@ description: |
scenarios:
default:
schema: run driver=http tags==block:schema threads==1 cycles==UNDEF
rampup: run driver=http tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
main: run driver=http tags==block:main cycles===TEMPLATE(main-cycles,10000000) threads=auto
rampup: run driver=http tags==block:"rampup.*" cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
main: run driver=http tags==block:"main.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto

bindings:
# To enable an optional weighted set of hosts in place of a load balancer
@ -19,11 +19,13 @@ bindings:
# multiple hosts: restapi_host=host1,host2,host3
# multiple weighted hosts: restapi_host=host1:3,host2:7
weighted_hosts: WeightedStrings('<<restapi_host:stargate>>')
# http request id

request_id: ToHashedUUID(); ToString();
request_token: ToString(); TextOfFile("TEMPLATE(stargate_tokenfile,data/stargate_token.txt)")

seq_key: Mod(<<keycount:10000000>>); ToString() -> String
seq_value: Hash(); Mod(<<valuecount:1000000000>>); ToString() -> String

rw_key: <<keydist:Uniform(0,10000000)->int>>; ToString() -> String
rw_value: Hash(); <<valdist:Uniform(0,1000000000)->int>>; ToString() -> String

@ -35,7 +37,7 @@ blocks:
uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/schemas/keyspaces
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
X-Cassandra-Token: "{request_token}"
Content-Type: "application/json"
body: |
{
@ -48,7 +50,7 @@ blocks:
uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/schemas/keyspaces/<<keyspace:baselines>>/tables/<<table:keyvalue>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
X-Cassandra-Token: "{request_token}"
Content-Type: "application/json"
ok-status: "[2-4][0-9][0-9]"

@ -57,7 +59,7 @@ blocks:
uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/schemas/keyspaces/<<keyspace:baselines>>/tables
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
X-Cassandra-Token: "{request_token}"
Content-Type: "application/json"
body: |
{
@ -87,7 +89,7 @@ blocks:
uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/schemas/keyspaces/<<keyspace:baselines>>/tables
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
X-Cassandra-Token: "{request_token}"
Content-Type: "application/json"
body: |
{
@ -117,7 +119,7 @@ blocks:
uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:keyvalue>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
X-Cassandra-Token: "{request_token}"
Content-Type: "application/json"
body: |
{
@ -134,7 +136,7 @@ blocks:
uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:keyvalue>>/{rw_key}
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
X-Cassandra-Token: "{request_token}"
Content-Type: "application/json"
ok-status: "[2-4][0-9][0-9]"

@ -147,7 +149,7 @@ blocks:
uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:keyvalue>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
X-Cassandra-Token: "{request_token}"
Content-Type: "application/json"
body: |
{
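The token-handling change in this file swaps a hard-coded <<auth_token:...>> header value for a request_token binding read from a file, so credentials can be rotated without editing the workload. A sketch of pointing that binding at a different token file (the path and workload name are illustrative):

nb5 run driver=http workload=http-rest-keyvalue stargate_tokenfile=/tmp/stargate_token.txt tags=block:"main.*"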
@ -1,4 +1,4 @@
min_version: "4.17.15"
min_version: "5.17.1"

description: |
This workload emulates a tabular workload with partitions, clusters, and data fields.
@ -9,9 +9,9 @@ description: |

scenarios:
default:
schema: run driver=http tags==block:schema threads==1 cycles==UNDEF
rampup: run driver=http tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
main: run driver=http tags==block:main cycles===TEMPLATE(main-cycles,10000000) threads=auto
schema: run driver=http tags==block:"schema.*" threads==1 cycles==UNDEF
rampup: run driver=http tags==block:"rampup.*" cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
main: run driver=http tags==block:"main.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto

bindings:
# To enable an optional weighted set of hosts in place of a load balancer
@ -20,16 +20,19 @@ bindings:
# multiple hosts: restapi_host=host1,host2,host3
# multiple weighted hosts: restapi_host=host1:3,host2:7
weighted_hosts: WeightedStrings('<<restapi_host:stargate>>')
# http request id
request_id: ToHashedUUID(); ToString();
request_token: ToString(); TextOfFile("TEMPLATE(stargate_tokenfile,data/stargate_token.txt)")

# for ramp-up and verify
part_layout: Div(<<partsize:1000000>>); ToString() -> String
clust_layout: Mod(<<partsize:1000000>>); ToString() -> String
data: HashedFileExtractToString('data/lorem_ipsum_full.txt',50,150); URLEncode();

# for read
limit: Uniform(1,10) -> int
part_read: Uniform(0,<<partcount:100>>)->int; ToString() -> String
clust_read: Add(1); Uniform(0,<<partsize:1000000>>)->int; ToString() -> String

# for write
part_write: Hash(); Uniform(0,<<partcount:100>>)->int; ToString() -> String
clust_write: Hash(); Add(1); Uniform(0,<<partsize:1000000>>)->int; ToString() -> String
@ -43,7 +46,7 @@ blocks:
uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/schemas/keyspaces
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
X-Cassandra-Token: "{request_token}"
Content-Type: "application/json"
body: |
{
@ -56,7 +59,7 @@ blocks:
uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/schemas/keyspaces/<<keyspace:baselines>>/tables/<<table:tabular>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
X-Cassandra-Token: "{request_token}"
Content-Type: "application/json"
ok-status: "[2-4][0-9][0-9]"

@ -65,7 +68,7 @@ blocks:
uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/schemas/keyspaces/<<keyspace:baselines>>/tables
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
X-Cassandra-Token: "{request_token}"
Content-Type: "application/json"
body: |
{
@ -102,7 +105,7 @@ blocks:
uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:tabular>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
X-Cassandra-Token: "{request_token}"
Content-Type: "application/json"
body: |
{
@ -120,7 +123,7 @@ blocks:
uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:tabular>>/{part_read}&page-size={limit}
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
X-Cassandra-Token: "{request_token}"
Content-Type: "application/json"

main-write:
@ -132,7 +135,7 @@ blocks:
uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:tabular>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
X-Cassandra-Token: "{request_token}"
Content-Type: "application/json"
body: |
{
@ -1,4 +1,4 @@
min_version: "4.17.15"
min_version: "5.17.1"

description: |
This workload emulates a time-series data model and access patterns.
@ -12,9 +12,9 @@ description: |

scenarios:
default:
schema: run driver=cql tags==block:schema threads==1 cycles==UNDEF
schema: run driver=cql tags==block:"schema.*" threads==1 cycles==UNDEF
schema-astra: run driver=cql tags==block:schema-astra threads==1 cycles==UNDEF
rampup: run driver=http tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
rampup: run driver=http tags==block:"rampup.*" cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
main: run driver=http tags==block:"main.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto

bindings:
@ -24,8 +24,10 @@ bindings:
# multiple hosts: restapi_host=host1,host2,host3
# multiple weighted hosts: restapi_host=host1:3,host2:7
weighted_hosts: WeightedStrings('<<restapi_host:stargate>>')

# http request id
request_id: ToHashedUUID(); ToString();
request_token: ToString(); TextOfFile("TEMPLATE(stargate_tokenfile,data/stargate_token.txt)")

machine_id: Mod(<<sources:10000>>); ToHashedUUID() -> java.util.UUID
sensor_name: HashedLineToString('data/variable_words.txt')
@ -42,7 +44,7 @@ blocks:
uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/schemas/keyspaces
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
X-Cassandra-Token: "{request_token}"
Content-Type: "application/json"
body: |
{
@ -55,7 +57,7 @@ blocks:
uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/schemas/keyspaces/<<keyspace:baselines>>/tables/<<table:iot>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
X-Cassandra-Token: "{request_token}"
Content-Type: "application/json"
ok-status: "[2-4][0-9][0-9]"

@ -64,7 +66,7 @@ blocks:
uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/schemas/keyspaces/<<keyspace:baselines>>/tables
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
X-Cassandra-Token: "{request_token}"
Content-Type: "application/json"
body: |
{
@ -123,7 +125,7 @@ blocks:
uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:iot>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
X-Cassandra-Token: "{request_token}"
Content-Type: "application/json"
body: |
{
@ -144,7 +146,7 @@ blocks:
uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:iot>>?where=URLENCODE[[{"machine_id":{"$eq":"{machine_id}"},"sensor_name":{"$eq":"{sensor_name}"}}]]&page-size=<<limit:10>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
X-Cassandra-Token: "{request_token}"
Content-Type: "application/json"

main-write:
@ -156,7 +158,7 @@ blocks:
uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:iot>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
X-Cassandra-Token: "{request_token}"
Content-Type: "application/json"
body: |
{
@ -1,6 +1,4 @@
min_version: "4.17.15"

# nb -v run driver=http yaml=http-docsapi-crud-basic tags=phase:schema docsapi_host=my_docsapi_host auth_token=$AUTH_TOKEN
min_version: "5.17.1"

description: |
This workload emulates CRUD operations for the Stargate Documents API.
@ -9,11 +7,11 @@ description: |

scenarios:
default:
schema: run driver=http tags==block:schema threads==1 cycles==UNDEF
write: run driver=http tags==block:"write.*" cycles===TEMPLATE(write-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
read: run driver=http tags==block:"read.*" cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
update: run driver=http tags==block:"update.*" cycles===TEMPLATE(update-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
delete: run driver=http tags==block:"delete.*" cycles===TEMPLATE(delete-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
schema: run driver=http tags==block:"schema.*" threads==1 cycles==UNDEF
write: run driver=http tags==block:'write.*' cycles===TEMPLATE(write-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
read: run driver=http tags==block:'read.*' cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
update: run driver=http tags==block:'update.*' cycles===TEMPLATE(update-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
delete: run driver=http tags==block:'delete.*' cycles===TEMPLATE(delete-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn

bindings:
# To enable an optional weighted set of hosts in place of a load balancer
@ -1,4 +1,4 @@
min_version: "4.17.15"
min_version: "5.17.1"

description: |
This workload emulates CRUD operations for the Stargate Documents API.
@ -7,11 +7,11 @@ description: |

scenarios:
default:
schema: run driver=http tags==block:schema threads==1 cycles==UNDEF
write: run driver=http tags==name:"write.*" cycles===TEMPLATE(write-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
read: run driver=http tags==name:"read.*" cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
update: run driver=http tags==name:"update.*" cycles===TEMPLATE(update-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
delete: run driver=http tags==name:"delete.*" cycles===TEMPLATE(delete-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
schema: run driver=http tags==block:"schema.*" threads==1 cycles==UNDEF
write: run driver=http tags==name:'write.*' cycles===TEMPLATE(write-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
read: run driver=http tags==name:'read.*' cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
update: run driver=http tags==name:'update.*' cycles===TEMPLATE(update-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
delete: run driver=http tags==name:'delete.*' cycles===TEMPLATE(delete-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn

bindings:
# To enable an optional weighted set of hosts in place of a load balancer
@ -1,4 +1,4 @@
min_version: "4.17.15"
min_version: "5.17.1"

description: |
This workload emulates a key-value data model and access patterns.
@ -10,7 +10,7 @@ description: |

scenarios:
default:
schema: run driver=http tags==block:schema threads==1 cycles==UNDEF
schema: run driver=http tags==block:"schema.*" threads==1 cycles==UNDEF
rampup: run driver=http tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
main: run driver=http tags==block:main cycles===TEMPLATE(main-cycles,10000000) threads=auto

@ -1,4 +1,4 @@
min_version: "4.17.15"
min_version: "5.17.1"

description: |
This workload emulates advanced search filter combinations for the Stargate Documents API.
@ -15,10 +15,10 @@ description: |
# complex2: (match1 LTE 0 OR match2 EQ "false") AND (match2 EQ "false" OR match3 EQ true)
# complex3: (match1 LTE 0 AND match2 EQ "true") OR (match2 EQ "false" AND match3 EQ true)
scenarios:
schema: run driver=http tags==phase:schema threads==<<threads:1>> cycles==UNDEF
schema: run driver=http tags==block:"schema.*" threads==<<threads:1>> cycles==UNDEF
rampup:
write: run driver=http tags==name:"rampup-put.*" cycles===TEMPLATE(docscount,10000000) docpadding=TEMPLATE(docpadding,0) match-ratio=TEMPLATE(match-ratio,0.01) threads=<<threads:auto>> errors=timer,warn
read: run driver=http tags==phase:"rampup-get.*" cycles===TEMPLATE(rampup-cycles, 10000000) page-size=TEMPLATE(page-size,3) fields=TEMPLATE(fields,%5b%5d) threads=<<threads:auto>> errors=timer,warn
write: run driver=http tags==name:'rampup-put.*' cycles===TEMPLATE(docscount,10000000) docpadding=TEMPLATE(docpadding,0) match-ratio=TEMPLATE(match-ratio,0.01) threads=<<threads:auto>> errors=timer,warn
read: run driver=http tags==block:'rampup-get.*' cycles===TEMPLATE(rampup-cycles, 10000000) page-size=TEMPLATE(page-size,3) fields=TEMPLATE(fields,%5b%5d) threads=<<threads:auto>> errors=timer,warn
main:
all: run driver=http tags==block:main cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) page-size=TEMPLATE(page-size,3) fields=TEMPLATE(fields,%5b%5d) threads=<<threads:auto>> errors=timer,warn
get-in: run driver=http tags==name:main-get-in,filter:in cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) page-size=TEMPLATE(page-size,3) fields=TEMPLATE(fields,%5b%5d) threads=<<threads:auto>> errors=timer,warn
@ -1,4 +1,4 @@
min_version: "4.17.15"
min_version: "5.17.1"

description: |
This workload emulates basic search operations for the Stargate Documents API.
@ -7,10 +7,10 @@ description: |
Note that docsapi_port should reflect the port where the Docs API is exposed (defaults to 8180).

scenarios:
schema: run driver=http tags==block:schema threads==<<threads:1>> cycles==UNDEF
schema: run driver=http tags==block:"schema.*" threads==<<threads:1>> cycles==UNDEF
rampup:
write: run driver=http tags==name:"rampup-put.*" cycles===TEMPLATE(docscount,10000000) docpadding=TEMPLATE(docpadding,0) match-ratio=TEMPLATE(match-ratio,0.01) threads=<<threads:auto>> errors=timer,warn
read: run driver=http tags==name:"rampup-get.*" cycles===TEMPLATE(rampup-cycles, 10000000) page-size=TEMPLATE(page-size,3) fields=TEMPLATE(fields,%5b%5d) threads=<<threads:auto>> errors=timer,warn
write: run driver=http tags==name:'rampup-put.*' cycles===TEMPLATE(docscount,10000000) docpadding=TEMPLATE(docpadding,0) match-ratio=TEMPLATE(match-ratio,0.01) threads=<<threads:auto>> errors=timer,warn
read: run driver=http tags==name:'rampup-get.*' cycles===TEMPLATE(rampup-cycles, 10000000) page-size=TEMPLATE(page-size,3) fields=TEMPLATE(fields,%5b%5d) threads=<<threads:auto>> errors=timer,warn
main:
all: run driver=http tags==block:main cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) page-size=TEMPLATE(page-size,3) fields=TEMPLATE(fields,%5b%5d) threads=<<threads:auto>> errors=timer,warn
get-eq: run driver=http tags==name:main-get-eq,filter:eq cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) page-size=TEMPLATE(page-size,3) fields=TEMPLATE(fields,%5b%5d) threads=<<threads:auto>> errors=timer,warn
@ -1,4 +1,4 @@
min_version: "4.17.15"
min_version: "5.17.1"

description: |
This workload emulates a key-value data model and access patterns.
@ -9,7 +9,7 @@ description: |

scenarios:
default:
schema: run driver=http tags==block:schema threads==1 cycles==UNDEF
schema: run driver=http tags==block:"schema.*" threads==1 cycles==UNDEF
rampup: run driver=http tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
main: run driver=http tags==block:"main.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto

@ -1,4 +1,4 @@
min_version: "4.17.15"
min_version: "5.17.1"

description: |
This workload emulates a tabular workload with partitions, clusters, and data fields.
@ -10,7 +10,7 @@ description: |

scenarios:
default:
schema: run driver=http tags==block:schema threads==1 cycles==UNDEF
schema: run driver=http tags==block:"schema.*" threads==1 cycles==UNDEF
rampup: run driver=http tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
main: run driver=http tags==block:"main.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto

@ -39,7 +39,7 @@ bindings:
blocks:
schema:
tags:
phase: schema
block: schema
ops:
create-keyspace:
method: POST
@ -1,4 +1,4 @@
min_version: "4.17.15"
min_version: "5.17.1"

description: |
This workload emulates a time-series data model and access patterns.
@ -15,7 +15,7 @@ scenarios:
default:
schema: run driver=cql tags==block:schema threads==1 cycles==UNDEF
rampup: run driver=http tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
main: run driver=http tags==block:"main.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto
main: run driver=http tags==block:main cycles===TEMPLATE(main-cycles,10000000) threads=auto

bindings:
# To enable an optional weighted set of hosts in place of a load balancer
@ -1,4 +1,4 @@
min_version: "4.17.15"
min_version: "5.17.1"

# TODO
# - do we need a truncate schema / namespace at the end
@ -13,9 +13,9 @@ description: |

scenarios:
default:
schema: run driver=http tags==block:schema threads==1 cycles==UNDEF
schema: run driver=http tags==block:"schema.*" threads==1 cycles==UNDEF
rampup: run driver=http tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
main: run driver=http tags==phase:"main.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto
main: run driver=http tags==block:"main.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto

bindings:
# To enable an optional weighted set of hosts in place of a load balancer
@@ -1,4 +1,4 @@
min_version: "4.17.15"
min_version: "5.17.1"

# TODO
# - do we need a truncate schema / namespace at the end
@@ -15,7 +15,7 @@ description: |

scenarios:
default:
schema: run driver=http tags==block:schema threads==1 cycles==UNDEF
schema: run driver=http tags==block:"schema.*" threads==1 cycles==UNDEF
rampup: run driver=http tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
main: run driver=http tags==block:"main.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto
@@ -1,4 +1,4 @@
min_version: "4.17.15"
min_version: "5.17.1"

# TODO
# - do we need a truncate schema / namespace at the end
@@ -19,7 +19,7 @@ description: |

scenarios:
default:
schema: run driver=http tags==block:schema threads==1 cycles==UNDEF
schema: run driver=http tags==block:"schema.*" threads==1 cycles==UNDEF
rampup: run driver=http tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
main: run driver=http tags==block:"main.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto
1
adapter-http/src/main/resources/data/stargate_token.txt
Normal file
@@ -0,0 +1 @@
# <<put-token-here>>
@@ -42,7 +42,7 @@

<dependency>
<groupId>org.mongodb</groupId>
<artifactId>mongodb-driver-sync</artifactId>
<version>4.8.1</version>
<version>4.8.2</version>
</dependency>
</dependencies>
@@ -1,17 +1,17 @@
# Connection Guide: https://www.mongodb.com/docs/drivers/java/sync/current/fundamentals/connection/
# nb5 run driver=mongodb workload=/path/to/mongodb-keyvalue2.yaml tags=block:schema connection='mongodb+srv://user:pass@sample-db.host.mongodb.net/?retryWrites=true&w=majority' database=baselines -vv --show-stacktraces
# nb5 run driver=mongodb workload=/path/to/mongodb-keyvalue2.yaml tags=block:"schema.*" connection='mongodb+srv://user:pass@sample-db.host.mongodb.net/?retryWrites=true&w=majority' database=baselines -vv --show-stacktraces
# nb5 run driver=mongodb workload=/path/to/mongodb-keyvalue2.yaml tags=block:rampup cycles=25 connection='mongodb+srv://user:pass@sample-db.host.mongodb.net/?retryWrites=true&w=majority' database=baselines -vv --show-stacktraces
# nb5 run driver=mongodb workload=/path/to/mongodb-keyvalue2.yaml tags='block:main-.*' cycles=25 connection='mongodb+srv://user:pass@sample-db.host.mongodb.net/?retryWrites=true&w=majority' database=baselines -vv --show-stacktraces
min_version: "4.17.31"
min_version: "5.17.1"

description: |
This workload is analogous to the cql-keyvalue2 workload, just implemented for MongoDB.

scenarios:
default:
schema: run driver=mongodb tags==block:schema threads==1 cycles==UNDEF
schema: run driver=mongodb tags==block:"schema.*" threads==1 cycles==UNDEF
rampup: run driver=mongodb tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
main: run driver=mongodb tags==block:"main-.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto
main: run driver=mongodb tags==block:'main-.*' cycles===TEMPLATE(main-cycles,10000000) threads=auto
drop: run driver=mongodb tags==block:drop-collection threads==1 cycles==UNDEF

params:
@@ -1,17 +1,17 @@
# Connection Guide: https://www.mongodb.com/docs/drivers/java/sync/current/fundamentals/connection/
# nb5 run driver=mongodb workload=/path/to/mongodb-tabular2.yaml tags=block:schema connection='mongodb+srv://user:pass@sample-db.host.mongodb.net/?retryWrites=true&w=majority' database=baselines -vv --show-stacktraces
# nb5 run driver=mongodb workload=/path/to/mongodb-tabular2.yaml tags=block:"schema.*" connection='mongodb+srv://user:pass@sample-db.host.mongodb.net/?retryWrites=true&w=majority' database=baselines -vv --show-stacktraces
# nb5 run driver=mongodb workload=/path/to/mongodb-tabular2.yaml tags=block:rampup cycles=25 connection='mongodb+srv://user:pass@sample-db.host.mongodb.net/?retryWrites=true&w=majority' database=baselines -vv --show-stacktraces
# nb5 run driver=mongodb workload=/path/to/mongodb-tabular2.yaml tags='block:main.*' cycles=25 connection='mongodb+srv://user:pass@sample-db.host.mongodb.net/?retryWrites=true&w=majority' database=baselines -vv --show-stacktraces
min_version: "4.17.31"
min_version: "5.17.1"

description: |
This workload is analogous to the cql-tabular2 workload, just implemented for MongoDB.

scenarios:
default:
schema: run driver=mongodb tags==block:schema threads==1 cycles==UNDEF
schema: run driver=mongodb tags==block:"schema.*" threads==1 cycles==UNDEF
rampup: run driver=mongodb tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
main: run driver=mongodb tags==block:"main-.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto
main: run driver=mongodb tags==block:'main-.*' cycles===TEMPLATE(main-cycles,10000000) threads=auto
drop: run driver=mongodb tags==block:drop-collection threads==1 cycles==UNDEF

params:
@@ -1,21 +1,21 @@
# Connection Guide: https://www.mongodb.com/docs/drivers/java/sync/current/fundamentals/connection/
# nb5 run driver=mongodb workload=/path/to/mongodb-timeseries2.yaml tags=block:schema connection='mongodb+srv://user:pass@sample-db.host.mongodb.net/?retryWrites=true&w=majority' database=baselines -vv --show-stacktraces
# nb5 run driver=mongodb workload=/path/to/mongodb-timeseries2.yaml tags=block:"schema.*" connection='mongodb+srv://user:pass@sample-db.host.mongodb.net/?retryWrites=true&w=majority' database=baselines -vv --show-stacktraces
# nb5 run driver=mongodb workload=/path/to/mongodb-timeseries2.yaml tags=block:rampup cycles=25 connection='mongodb+srv://user:pass@sample-db.host.mongodb.net/?retryWrites=true&w=majority' database=baselines -vv --show-stacktraces
# nb5 run driver=mongodb workload=/path/to/mongodb-timeseries2.yaml tags='block:main.*' cycles=25 connection='mongodb+srv://user:pass@sample-db.host.mongodb.net/?retryWrites=true&w=majority' database=baselines -vv --show-stacktraces

# https://www.mongodb.com/community/forums/t/how-to-store-a-uuid-with-binary-subtype-0x04-using-the-mongodb-java-driver/13184
# https://www.mongodb.com/community/forums/t/problem-inserting-uuid-field-with-binary-subtype-via-atlas-web-ui/1071/4
# https://www.mongodb.com/community/forums/t/timeseries-last-x-documents/186574/5
min_version: "4.17.31"
min_version: "5.17.1"

description: |
This workload is analogous to the cql-timeseries2 workload, just implemented for MongoDB.

scenarios:
default:
schema: run driver=mongodb tags==block:schema threads==1 cycles==UNDEF
schema: run driver=mongodb tags==block:"schema.*" threads==1 cycles==UNDEF
rampup: run driver=mongodb tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
main: run driver=mongodb tags==block:"main-.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto
main: run driver=mongodb tags==block:'main-.*' cycles===TEMPLATE(main-cycles,10000000) threads=auto
drop: run driver=mongodb tags==block:drop-collection threads==1 cycles==UNDEF

params:
@@ -1,10 +1,9 @@
# nb -v run driver=mongodb yaml=mongodb-basic-uuid connection=mongodb://127.0.0.1 database=testdb tags=phase:rampup
description: An example of a basic mongo insert and find with UUID

scenarios:
default:
- run driver=mongodb tags==phase:rampup cycles===TEMPLATE(rampup-cycles,100000000) threads=auto
- run driver=mongodb tags==phase:main cycles===TEMPLATE(main-cycles,100000000) threads=auto
rampup: run driver=mongodb tags==block:rampup cycles===TEMPLATE(rampup-cycles,100000000) threads=auto
main: run driver=mongodb tags==block:main cycles===TEMPLATE(main-cycles,100000000) threads=auto
bindings:
seq_uuid: Mod(<<uuidCount:100000000>>L); ToHashedUUID() -> java.util.UUID; ToString() -> String
rw_uuid: <<uuidDist:Uniform(0,100000000)->long>>; ToHashedUUID() -> java.util.UUID; ToString() -> String
@@ -12,69 +11,50 @@ bindings:
seq_value: Mod(<<valueCount:1000000000>>L); <<valueSizeDist:Hash()>>; ToLong()
rw_key: <<keyDist:Uniform(0,1000000)->long>>; ToInt()
rw_value: <<valDist:Uniform(0,1000000000)->long>>; <<valueSizeDist:Hash()>>; ToLong()

blocks:
- name: rampup
tags:
phase: rampup
statements:
- rampup-insert: |
{
insert: "<<collection:keyvalueuuid>>",
documents: [ { _id: UUID("{seq_uuid}"),
key: {seq_key},
value: NumberLong({seq_value}) } ]
}
params:
readPreference: primary
tags:
name: rampup-insert
- name: verify
tags:
phase: verify
type: read
params:
readPreference: primary
rampup:
ops:
rampup-insert: |
{
insert: "<<collection:keyvalueuuid>>",
documents: [ { _id: UUID("{seq_uuid}"),
key: {seq_key},
value: NumberLong({seq_value}) } ]
}
verify:
params:
cl: <<read_cl:LOCAL_QUORUM>>
statements:
- verify-find: |
{
find: "<<collection:keyvalueuuid>>",
filter: { _id: UUID("{seq_uuid}") }
}
verify-fields: _id->seq_uuid, key->seq_key, value->seq_value
tags:
name: verify
- name: main-read
tags:
phase: main
type: read
cl: <<read_cl:LOCAL_QUORUM>>
verify-fields: _id->seq_uuid, key->seq_key, value->seq_value
ops:
verify-find: |
{
find: "<<collection:keyvalueuuid>>",
filter: { _id: UUID("{seq_uuid}") }
}
main-read:
params:
ratio: <<read_ratio:1>>
statements:
- main-find: |
{
find: "<<collection:keyvalueuuid>>",
filter: { _id: UUID("{rw_uuid}") }
}
params:
readPreference: primary
tags:
name: main-find
- name: main-write
tags:
phase: main
type: write
type: read
readPreference: primary
ops:
main-find: |
{
find: "<<collection:keyvalueuuid>>",
filter: { _id: UUID("{rw_uuid}") }
}
main-write:
params:
ratio: <<write_ratio:1>>
statements:
- main-insert: |
{
insert: "<<collection:keyvalueuuid>>",
documents: [ { _id: UUID("{rw_uuid}"),
key: {rw_key},
value: NumberLong({rw_value}) } ]
}
params:
readPreference: primary
tags:
name: main-insert
type: write
readPreference: primary
ops:
main-insert: |
{
insert: "<<collection:keyvalueuuid>>",
documents: [ { _id: UUID("{rw_uuid}"),
key: {rw_key},
value: NumberLong({rw_value}) } ]
}
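The seq_uuid and rw_uuid bindings above derive a deterministic UUID from a cycle number, so the same cycle always addresses the same document. A hedged Java sketch of that idea (the hashing here is java.util.UUID.nameUUIDFromBytes for illustration; nb5's ToHashedUUID() uses its own hash, so the concrete UUID values will differ):

import java.nio.ByteBuffer;
import java.util.UUID;

public class HashedUuidSketch {
    // Stable UUID per sequence value, mirroring Mod(<<uuidCount>>L); ToHashedUUID() in spirit.
    static UUID hashedUuid(long cycle, long modulo) {
        long seq = cycle % modulo;                                   // the Mod(...) step
        byte[] bytes = ByteBuffer.allocate(Long.BYTES).putLong(seq).array();
        return UUID.nameUUIDFromBytes(bytes);                        // deterministic per seq
    }

    public static void main(String[] args) {
        System.out.println(hashedUuid(7L, 100_000_000L));
        System.out.println(hashedUuid(100_000_007L, 100_000_000L)); // same seq -> same UUID
    }
}

Determinism is what lets the verify block re-find documents written during rampup without any shared state between the two activities.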
@@ -1,10 +1,10 @@
# nb -v run driver=mongodb yaml=mongodb-basic connection=mongodb://127.0.0.1 database=testdb tags=phase:rampup cycles=1M
description: An example of a basic mongo insert and find.

scenarios:
default:
- run driver=mongodb tags==phase:rampup cycles===TEMPLATE(rampup-cycles,1000000) threads=auto
- run driver=mongodb tags==phase:main cycles===TEMPLATE(main-cycles,1000000) threads=auto
rampup: run driver=mongodb tags==block:rampup cycles===TEMPLATE(rampup-cycles,1000000) threads=auto
main: run driver=mongodb tags==block:"main.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto

bindings:
seq_key: Mod(<<keyCount:1000000>>L); ToInt()
seq_value: Mod(<<valueCount:1000000000>>L); <<valueSizeDist:Hash()>>; ToString() -> String
@@ -12,46 +12,39 @@ bindings:
rw_value: <<valDist:Uniform(0,1000000000)->int>>; <<valueSizeDist:Hash()>>; ToString() -> String

blocks:
- name: rampup
tags:
phase: rampup
statements:
- rampup-insert: |
rampup:
ops:
rampup-insert: |
{
insert: "<<collection:keyvalue>>",
documents: [ { _id: {seq_key},
value: {seq_value} } ]
documents: [ { _id: {seq_key}, value: {seq_value} } ]
}
params:
readPreference: primary
tags:
name: rampup-insert
- name: main-read
tags:
phase: main
type: read

main-read:
params:
ratio: <<read_ratio:1>>
statements:
- main-find: |
{
find: "<<collection:keyvalue>>",
filter: { _id: {rw_key} }
}
params:
readPreference: primary
- name: main-write
tags:
phase: main
type: write
readPreference: primary
type: read
ops:
main-find: |
{
find: "<<collection:keyvalue>>",
filter: { _id: {rw_key} }
}

main-write:
params:
ratio: <<write_ratio:1>>
statements:
- main-insert: |
type: write
ops:
main-insert: |
{
insert: "<<collection:keyvalue>>",
documents: [ { _id: {rw_key},
value: {rw_value} } ]
documents: [ { _id: {rw_key}, value: {rw_value} } ]
}
params:
readPreference: primary
readPreference: primary
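The read_ratio and write_ratio params above weight how often each op fires relative to the others. A condensed Java model of proportional op selection per cycle (names and the exact selection scheme are illustrative; nb5's real sequencer has more options):

import java.util.List;

public class RatioSketch {
    record WeightedOp(String name, int ratio) {}

    // Map a cycle number onto an op, giving each op a share proportional to its ratio.
    static String opForCycle(long cycle, List<WeightedOp> ops) {
        int total = ops.stream().mapToInt(WeightedOp::ratio).sum();
        long slot = cycle % total;
        for (WeightedOp op : ops) {
            if (slot < op.ratio) return op.name();
            slot -= op.ratio;
        }
        throw new IllegalStateException("unreachable");
    }

    public static void main(String[] args) {
        List<WeightedOp> ops = List.of(
                new WeightedOp("main-find", 1),     // ratio: <<read_ratio:1>>
                new WeightedOp("main-insert", 1));  // ratio: <<write_ratio:1>>
        for (long c = 0; c < 4; c++) System.out.println(c + " -> " + opForCycle(c, ops));
    }
}

With both ratios at 1, reads and writes alternate evenly; raising one ratio shifts the mix without changing total cycles.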
@@ -1,4 +1,3 @@
# nb -v run driver=mongodb yaml=mongodb-crud-basic tags=phase:schema connection=mongodb://127.0.0.1 database=testdb

description: |
This workload emulates CRUD operations for the mongoDB.
@@ -7,11 +6,11 @@ description: |

scenarios:
default:
schema: run driver=mongodb tags==phase:schema threads==1 cycles==UNDEF
write: run driver=mongodb tags==phase:main,type:write cycles===TEMPLATE(write-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
read: run driver=mongodb tags==phase:main,type:read cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
update: run driver=mongodb tags==phase:main,type:update cycles===TEMPLATE(update-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
delete: run driver=mongodb tags==phase:main,type:delete cycles===TEMPLATE(delete-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
schema: run driver=mongodb tags==block:"schema.*" threads==1 cycles==UNDEF
write: run driver=mongodb tags==block:main-write,type:write cycles===TEMPLATE(write-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
read: run driver=mongodb tags==block:main-read,type:read cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
update: run driver=mongodb tags==block:main-update,type:update cycles===TEMPLATE(update-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
delete: run driver=mongodb tags==block:main-delete,type:delete cycles===TEMPLATE(delete-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn

bindings:
seq_key: Mod(<<docscount:10000000>>); ToString() -> String
@@ -29,62 +28,93 @@ bindings:
friend_id: Add(-1); ToHashedUUID(); ToString() -> String

blocks:
- tags:
phase: schema
statements:
- dummy-insert: |
{
insert: "<<collection:crud_basic>>",
documents: [ { _id: "dummyyyy" } ]
}
schema:
ops:
dummy-insert: |
{
insert: "<<collection:crud_basic>>",
documents: [ { _id: "dummyyyy" } ]
}
drop-collection: |
{
drop: "<<collection:crud_basic>>"
}
create-collection: |
{
create: "<<collection:crud_basic>>"
}
create-indexes: |
{
createIndexes: "<<collection:crud_basic>>",
indexes: [
{
key: { user_id: 1 },
name: "user_id_idx",
unique: true
},
{
key: { created_on: 1 },
name: "created_on_idx"
},
{
key: { gender: 1 },
name: "gender_idx"
}
]
}

- drop-collection: |
{
drop: "<<collection:crud_basic>>"
}
tags:
name: drop-collection

- create-collection: |
{
create: "<<collection:crud_basic>>"
}
tags:
name: create-collection

- create-indexes: |
{
createIndexes: "<<collection:crud_basic>>",
indexes: [
{
key: { user_id: 1 },
name: "user_id_idx",
unique: true
main-write:
ops:
write-document: |
{
insert: "<<collection:crud_basic>>",
writeConcern: { w: "majority" },
documents: [
{
"_id": "{seq_key}",
"user_id": "{user_id}",
"created_on": {created_on},
"gender": "{gender}",
"full_name": "{full_name}",
"married": {married},
"address": {
"primary": {
"city": "{city}",
"cc": "{country_code}"
},
"secondary": {}
},
{
key: { created_on: 1 },
name: "created_on_idx"
},
{
key: { gender: 1 },
name: "gender_idx"
}
]
}
tags:
name: create-indexes
"coordinates": [
{lat},
{lng}
],
"children": [],
"friends": [
"{friend_id}"
],
"debt": null
}
]
}

- name: main-write
tags:
phase: main
type: write
statements:
- write-document: |
{
insert: "<<collection:crud_basic>>",
writeConcern: { w: "majority" },
documents: [
{
main-read:
ops:
read-document: |
{
find: "<<collection:crud_basic>>",
filter: { _id: "{random_key}" }
}

main-update:
ops:
update-document: |
{
update: "<<collection:crud_basic>>",
writeConcern: { w: "majority" },
updates: [
{
q: { _id: "{random_key}" },
u: {
"_id": "{seq_key}",
"user_id": "{user_id}",
"created_on": {created_on},
@@ -108,78 +138,19 @@ blocks:
],
"debt": null
}
]
}
tags:
name: write-document
}
]
}

- name: main-read
tags:
phase: main
type: read
statements:
- read-document: |
{
find: "<<collection:crud_basic>>",
filter: { _id: "{random_key}" }
}
tags:
name: read-document

- name: main-update
tags:
phase: main
type: update
statements:
- update-document: |
{
update: "<<collection:crud_basic>>",
writeConcern: { w: "majority" },
updates: [
{
q: { _id: "{random_key}" },
u: {
"_id": "{seq_key}",
"user_id": "{user_id}",
"created_on": {created_on},
"gender": "{gender}",
"full_name": "{full_name}",
"married": {married},
"address": {
"primary": {
"city": "{city}",
"cc": "{country_code}"
},
"secondary": {}
},
"coordinates": [
{lat},
{lng}
],
"children": [],
"friends": [
"{friend_id}"
],
"debt": null
}
}
]
}
tags:
name: update-document

- name: main-delete
tags:
phase: main
type: delete
statements:
- delete-document: |
{
delete: "<<collection:crud_basic>>",
deletes: [
{
q: { _id: "{seq_key}" },
limit: 1
}
]
}
main-delete:
ops:
delete-document: |
{
delete: "<<collection:crud_basic>>",
deletes: [
{
q: { _id: "{seq_key}" },
limit: 1
}
]
}
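The create-indexes op above drives index creation through a raw createIndexes command document. For comparison, the same three indexes through the typed builders of mongodb-driver-sync (the dependency this commit bumps to 4.8.2); the connection string, database, and collection names here are placeholders:

import com.mongodb.client.MongoClient;
import com.mongodb.client.MongoClients;
import com.mongodb.client.MongoCollection;
import com.mongodb.client.model.IndexOptions;
import com.mongodb.client.model.Indexes;
import org.bson.Document;

public class CreateIndexesSketch {
    public static void main(String[] args) {
        try (MongoClient client = MongoClients.create("mongodb://127.0.0.1")) {
            MongoCollection<Document> coll =
                    client.getDatabase("baselines").getCollection("crud_basic");
            // Same index shapes as the create-indexes op: one unique, two plain ascending.
            coll.createIndex(Indexes.ascending("user_id"),
                    new IndexOptions().unique(true).name("user_id_idx"));
            coll.createIndex(Indexes.ascending("created_on"),
                    new IndexOptions().name("created_on_idx"));
            coll.createIndex(Indexes.ascending("gender"),
                    new IndexOptions().name("gender_idx"));
        }
    }
}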
@@ -1,4 +1,3 @@
# nb -v run driver=mongodb yaml=mongodb-crud-dataset tags=phase:schema connection=mongodb://127.0.0.1 database=testdb dataset_file=path/to/data.json

description: |
This workload emulates CRUD operations for the mongoDB.
@@ -7,110 +6,85 @@ description: |

scenarios:
default:
schema: run driver=mongodb tags==phase:schema threads==1 cycles==UNDEF
write: run driver=mongodb tags==phase:main,type:write cycles===TEMPLATE(write-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
read: run driver=mongodb tags==phase:main,type:read cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
update: run driver=mongodb tags==phase:main,type:update cycles===TEMPLATE(update-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
delete: run driver=mongodb tags==phase:main,type:delete cycles===TEMPLATE(delete-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
schema: run driver=mongodb tags==block:"schema.*" threads==1 cycles==UNDEF
write: run driver=mongodb tags==block:main-write,type:write cycles===TEMPLATE(write-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
read: run driver=mongodb tags==block:main-read,type:read cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
update: run driver=mongodb tags==block:main-update,type:update cycles===TEMPLATE(update-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
delete: run driver=mongodb tags==block:main-delete,type:delete cycles===TEMPLATE(delete-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn

bindings:
seq_key: Mod(<<docscount:10000000>>); ToString() -> String
random_key: Uniform(0,<<docscount:10000000>>); ToString() -> String

blocks:
- tags:
phase: schema
statements:
- dummy-insert: |
{
insert: "<<collection:crud_dataset>>",
documents: [ { _id: "dummyyyy" } ]
}
schema:
ops:
dummy-insert: |
{
insert: "<<collection:crud_dataset>>",
documents: [ { _id: "dummyyyy" } ]
}

- drop-collection: |
{
drop: "<<collection:crud_dataset>>"
}
tags:
name: drop-collection
drop-collection: |
{
drop: "<<collection:crud_dataset>>"
}

- create-collection: |
{
create: "<<collection:crud_dataset>>"
}
tags:
name: create-collection
create-collection: |
{
create: "<<collection:crud_dataset>>"
}

- create-indexes: |
{
createIndexes: "<<collection:crud_dataset>>",
indexes: <<indexes:[ { key: { dummy : 1 }, name: "dummy_idx", sparse: true } ]>>
}
tags:
name: create-indexes
create-indexes: |
{
createIndexes: "<<collection:crud_dataset>>",
indexes: <<indexes:[ { key: { dummy : 1 }, name: "dummy_idx", sparse: true } ]>>
}

- name: main-write
tags:
phase: main
type: write
statements:
- write-document: |
{
insert: "<<collection:crud_dataset>>",
writeConcern: { w: "majority" },
documents: [ { "_id": "{seq_key}", {document_json_without_id} ]
}
tags:
name: write-document
main-write:
ops:
write-document: |
{
insert: "<<collection:crud_dataset>>",
writeConcern: { w: "majority" },
documents: [ { "_id": "{seq_key}", {document_json_without_id} ]
}
bindings:
document_json_without_id: ModuloLineToString('<<dataset_file>>'); ReplaceRegex('^\{', '')

- name: main-read
tags:
phase: main
type: read
statements:
- read-document: |
{
find: "<<collection:crud_dataset>>",
filter: { _id: "{random_key}" }
}
tags:
name: read-document
main-read:
ops:
read-document: |
{
find: "<<collection:crud_dataset>>",
filter: { _id: "{random_key}" }
}

- name: main-update
tags:
phase: main
type: update
statements:
- update-document: |
{
update: "<<collection:crud_dataset>>",
writeConcern: { w: "majority" },
updates: [
{
q: { _id: "{random_key}" },
u: { "_id": "{random_key}", {document_json_without_id}
}
]
}
tags:
name: update-document
main-update:
ops:
update-document: |
{
update: "<<collection:crud_dataset>>",
writeConcern: { w: "majority" },
updates: [
{
q: { _id: "{random_key}" },
u: { "_id": "{random_key}", {document_json_without_id}
}
]
}
bindings:
document_json_without_id: ModuloLineToString('<<dataset_file>>'); ReplaceRegex('^\{', '')

- name: main-delete
tags:
phase: main
type: delete
statements:
- delete-document: |
{
delete: "<<collection:crud_dataset>>",
deletes: [
{
q: { _id: "{seq_key}" },
limit: 1
}
]
}
main-delete:
ops:
delete-document: |
{
delete: "<<collection:crud_dataset>>",
deletes: [
{
q: { _id: "{seq_key}" },
limit: 1
}
]
}
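The document_json_without_id binding above cycles through lines of a pre-generated dataset file and trims the leading brace so the remainder can be spliced after the injected "_id" field. An illustrative Java stand-in for that transform chain (the real ModuloLineToString streams from a file; here the dataset is an in-memory list for brevity):

import java.util.List;

public class DatasetLineSketch {
    // Pick a dataset line by cycle modulo, then strip the leading '{'
    // the way ReplaceRegex('^\{', '') does.
    static String lineForCycle(long cycle, List<String> lines) {
        String line = lines.get((int) (cycle % lines.size()));
        return line.replaceFirst("^\\{", "");
    }

    public static void main(String[] args) {
        List<String> dataset = List.of("{\"a\":1}", "{\"b\":2}");
        System.out.println(lineForCycle(0, dataset)); // "a":1}
        System.out.println(lineForCycle(3, dataset)); // "b":2}
    }
}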
@@ -1,4 +1,3 @@
# nb -v run driver=mongodb yaml=mongodb-search-basic tags=phase:schema connection=mongodb://127.0.0.1 database=testdb

description: |
This workload emulates basic search operations for the mongoDB.
@@ -7,15 +6,15 @@ description: |
It's a counterpart of the Stargate's Documents API Basic Search workflow.

scenarios:
schema: run driver=mongodb tags==phase:schema threads==1 cycles==UNDEF
rampup-write: run driver=mongodb tags==phase:rampup-write cycles===TEMPLATE(docscount,10000000) docpadding=TEMPLATE(docpadding,0) match-ratio=TEMPLATE(match-ratio,0.01) threads=auto errors=timer,warn
rampup-read: run driver=mongodb tags==phase:rampup-read cycles===TEMPLATE(rampup-cycles, 10000000) field-projection=TEMPLATE(fields,null) threads=<<threads:auto>> errors=timer,warn
main: run driver=mongodb tags==phase:main cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) field-projection=TEMPLATE(fields,null) threads=<<threads:auto>> errors=timer,warn
main-eq: run driver=mongodb tags==phase:main,filter:eq cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) field-projection=TEMPLATE(fields,null) threads=<<threads:auto>> errors=timer,warn
main-lt: run driver=mongodb tags==phase:main,filter:lt cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) field-projection=TEMPLATE(fields,null) threads=<<threads:auto>> errors=timer,warn
main-and: run driver=mongodb tags==phase:main,filter:and cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) field-projection=TEMPLATE(fields,null) threads=<<threads:auto>> errors=timer,warn
main-or: run driver=mongodb tags==phase:main,filter:or cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) field-projection=TEMPLATE(fields,null) threads=<<threads:auto>> errors=timer,warn
main-or-single-match: run driver=mongodb tags==phase:main,filter:or-single-match cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) field-projection=TEMPLATE(fields,null) threads=<<threads:auto>> errors=timer,warn
schema: run driver=mongodb tags==block:"schema.*" threads==1 cycles==UNDEF
rampup-write: run driver=mongodb tags==block:rampup-write cycles===TEMPLATE(docscount,10000000) docpadding=TEMPLATE(docpadding,0) match-ratio=TEMPLATE(match-ratio,0.01) threads=auto errors=timer,warn
rampup-read: run driver=mongodb tags==block:rampup-read cycles===TEMPLATE(rampup-cycles, 10000000) field-projection=TEMPLATE(fields,null) threads=<<threads:auto>> errors=timer,warn
main: run driver=mongodb tags==block:main cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) field-projection=TEMPLATE(fields,null) threads=<<threads:auto>> errors=timer,warn
main-eq: run driver=mongodb tags==block:main-eq,filter:eq cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) field-projection=TEMPLATE(fields,null) threads=<<threads:auto>> errors=timer,warn
main-lt: run driver=mongodb tags==block:main-lt,filter:lt cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) field-projection=TEMPLATE(fields,null) threads=<<threads:auto>> errors=timer,warn
main-and: run driver=mongodb tags==block:main-and,filter:and cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) field-projection=TEMPLATE(fields,null) threads=<<threads:auto>> errors=timer,warn
main-or: run driver=mongodb tags==block:main-or,filter:or cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) field-projection=TEMPLATE(fields,null) threads=<<threads:auto>> errors=timer,warn
main-or-single-match: run driver=mongodb tags==block:main-or-single-match,filter:or-single-match cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) field-projection=TEMPLATE(fields,null) threads=<<threads:auto>> errors=timer,warn

bindings:
seq_key: Mod(<<docscount:10000000>>); ToString() -> String
@@ -34,57 +33,49 @@ bindings:
match1: Identity(); CoinFunc(<<match-ratio>>, FixedValue(0), FixedValue(1000))
match2: Identity(); CoinFunc(<<match-ratio>>, FixedValue("true"), FixedValue("false"))
additional_fields: ListSizedStepped(<<docpadding:0>>,Template("\"{}\":{}",Identity(),Identity())); ToString(); ReplaceAll('\[\"', ',\"'); ReplaceAll('\[', ''); ReplaceAll('\]', '') -> String

blocks:
- tags:
phase: schema
statements:
- dummy-insert: |
{
insert: "<<collection:search_basic>>",
documents: [ { _id: "dummyyyy" } ]
}
schema:
ops:
dummy-insert: |
{
insert: "<<collection:search_basic>>",
documents: [ { _id: "dummyyyy" } ]
}

- drop-collection: |
{
drop: "<<collection:search_basic>>"
}
tags:
name: drop-collection
drop-collection: |
{
drop: "<<collection:search_basic>>"
}

- create-collection: |
{
create: "<<collection:search_basic>>"
}
tags:
name: create-collection
create-collection: |
{
create: "<<collection:search_basic>>"
}

- create-indexes: |
{
createIndexes: "<<collection:search_basic>>",
indexes: [
{
key: { user_id: 1 },
name: "user_id_idx",
unique: true
},
{
key: { created_on: 1 },
name: "created_on_idx"
},
{
key: { city: 1 },
name: "city_idx"
}
]
}
tags:
name: create-indexes
create-indexes: |
{
createIndexes: "<<collection:search_basic>>",
indexes: [
{
key: { user_id: 1 },
name: "user_id_idx",
unique: true
},
{
key: { created_on: 1 },
name: "created_on_idx"
},
{
key: { city: 1 },
name: "city_idx"
}
]
}

- name: rampup-write
tags:
phase: rampup-write
statements:
- write-document: |
rampup-write:
ops:
write-document:
{
insert: "<<collection:search_basic>>",
writeConcern: { w: "majority" },
@@ -118,83 +109,62 @@ blocks:
}
]
}
tags:
name: rampup-write

- name: rampup
tags:
phase: rampup-read
filter: eq
statements:
- read-document: |
rampup-read:
params:
filter: eq
ops:
read-document: |
{
find: "<<collection:search_basic>>",
filter: { match1: 0 }
}, <<field-projection:null>>
tags:
name: rampup-read

- name: main-eq
tags:
phase: main
main-eq:
params:
filter: eq
statements:
- read-document: |
ops:
read-document: |
{
find: "<<collection:search_basic>>",
filter: { match3: true }
}, <<field-projection:null>>
tags:
name: read-document
}, <<field-projection:null>>

- name: main-lt
tags:
phase: main
main-lt:
params:
filter: lt
statements:
- read-document: |
ops:
read-document: |
{
find: "<<collection:search_basic>>",
filter: { match1: {$lt: 1}}
}, <<field-projection:null>>
tags:
name: read-document
}, <<field-projection:null>>

- name: main-and
tags:
phase: main
main-and:
params:
filter: and
statements:
- read-document: |
{
find: "<<collection:search_basic>>",
filter: { match1: {$lt: 1}, match2: "true"}
}, <<field-projection:null>>
tags:
name: read-document
ops:
read-document: |
{
find: "<<collection:search_basic>>",
filter: { match1: {$lt: 1}, match2: "true"}
}, <<field-projection:null>>

- name: main-or
tags:
phase: main
main-or:
params:
filter: or
statements:
- read-document: |
ops:
read-document: |
{
find: "<<collection:search_basic>>",
filter: { $or: [ {match1: {$lt: 1}}, {match3: true}]}
}, <<field-projection:null>>
tags:
name: read-document
}, <<field-projection:null>>

- name: main-or-single-match
tags:
phase: main
main-or-single-match:
params:
filter: or-single-match
statements:
- read-document: |
{
find: "<<collection:search_basic>>",
filter: { $or: [ {match1: {$lt: 1}}, {match2: "notamatch"}]}
}, <<field-projection:null>>
tags:
name: read-document
ops:
read-document: |
{
find: "<<collection:search_basic>>",
filter: { $or: [ {match1: {$lt: 1}}, {match2: "notamatch"}]}
}, <<field-projection:null>>
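The eq/lt/and/or blocks above send raw find filter documents. For readers more used to the driver API, here are the same filter shapes built with the typed helpers from mongodb-driver-sync (connection string and names are placeholders):

import com.mongodb.client.MongoClient;
import com.mongodb.client.MongoClients;
import com.mongodb.client.MongoCollection;
import com.mongodb.client.model.Filters;
import org.bson.Document;
import org.bson.conversions.Bson;

public class SearchFilterSketch {
    public static void main(String[] args) {
        try (MongoClient client = MongoClients.create("mongodb://127.0.0.1")) {
            MongoCollection<Document> coll =
                    client.getDatabase("baselines").getCollection("search_basic");
            Bson lt = Filters.lt("match1", 1);                        // main-lt
            Bson and = Filters.and(lt, Filters.eq("match2", "true")); // main-and
            Bson or = Filters.or(lt, Filters.eq("match3", true));     // main-or
            System.out.println(coll.countDocuments(or));
        }
    }
}

Because match1/match2/match3 are driven by CoinFunc with the match-ratio parameter, the selectivity of each filter is tunable, which is the point of these variants.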
@@ -32,8 +32,10 @@ import org.apache.pulsar.client.admin.PulsarAdminBuilder;
import org.apache.pulsar.client.api.*;
import org.apache.pulsar.common.schema.KeyValueEncodingType;

import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Supplier;

public class PulsarSpace implements AutoCloseable {

@@ -50,9 +52,18 @@ public class PulsarSpace implements AutoCloseable {
private PulsarAdmin pulsarAdmin;
private Schema<?> pulsarSchema;

private final ConcurrentHashMap<String, Producer<?>> producers = new ConcurrentHashMap<>();
private final ConcurrentHashMap<String, Consumer<?>> consumers = new ConcurrentHashMap<>();
private final ConcurrentHashMap<String, Reader<?>> readers = new ConcurrentHashMap<>();
public record ProducerCacheKey(String producerName, String topicName) {
}

private final ConcurrentHashMap<ProducerCacheKey, Producer<?>> producers = new ConcurrentHashMap<>();

public record ConsumerCacheKey(String consumerName, String subscriptionName, List<String> topicNameList, String topicPattern) {
}
private final ConcurrentHashMap<ConsumerCacheKey, Consumer<?>> consumers = new ConcurrentHashMap<>();

public record ReaderCacheKey(String readerName, String topicName, String startMsgPosStr) {
}
private final ConcurrentHashMap<ReaderCacheKey, Reader<?>> readers = new ConcurrentHashMap<>();

public PulsarSpace(String spaceName, NBConfiguration cfg) {

@@ -89,13 +100,11 @@ public class PulsarSpace implements AutoCloseable {
public int getProducerSetCnt() { return producers.size(); }
public int getConsumerSetCnt() { return consumers.size(); }
public int getReaderSetCnt() { return readers.size(); }
public Producer<?> getProducer(String name) { return producers.get(name); }
public void setProducer(String name, Producer<?> producer) { producers.put(name, producer); }
public Consumer<?> getConsumer(String name) { return consumers.get(name); }
public void setConsumer(String name, Consumer<?> consumer) { consumers.put(name, consumer); }
public Producer<?> getProducer(ProducerCacheKey key, Supplier<Producer<?>> producerSupplier) { return producers.computeIfAbsent(key, __ -> producerSupplier.get()); }

public Reader<?> getReader(String name) { return readers.get(name); }
public void setReader(String name, Reader<?> reader) { readers.put(name, reader); }
public Consumer<?> getConsumer(ConsumerCacheKey key, Supplier<Consumer<?>> consumerSupplier) { return consumers.computeIfAbsent(key, __ -> consumerSupplier.get()); }

public Reader<?> getReader(ReaderCacheKey key, Supplier<Reader<?>> readerSupplier) { return readers.computeIfAbsent(key, __ -> readerSupplier.get()); }

/**
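This hunk replaces string-keyed maps plus separate get/set pairs with record-typed cache keys and a supplier-based computeIfAbsent accessor. A condensed model of the pattern, with toy types standing in for the Pulsar client objects:

import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Supplier;

public class KeyedCacheSketch {
    // Typed record key replaces a hand-built concatenated cache-key string.
    record ProducerKey(String producerName, String topicName) {}

    private final ConcurrentHashMap<ProducerKey, String> producers = new ConcurrentHashMap<>();

    String getOrCreate(ProducerKey key, Supplier<String> factory) {
        // The factory runs at most once per key, even with concurrent callers.
        return producers.computeIfAbsent(key, __ -> factory.get());
    }

    public static void main(String[] args) {
        KeyedCacheSketch cache = new KeyedCacheSketch();
        ProducerKey k = new ProducerKey("p1", "persistent://public/default/t1");
        System.out.println(cache.getOrCreate(k, () -> "created"));
        System.out.println(cache.getOrCreate(k, () -> "should-not-run")); // served from cache
    }
}

computeIfAbsent closes the check-then-act race the old get-then-set sequence allowed, and the record keys get correct equals/hashCode for free, so two lookups with the same fields always hit the same cache slot.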
@@ -37,7 +37,6 @@ import java.util.*;
import java.util.function.LongFunction;
import java.util.function.Predicate;
import java.util.regex.Pattern;
import java.util.regex.PatternSyntaxException;
import java.util.stream.Collectors;

public abstract class PulsarBaseOpDispenser extends BaseOpDispenser<PulsarOp, PulsarSpace> implements NBNamedElement {
@@ -239,10 +238,8 @@ public abstract class PulsarBaseOpDispenser extends BaseOpDispenser<PulsarOp, P
PulsarAdapterUtil.PRODUCER_CONF_STD_KEY.producerName.label,
cycleProducerName);

String producerCacheKey = PulsarAdapterUtil.buildCacheKey(producerName, topicName);
Producer<?> producer = pulsarSpace.getProducer(producerCacheKey);

if (producer == null) {
PulsarSpace.ProducerCacheKey producerCacheKey = new PulsarSpace.ProducerCacheKey(producerName, topicName);
return pulsarSpace.getProducer(producerCacheKey, () -> {
PulsarClient pulsarClient = pulsarSpace.getPulsarClient();

// Get other possible producer settings that are set at global level
@@ -262,21 +259,17 @@ public abstract class PulsarBaseOpDispenser extends BaseOpDispenser<PulsarOp, P
producerBuilder = producerBuilder.producerName(producerName);
}

producer = producerBuilder.create();
pulsarSpace.setProducer(producerCacheKey, producer);

Producer<?> producer = producerBuilder.create();
pulsarAdapterMetrics.registerProducerApiMetrics(producer,
getPulsarAPIMetricsPrefix(
PulsarAdapterUtil.PULSAR_API_TYPE.PRODUCER.label,
producerName,
topicName));
}
catch (PulsarClientException ple) {
return producer;
} catch (PulsarClientException ple) {
throw new PulsarAdapterUnexpectedException("Failed to create a Pulsar producer.");
}
}

return producer;
});
}

private List<String> getEffectiveConsumerTopicNameList(String cycleTopicNameListStr) {
@@ -296,24 +289,6 @@ public abstract class PulsarBaseOpDispenser extends BaseOpDispenser<PulsarOp, P
return effectiveTopicNameList;
}

private Pattern getEffectiveConsumerTopicPattern(String cycleTopicPatternStr) {
String effectiveTopicPatternStr = getEffectiveConValue(
PulsarAdapterUtil.CONF_GATEGORY.Consumer.label,
PulsarAdapterUtil.CONSUMER_CONF_STD_KEY.topicsPattern.label,
cycleTopicPatternStr);

Pattern topicsPattern;
try {
if (!StringUtils.isBlank(effectiveTopicPatternStr))
topicsPattern = Pattern.compile(effectiveTopicPatternStr);
else
topicsPattern = null;
} catch (PatternSyntaxException pse) {
topicsPattern = null;
}
return topicsPattern;
}

private SubscriptionType getEffectiveSubscriptionType(String cycleSubscriptionType) {
String subscriptionTypeStr = getEffectiveConValue(
PulsarAdapterUtil.CONF_GATEGORY.Consumer.label,
@@ -344,11 +319,10 @@ public abstract class PulsarBaseOpDispenser extends BaseOpDispenser<PulsarOp, P

List<String> topicNameList = getEffectiveConsumerTopicNameList(cycleTopicNameListStr);

String topicPatternStr = getEffectiveConValue(
String topicPatternStr = StringUtils.trimToNull(getEffectiveConValue(
PulsarAdapterUtil.CONF_GATEGORY.Consumer.label,
PulsarAdapterUtil.CONSUMER_CONF_STD_KEY.topicsPattern.label,
cycleTopicPatternStr);
Pattern topicPattern = getEffectiveConsumerTopicPattern(cycleTopicPatternStr);
cycleTopicPatternStr));

String subscriptionName = getEffectiveConValue(
PulsarAdapterUtil.CONF_GATEGORY.Consumer.label,
@@ -368,28 +342,14 @@ public abstract class PulsarBaseOpDispenser extends BaseOpDispenser<PulsarOp, P
"creating multiple consumers of \"Exclusive\" subscription type under the same subscription name");
}

if ( (topicNameList.isEmpty() && (topicPattern == null)) ||
(!topicNameList.isEmpty() && (topicPattern != null)) ) {
if ( (topicNameList.isEmpty() && (topicPatternStr == null)) ||
(!topicNameList.isEmpty() && (topicPatternStr != null)) ) {
throw new PulsarAdapterInvalidParamException(
"Invalid combination of topic name(s) and topic patterns; only specify one parameter!");
}

boolean multiTopicConsumer = (topicNameList.size() > 1 || (topicPattern != null));

String consumerTopicListString;
if (!topicNameList.isEmpty()) {
consumerTopicListString = String.join("|", topicNameList);
} else {
consumerTopicListString = topicPatternStr;
}

String consumerCacheKey = PulsarAdapterUtil.buildCacheKey(
consumerName,
subscriptionName,
consumerTopicListString);
Consumer<?> consumer = pulsarSpace.getConsumer(consumerCacheKey);

if (consumer == null) {
return pulsarSpace.getConsumer(
new PulsarSpace.ConsumerCacheKey(consumerName, subscriptionName, topicNameList, topicPatternStr), () -> {
PulsarClient pulsarClient = pulsarSpace.getPulsarClient();

// Get other possible consumer settings that are set at global level
@@ -417,6 +377,7 @@ public abstract class PulsarBaseOpDispenser extends BaseOpDispenser<PulsarOp, P
consumerConfToLoad.remove(PulsarAdapterUtil.CONSUMER_CONF_STD_KEY.negativeAckRedeliveryBackoff.label);
consumerConfToLoad.remove(PulsarAdapterUtil.CONSUMER_CONF_STD_KEY.ackTimeoutRedeliveryBackoff.label);

boolean multiTopicConsumer = (topicNameList.size() > 1 || (topicPatternStr != null));
if (!multiTopicConsumer) {
assert (topicNameList.size() == 1);
consumerBuilder = pulsarClient.newConsumer(pulsarSpace.getPulsarSchema());
@@ -429,6 +390,7 @@ public abstract class PulsarBaseOpDispenser extends BaseOpDispenser<PulsarOp, P
consumerBuilder.topics(topicNameList);
}
else {
Pattern topicPattern = Pattern.compile(topicPatternStr);
consumerBuilder.topicsPattern(topicPattern);
}
}
@@ -465,22 +427,22 @@ public abstract class PulsarBaseOpDispenser extends BaseOpDispenser<PulsarOp, P
consumerBuilder.keySharedPolicy(keySharedPolicy);
}

consumer = consumerBuilder.subscribe();
pulsarSpace.setConsumer(consumerCacheKey, consumer);
Consumer<?> consumer = consumerBuilder.subscribe();

String consumerTopicListString = (!topicNameList.isEmpty()) ? String.join("|", topicNameList) : topicPatternStr;
pulsarAdapterMetrics.registerConsumerApiMetrics(
consumer,
getPulsarAPIMetricsPrefix(
PulsarAdapterUtil.PULSAR_API_TYPE.CONSUMER.label,
consumerName,
consumerTopicListString));

return consumer;
}
catch (PulsarClientException ple) {
throw new PulsarAdapterUnexpectedException("Failed to create a Pulsar consumer!");
}
}

return consumer;
});
}

private static Range[] parseRanges(String ranges) {
@@ -528,10 +490,7 @@ public abstract class PulsarBaseOpDispenser extends BaseOpDispenser<PulsarOp, P
throw new RuntimeException("Reader:: Invalid value for reader start message position!");
}

String readerCacheKey = PulsarAdapterUtil.buildCacheKey(topicName, readerName, startMsgPosStr);
Reader<?> reader = pulsarSpace.getReader(readerCacheKey);

if (reader == null) {
return pulsarSpace.getReader(new PulsarSpace.ReaderCacheKey(readerName, topicName, startMsgPosStr), () -> {
PulsarClient pulsarClient = pulsarSpace.getPulsarClient();

Map<String, Object> readerConf = pulsarSpace.getPulsarNBClientConf().getReaderConfMapTgt();
@@ -558,17 +517,12 @@ public abstract class PulsarBaseOpDispenser extends BaseOpDispenser<PulsarOp, P
// startMsgId = MessageId.latest;
//}

reader = readerBuilder.startMessageId(startMsgId).create();

return readerBuilder.startMessageId(startMsgId).create();
} catch (PulsarClientException ple) {
ple.printStackTrace();
throw new RuntimeException("Unable to create a Pulsar reader!");
}

pulsarSpace.setReader(readerCacheKey, reader);
}

return reader;
});
}
//
//////////////////////////////////////
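A side effect of carrying the raw pattern string instead of a pre-compiled Pattern is that the exactly-one-of validation becomes a plain null/emptiness check. A minimal sketch of that rule in isolation (names are illustrative, exception type simplified):

import java.util.List;

public class TopicParamCheckSketch {
    // Exactly one of topic list or topic pattern may be supplied.
    static void validate(List<String> topicNames, String topicPatternStr) {
        boolean hasNames = !topicNames.isEmpty();
        boolean hasPattern = topicPatternStr != null;
        if (hasNames == hasPattern) { // neither supplied, or both supplied
            throw new IllegalArgumentException(
                "Invalid combination of topic name(s) and topic patterns; only specify one parameter!");
        }
    }

    public static void main(String[] args) {
        validate(List.of("t1"), null);                      // accepted
        try { validate(List.of(), null); }                  // rejected: neither
        catch (IllegalArgumentException e) { System.out.println(e.getMessage()); }
    }
}

Deferring Pattern.compile to the one branch that needs it also means an invalid pattern now fails loudly at subscribe time instead of being silently swallowed into a null, as the removed getEffectiveConsumerTopicPattern helper did.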
@@ -16,6 +16,7 @@

package io.nosqlbench.adapter.pulsar.util;

import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import io.nosqlbench.adapter.pulsar.exception.PulsarAdapterInvalidParamException;
import io.nosqlbench.adapter.pulsar.exception.PulsarAdapterUnexpectedException;
@@ -23,6 +24,8 @@ import org.apache.commons.lang3.StringUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.pulsar.client.api.Schema;
import org.apache.pulsar.common.schema.SchemaInfo;
import org.apache.pulsar.common.schema.SchemaType;

import java.io.IOException;
import java.net.URI;
@@ -30,7 +33,11 @@ import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.*;
import java.util.Base64;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Collectors;
import java.util.stream.Stream;

@@ -60,9 +67,6 @@ public class PulsarAdapterUtil {
this.label = label;
}
}
public static boolean isValidDocLevelParam(String param) {
return Arrays.stream(DOC_LEVEL_PARAMS.values()).anyMatch(t -> t.label.equals(param));
}

///////
// Message processing sequence error simulation types
@@ -77,29 +81,21 @@ public class PulsarAdapterUtil {
this.label = label;
}

private static final Map<String, MSG_SEQ_ERROR_SIMU_TYPE> MAPPING = new HashMap<>();

static {
for (MSG_SEQ_ERROR_SIMU_TYPE simuType : values()) {
MAPPING.put(simuType.label, simuType);
MAPPING.put(simuType.label.toLowerCase(), simuType);
MAPPING.put(simuType.label.toUpperCase(), simuType);
MAPPING.put(simuType.name(), simuType);
MAPPING.put(simuType.name().toLowerCase(), simuType);
MAPPING.put(simuType.name().toUpperCase(), simuType);
}
}
private static final Map<String, MSG_SEQ_ERROR_SIMU_TYPE> MAPPING = Stream.of(values())
.flatMap(simuType ->
Stream.of(simuType.label,
simuType.label.toLowerCase(),
simuType.label.toUpperCase(),
simuType.name(),
simuType.name().toLowerCase(),
simuType.name().toUpperCase())
.distinct().map(key -> Map.entry(key, simuType)))
.collect(Collectors.toUnmodifiableMap(Map.Entry::getKey, Map.Entry::getValue));

public static Optional<MSG_SEQ_ERROR_SIMU_TYPE> parseSimuType(String simuTypeString) {
return Optional.ofNullable(MAPPING.get(simuTypeString.trim()));
}
}
public static boolean isValidSeqErrSimuType(String item) {
return Arrays.stream(MSG_SEQ_ERROR_SIMU_TYPE.values()).anyMatch(t -> t.label.equals(item));
}
public static String getValidSeqErrSimuTypeList() {
return Arrays.stream(MSG_SEQ_ERROR_SIMU_TYPE.values()).map(t -> t.label).collect(Collectors.joining(", "));
}
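The MAPPING rewrite above trades a mutable HashMap filled in a static block for a single immutable map built with Streams. The same idiom on a toy enum, runnable on its own:

import java.util.Map;
import java.util.Optional;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class EnumLookupSketch {
    enum Mode { OUT_OF_ORDER, MSG_LOSS }

    // One immutable map keyed by every accepted spelling; no static-init mutation.
    private static final Map<String, Mode> MAPPING = Stream.of(Mode.values())
        .flatMap(m -> Stream.of(m.name(), m.name().toLowerCase())
            .distinct()
            .map(key -> Map.entry(key, m)))
        .collect(Collectors.toUnmodifiableMap(Map.Entry::getKey, Map.Entry::getValue));

    static Optional<Mode> parse(String s) {
        return Optional.ofNullable(MAPPING.get(s.trim()));
    }

    public static void main(String[] args) {
        System.out.println(parse("out_of_order")); // Optional[OUT_OF_ORDER]
        System.out.println(parse("nope"));         // Optional.empty
    }
}

Note the .distinct() before collecting: toUnmodifiableMap throws on duplicate keys, so deduplicating the spellings per constant is what keeps the pipeline safe when label and name() coincide.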
///////
|
||||
// Valid Pulsar API type
|
||||
@ -113,12 +109,15 @@ public class PulsarAdapterUtil {
|
||||
PULSAR_API_TYPE(String label) {
|
||||
this.label = label;
|
||||
}
|
||||
|
||||
private static final Set<String> LABELS = Stream.of(values()).map(v -> v.label).collect(Collectors.toUnmodifiableSet());
|
||||
|
||||
public static boolean isValidLabel(String label) {
|
||||
return LABELS.contains(label);
|
||||
}
|
||||
}
|
||||
public static boolean isValidPulsarApiType(String param) {
|
||||
return Arrays.stream(PULSAR_API_TYPE.values()).anyMatch(t -> t.label.equals(param));
|
||||
}
|
||||
public static String getValidPulsarApiTypeList() {
|
||||
return Arrays.stream(PULSAR_API_TYPE.values()).map(t -> t.label).collect(Collectors.joining(", "));
|
||||
return PULSAR_API_TYPE.isValidLabel(param);
|
||||
}
|
||||
|
||||
|
||||
@ -136,14 +135,16 @@ public class PulsarAdapterUtil {
|
||||
CONF_GATEGORY(String label) {
|
||||
this.label = label;
|
||||
}
|
||||
|
||||
private static final Set<String> LABELS = Stream.of(values()).map(v -> v.label).collect(Collectors.toUnmodifiableSet());
|
||||
|
||||
public static boolean isValidLabel(String label) {
|
||||
return LABELS.contains(label);
|
||||
}
|
||||
}
|
||||
public static boolean isValidConfCategory(String item) {
|
||||
return Arrays.stream(CONF_GATEGORY.values()).anyMatch(t -> t.label.equals(item));
|
||||
return CONF_GATEGORY.isValidLabel(item);
|
||||
}
|
||||
public static String getValidConfCategoryList() {
|
||||
return Arrays.stream(CONF_GATEGORY.values()).map(t -> t.label).collect(Collectors.joining(", "));
|
||||
}
|
||||
|
||||
///////
|
||||
// Valid persistence type
|
||||
public enum PERSISTENT_TYPES {
|
||||
@ -156,9 +157,6 @@ public class PulsarAdapterUtil {
|
||||
this.label = label;
|
||||
}
|
||||
}
|
||||
public static boolean isValidPersistenceType(String type) {
|
||||
return Arrays.stream(PERSISTENT_TYPES.values()).anyMatch(t -> t.label.equals(type));
|
||||
}
|
||||
|
||||
///////
|
||||
// Valid Pulsar client configuration (activity-level settings)
|
||||
@ -194,9 +192,6 @@ public class PulsarAdapterUtil {
|
||||
this.label = label;
|
||||
}
|
||||
}
|
||||
public static boolean isValidClientConfItem(String item) {
|
||||
return Arrays.stream(CLNT_CONF_KEY.values()).anyMatch(t -> t.label.equals(item));
|
||||
}
|
||||
|
||||
///////
|
||||
// Standard producer configuration (activity-level settings)
|
||||
@ -222,9 +217,6 @@ public class PulsarAdapterUtil {
|
||||
this.label = label;
|
||||
}
|
||||
}
|
||||
public static boolean isStandardProducerConfItem(String item) {
|
||||
return Arrays.stream(PRODUCER_CONF_STD_KEY.values()).anyMatch(t -> t.label.equals(item));
|
||||
}
|
||||
|
||||
// compressionType
|
||||
public enum COMPRESSION_TYPE {
|
||||
@ -239,12 +231,12 @@ public class PulsarAdapterUtil {
|
||||
COMPRESSION_TYPE(String label) {
|
||||
this.label = label;
|
||||
}
|
||||
|
||||
private final static String TYPE_LIST = Stream.of(COMPRESSION_TYPE.values()).map(t -> t.label).collect(Collectors.joining(", "));
|
||||
}
|
||||
public static boolean isValidCompressionType(String item) {
|
||||
return Arrays.stream(COMPRESSION_TYPE.values()).anyMatch(t -> t.label.equals(item));
|
||||
}
|
||||
|
||||
public static String getValidCompressionTypeList() {
|
||||
return Arrays.stream(COMPRESSION_TYPE.values()).map(t -> t.label).collect(Collectors.joining(", "));
|
||||
return COMPRESSION_TYPE.TYPE_LIST;
|
||||
}
|
||||
|
||||
///////
|
||||
@ -284,9 +276,6 @@ public class PulsarAdapterUtil {
|
||||
this.label = label;
|
||||
}
|
||||
}
|
||||
public static boolean isStandardConsumerConfItem(String item) {
|
||||
return Arrays.stream(CONSUMER_CONF_STD_KEY.values()).anyMatch(t -> t.label.equals(item));
|
||||
}
|
||||
|
||||
///////
|
||||
// Custom consumer configuration (activity-level settings)
|
||||
@ -301,9 +290,16 @@ public class PulsarAdapterUtil {
|
||||
CONSUMER_CONF_CUSTOM_KEY(String label) {
|
||||
this.label = label;
|
||||
}
|
||||
|
||||
private static final Set<String> LABELS = Stream.of(values()).map(v -> v.label).collect(Collectors.toUnmodifiableSet());
|
||||
|
||||
public static boolean isValidLabel(String label) {
|
||||
return LABELS.contains(label);
|
||||
}
|
||||
|
||||
}
|
||||
public static boolean isCustomConsumerConfItem(String item) {
|
||||
return Arrays.stream(CONSUMER_CONF_CUSTOM_KEY.values()).anyMatch(t -> t.label.equals(item));
|
||||
return CONSUMER_CONF_CUSTOM_KEY.isValidLabel(item);
|
||||
}
|
||||
|
||||
// subscriptionTyp
|
||||
@ -318,12 +314,21 @@ public class PulsarAdapterUtil {
|
||||
SUBSCRIPTION_TYPE(String label) {
|
||||
this.label = label;
|
||||
}
|
||||
|
||||
private static final Set<String> LABELS = Stream.of(values()).map(v -> v.label)
|
||||
.collect(Collectors.toUnmodifiableSet());
|
||||
|
||||
public static boolean isValidLabel(String label) {
|
||||
return LABELS.contains(label);
|
||||
}
|
||||
|
||||
private final static String TYPE_LIST = Stream.of(COMPRESSION_TYPE.values()).map(t -> t.label).collect(Collectors.joining(", "));
|
||||
}
|
||||
public static boolean isValidSubscriptionType(String item) {
|
||||
return Arrays.stream(SUBSCRIPTION_TYPE.values()).anyMatch(t -> t.label.equals(item));
|
||||
return SUBSCRIPTION_TYPE.isValidLabel(item);
|
||||
}
|
||||
public static String getValidSubscriptionTypeList() {
|
||||
return Arrays.stream(SUBSCRIPTION_TYPE.values()).map(t -> t.label).collect(Collectors.joining(", "));
|
||||
return SUBSCRIPTION_TYPE.TYPE_LIST;
|
||||
}

// subscriptionInitialPosition
@ -336,12 +341,12 @@ public class PulsarAdapterUtil {
SUBSCRIPTION_INITIAL_POSITION(String label) {
this.label = label;
}
}
public static boolean isValidSubscriptionInitialPosition(String item) {
return Arrays.stream(SUBSCRIPTION_INITIAL_POSITION.values()).anyMatch(t -> t.label.equals(item));

private final static String TYPE_LIST = Stream.of(values()).map(t -> t.label).collect(Collectors.joining(", "));

}
public static String getValidSubscriptionInitialPositionList() {
return Arrays.stream(SUBSCRIPTION_INITIAL_POSITION.values()).map(t -> t.label).collect(Collectors.joining(", "));
return SUBSCRIPTION_INITIAL_POSITION.TYPE_LIST;
}

// regexSubscriptionMode
@ -355,12 +360,12 @@ public class PulsarAdapterUtil {
REGEX_SUBSCRIPTION_MODE(String label) {
this.label = label;
}

private final static String TYPE_LIST = Stream.of(values()).map(t -> t.label).collect(Collectors.joining(", "));
}
public static boolean isValidRegexSubscriptionMode(String item) {
return Arrays.stream(REGEX_SUBSCRIPTION_MODE.values()).anyMatch(t -> t.label.equals(item));
}

public static String getValidRegexSubscriptionModeList() {
return Arrays.stream(REGEX_SUBSCRIPTION_MODE.values()).map(t -> t.label).collect(Collectors.joining(", "));
return REGEX_SUBSCRIPTION_MODE.TYPE_LIST;
}

///////
@ -383,9 +388,6 @@ public class PulsarAdapterUtil {
this.label = label;
}
}
public static boolean isStandardReaderConfItem(String item) {
return Arrays.stream(READER_CONF_STD_KEY.values()).anyMatch(t -> t.label.equals(item));
}

///////
// Custom reader configuration (activity-level settings)
@ -400,9 +402,6 @@ public class PulsarAdapterUtil {
this.label = label;
}
}
public static boolean isCustomReaderConfItem(String item) {
return Arrays.stream(READER_CONF_CUSTOM_KEY.values()).anyMatch(t -> t.label.equals(item));
}

///////
// Valid read positions for a Pulsar reader
@ -415,156 +414,84 @@ public class PulsarAdapterUtil {
READER_MSG_POSITION_TYPE(String label) {
this.label = label;
}

private static final Set<String> LABELS = Stream.of(values()).map(v -> v.label)
.collect(Collectors.toUnmodifiableSet());

public static boolean isValidLabel(String label) {
return LABELS.contains(label);
}
}
public static boolean isValideReaderStartPosition(String item) {
return Arrays.stream(READER_MSG_POSITION_TYPE.values()).anyMatch(t -> t.label.equals(item));
return READER_MSG_POSITION_TYPE.isValidLabel(item);
}

private static final Map<String, Schema<?>> PRIMITIVE_SCHEMA_TYPE_MAPPING = Stream.of(SchemaType.values())
.filter(SchemaType::isPrimitive)
.collect(Collectors.toUnmodifiableMap(schemaType -> schemaType.name().toUpperCase(),
schemaType -> Schema.getSchema(SchemaInfo.builder().type(schemaType).build())));

///////
// Primitive Schema type
public static boolean isPrimitiveSchemaTypeStr(String typeStr) {
boolean isPrimitive = false;

// Use "BYTES" as the default type if the type string is not explicitly specified
if (StringUtils.isBlank(typeStr)) {
typeStr = "BYTES";
}

if (typeStr.equalsIgnoreCase("BOOLEAN") || typeStr.equalsIgnoreCase("INT8") ||
typeStr.equalsIgnoreCase("INT16") || typeStr.equalsIgnoreCase("INT32") ||
typeStr.equalsIgnoreCase("INT64") || typeStr.equalsIgnoreCase("FLOAT") ||
typeStr.equalsIgnoreCase("DOUBLE") || typeStr.equalsIgnoreCase("BYTES") ||
typeStr.equalsIgnoreCase("DATE") || typeStr.equalsIgnoreCase("TIME") ||
typeStr.equalsIgnoreCase("TIMESTAMP") || typeStr.equalsIgnoreCase("INSTANT") ||
typeStr.equalsIgnoreCase("LOCAL_DATE") || typeStr.equalsIgnoreCase("LOCAL_TIME") ||
typeStr.equalsIgnoreCase("LOCAL_DATE_TIME")) {
isPrimitive = true;
}

return isPrimitive;
return StringUtils.isBlank(typeStr) || PRIMITIVE_SCHEMA_TYPE_MAPPING.containsKey(typeStr.toUpperCase());
}

public static Schema<?> getPrimitiveTypeSchema(String typeStr) {
Schema<?> schema;

if (StringUtils.isBlank(typeStr)) {
typeStr = "BYTES";
String lookupKey = StringUtils.isBlank(typeStr) ? "BYTES" : typeStr.toUpperCase();
Schema<?> schema = PRIMITIVE_SCHEMA_TYPE_MAPPING.get(lookupKey);
if (schema == null) {
throw new PulsarAdapterInvalidParamException("Invalid Pulsar primitive schema type string : " + typeStr);
}

switch (typeStr.toUpperCase()) {
case "BOOLEAN":
schema = Schema.BOOL;
break;
case "INT8":
schema = Schema.INT8;
break;
case "INT16":
schema = Schema.INT16;
break;
case "INT32":
schema = Schema.INT32;
break;
case "INT64":
schema = Schema.INT64;
break;
case "FLOAT":
schema = Schema.FLOAT;
break;
case "DOUBLE":
schema = Schema.DOUBLE;
break;
case "DATE":
schema = Schema.DATE;
break;
case "TIME":
schema = Schema.TIME;
break;
case "TIMESTAMP":
schema = Schema.TIMESTAMP;
break;
case "INSTANT":
schema = Schema.INSTANT;
break;
case "LOCAL_DATE":
schema = Schema.LOCAL_DATE;
break;
case "LOCAL_TIME":
schema = Schema.LOCAL_TIME;
break;
case "LOCAL_DATE_TIME":
schema = Schema.LOCAL_DATE_TIME;
break;
case "BYTES":
schema = Schema.BYTES;
break;
// Report an error if non-valid, non-empty schema type string is provided
default:
throw new PulsarAdapterInvalidParamException("Invalid Pulsar primitive schema type string : " + typeStr);
}

return schema;
}
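The map-backed rewrite keeps the old defaulting behavior while collapsing the switch into a single table lookup. Illustrative calls, assuming only the code shown above:

```java
// Blank type strings fall back to the BYTES schema, named types resolve
// case-insensitively, and unknown names fail fast:
Schema<?> s1 = PulsarAdapterUtil.getPrimitiveTypeSchema("");       // BYTES schema
Schema<?> s2 = PulsarAdapterUtil.getPrimitiveTypeSchema("int32");  // same as "INT32"
// PulsarAdapterUtil.getPrimitiveTypeSchema("no-such-type")
//     -> throws PulsarAdapterInvalidParamException
```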

///////
// Complex struct type: Avro or JSON
public static boolean isAvroSchemaTypeStr(String typeStr) {
return (StringUtils.isNotBlank(typeStr) && typeStr.equalsIgnoreCase("AVRO"));
return "AVRO".equalsIgnoreCase(typeStr);
}

// automatically decode the type from the Registry
public static boolean isAutoConsumeSchemaTypeStr(String typeStr) {
return (StringUtils.isNotBlank(typeStr) && typeStr.equalsIgnoreCase("AUTO_CONSUME"));
return "AUTO_CONSUME".equalsIgnoreCase(typeStr);
}
public static Schema<?> getAvroSchema(String typeStr, String definitionStr) {
String schemaDefinitionStr = definitionStr;
String filePrefix = "file://";
Schema<?> schema;

private static final Map<String, Schema<?>> AVRO_SCHEMA_CACHE = new ConcurrentHashMap<>();

public static Schema<?> getAvroSchema(String typeStr, final String definitionStr) {
// Check if payloadStr points to a file (e.g. "file:///path/to/a/file")
if (isAvroSchemaTypeStr(typeStr)) {
if (StringUtils.isBlank(schemaDefinitionStr)) {
throw new PulsarAdapterInvalidParamException(
"Schema definition must be provided for \"Avro\" schema type!");
if (StringUtils.isBlank(definitionStr)) {
throw new PulsarAdapterInvalidParamException("Schema definition must be provided for \"Avro\" schema type!");
}
else if (schemaDefinitionStr.startsWith(filePrefix)) {
try {
Path filePath = Paths.get(URI.create(schemaDefinitionStr));
schemaDefinitionStr = Files.readString(filePath, StandardCharsets.US_ASCII);
return AVRO_SCHEMA_CACHE.computeIfAbsent(definitionStr, __ -> {
String schemaDefinitionStr = definitionStr;
if (schemaDefinitionStr.startsWith("file://")) {
try {
Path filePath = Paths.get(URI.create(schemaDefinitionStr));
schemaDefinitionStr = Files.readString(filePath, StandardCharsets.UTF_8);
} catch (IOException ioe) {
throw new PulsarAdapterUnexpectedException("Error reading the specified \"Avro\" schema definition file: " + definitionStr + ": " + ioe.getMessage());
}
}
catch (IOException ioe) {
throw new PulsarAdapterUnexpectedException(
"Error reading the specified \"Avro\" schema definition file: " + definitionStr + ": " + ioe.getMessage());
}
}

schema = PulsarAvroSchemaUtil.GetSchema_PulsarAvro("NBAvro", schemaDefinitionStr);
return PulsarAvroSchemaUtil.GetSchema_PulsarAvro("NBAvro", schemaDefinitionStr);
});
} else {
throw new PulsarAdapterInvalidParamException("Trying to create a \"Avro\" schema for a non-Avro schema type string: " + typeStr);
}
else {
throw new PulsarAdapterInvalidParamException(
"Trying to create a \"Avro\" schema for a non-Avro schema type string: " + typeStr);
}

return schema;
}
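The schema cache above leans on ConcurrentHashMap.computeIfAbsent, which runs the loader at most once per distinct key even under concurrency. A stripped-down sketch of the same pattern, independent of the Pulsar classes:

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class DefinitionCacheSketch {
    private static final Map<String, String> CACHE = new ConcurrentHashMap<>();

    // The mapping function is evaluated only on a cache miss; later calls with
    // the same definition string return the already-parsed value directly.
    static String parseOnce(String definition) {
        return CACHE.computeIfAbsent(definition, def -> {
            System.out.println("parsing " + def); // happens once per key
            return def.trim();                    // stand-in for real parsing
        });
    }
}
```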

///////
// Generate effective key string
public static String buildCacheKey(String... keyParts) {
// Ignore blank keyParts
String joinedKeyStr =
Stream.of(keyParts)
.filter(s -> !StringUtils.isBlank(s))
.collect(Collectors.joining(","));

return Base64.getEncoder().encodeToString(joinedKeyStr.getBytes());
}
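A worked example of the key builder above: blank parts are filtered out before joining, so both calls below yield the Base64 encoding of "tenant,ns":

```java
// Both produce "dGVuYW50LG5z", i.e. Base64("tenant,ns"):
String k1 = PulsarAdapterUtil.buildCacheKey("tenant", "ns");
String k2 = PulsarAdapterUtil.buildCacheKey("tenant", "", "ns");
```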

///////
// Convert JSON string to a key/value map
public static Map<String, String> convertJsonToMap(String jsonStr) throws Exception {
ObjectMapper mapper = new ObjectMapper();
return mapper.readValue(jsonStr, Map.class);
private static final ObjectMapper JACKSON_OBJECT_MAPPER = new ObjectMapper();
private static final TypeReference<Map<String, String>> MAP_TYPE_REF = new TypeReference<>() {};

public static Map<String, String> convertJsonToMap(String jsonStr) throws IOException {
return JACKSON_OBJECT_MAPPER.readValue(jsonStr, MAP_TYPE_REF);
}
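Two things changed in convertJsonToMap: the ObjectMapper is now shared (it is thread-safe once configured, and construction is not free), and the raw Map.class target became a TypeReference so the Map&lt;String, String&gt; shape stays type-checked. The same shape in isolation:

```java
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.io.IOException;
import java.util.Map;

class JsonMapSketch {
    private static final ObjectMapper MAPPER = new ObjectMapper();
    private static final TypeReference<Map<String, String>> MAP_REF = new TypeReference<>() {};

    // readValue(json, Map.class) would return a raw Map and force an unchecked
    // cast; the TypeReference preserves the generic type end to end.
    static Map<String, String> toMap(String json) throws IOException {
        return MAPPER.readValue(json, MAP_REF);
    }
}
```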


///////
// Get full namespace name (<tenant>/<namespace>) from a Pulsar topic URI
public static String getFullNamespaceName(String topicUri) {
@ -0,0 +1,90 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package io.nosqlbench.adapter.pulsar.util;

import org.junit.jupiter.api.Test;

import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;

class MessageSequenceNumberSendingHandlerTest {
MessageSequenceNumberSendingHandler sequenceNumberSendingHandler = new MessageSequenceNumberSendingHandler();

@Test
void shouldAddMonotonicSequence() {
for (long l = 1; l <= 100; l++) {
assertEquals(l, sequenceNumberSendingHandler.getNextSequenceNumber(Collections.emptySet()));
}
}

@Test
void shouldInjectMessageLoss() {
assertEquals(1L, sequenceNumberSendingHandler.getNextSequenceNumber(Collections.emptySet()));
assertEquals(3L, sequenceNumberSendingHandler.getNextSequenceNumber(Collections.singleton(PulsarAdapterUtil.MSG_SEQ_ERROR_SIMU_TYPE.MsgLoss), 100));
}

@Test
void shouldInjectMessageDuplication() {
assertEquals(1L, sequenceNumberSendingHandler.getNextSequenceNumber(Collections.emptySet()));
assertEquals(1L, sequenceNumberSendingHandler.getNextSequenceNumber(Collections.singleton(PulsarAdapterUtil.MSG_SEQ_ERROR_SIMU_TYPE.MsgDup), 100));
}

@Test
void shouldInjectMessageOutOfOrder() {
assertEquals(1L, sequenceNumberSendingHandler.getNextSequenceNumber(Collections.emptySet()));
assertEquals(4L, sequenceNumberSendingHandler.getNextSequenceNumber(Collections.singleton(PulsarAdapterUtil.MSG_SEQ_ERROR_SIMU_TYPE.OutOfOrder), 100));
assertEquals(2L, sequenceNumberSendingHandler.getNextSequenceNumber(Collections.emptySet()));
assertEquals(3L, sequenceNumberSendingHandler.getNextSequenceNumber(Collections.emptySet()));
assertEquals(5L, sequenceNumberSendingHandler.getNextSequenceNumber(Collections.emptySet()));
assertEquals(6, sequenceNumberSendingHandler.getNextSequenceNumber(Collections.emptySet()));
}

@Test
void shouldInjectOneOfTheSimulatedErrorsRandomly() {
Set<PulsarAdapterUtil.MSG_SEQ_ERROR_SIMU_TYPE> allErrorTypes = new HashSet<>(Arrays.asList(PulsarAdapterUtil.MSG_SEQ_ERROR_SIMU_TYPE.values()));

assertEquals(1L, sequenceNumberSendingHandler.getNextSequenceNumber(Collections.emptySet()));
long previousSequenceNumber = 1L;
int outOfSequenceInjectionCounter = 0;
int messageDupCounter = 0;
int messageLossCounter = 0;
int successCounter = 0;
for (int i = 0; i < 1000; i++) {
long nextSequenceNumber = sequenceNumberSendingHandler.getNextSequenceNumber(allErrorTypes);
if (nextSequenceNumber >= previousSequenceNumber + 3) {
outOfSequenceInjectionCounter++;
} else if (nextSequenceNumber <= previousSequenceNumber) {
messageDupCounter++;
} else if (nextSequenceNumber >= previousSequenceNumber + 2) {
messageLossCounter++;
} else if (nextSequenceNumber == previousSequenceNumber + 1) {
successCounter++;
}
previousSequenceNumber = nextSequenceNumber;
}
assertTrue(outOfSequenceInjectionCounter > 0);
assertTrue(messageDupCounter > 0);
assertTrue(messageLossCounter > 0);
assertEquals(1000, outOfSequenceInjectionCounter + messageDupCounter + messageLossCounter + successCounter);
}

}
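The tests above pin down the handler's contract: monotonic numbers by default, a skipped number under MsgLoss, and a repeated number under MsgDup. The real class lives in the adapter; the following is only an illustrative reconstruction of those two cases (the percentage overload and OutOfOrder handling are elided):

```java
import java.util.Set;

// Illustration only -- not the adapter's implementation.
class SequenceNumberSketch {
    enum SimulatedError { MsgLoss, MsgDup }

    private long next = 1;
    private long last = 0;

    long nextSequenceNumber(Set<SimulatedError> errors) {
        if (errors.contains(SimulatedError.MsgDup) && last > 0) {
            return last;       // re-emit the previous number: a duplicate
        }
        if (errors.contains(SimulatedError.MsgLoss)) {
            next++;            // silently skip one number: a loss
        }
        last = next;
        return next++;
    }
}
```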
@ -0,0 +1,247 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package io.nosqlbench.adapter.pulsar.util;

import com.codahale.metrics.Counter;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.ValueSource;

import static org.junit.jupiter.api.Assertions.assertEquals;

class ReceivedMessageSequenceTrackerTest {
Counter msgErrOutOfSeqCounter = new Counter();
Counter msgErrDuplicateCounter = new Counter();
Counter msgErrLossCounter = new Counter();
ReceivedMessageSequenceTracker messageSequenceTracker = new ReceivedMessageSequenceTracker(msgErrOutOfSeqCounter, msgErrDuplicateCounter, msgErrLossCounter, 20, 20);

@Test
void shouldCountersBeZeroWhenSequenceDoesntContainGaps() {
// when
for (long l = 0; l < 100L; l++) {
messageSequenceTracker.sequenceNumberReceived(l);
}
messageSequenceTracker.close();
// then
assertEquals(0, msgErrOutOfSeqCounter.getCount());
assertEquals(0, msgErrDuplicateCounter.getCount());
assertEquals(0, msgErrLossCounter.getCount());
}

@ParameterizedTest
@ValueSource(longs = {10L, 11L, 19L, 20L, 21L, 100L})
void shouldDetectMsgLossWhenEverySecondMessageIsLost(long totalMessages) {
doShouldDetectMsgLoss(totalMessages, 2);
}

@ParameterizedTest
@ValueSource(longs = {10L, 11L, 19L, 20L, 21L, 100L})
void shouldDetectMsgLossWhenEveryThirdMessageIsLost(long totalMessages) {
doShouldDetectMsgLoss(totalMessages, 3);
}

@ParameterizedTest
@ValueSource(longs = {20L, 21L, 40L, 41L, 42L, 43L, 100L})
void shouldDetectMsgLossWhenEvery21stMessageIsLost(long totalMessages) {
doShouldDetectMsgLoss(totalMessages, 21);
}

private void doShouldDetectMsgLoss(long totalMessages, int loseEveryNthMessage) {
int messagesLost = 0;
// when
boolean lastMessageWasLost = false;
for (long l = 0; l < totalMessages; l++) {
if (l % loseEveryNthMessage == 1) {
messagesLost++;
lastMessageWasLost = true;
continue;
} else {
lastMessageWasLost = false;
}
messageSequenceTracker.sequenceNumberReceived(l);
}
if (lastMessageWasLost) {
messageSequenceTracker.sequenceNumberReceived(totalMessages);
}
messageSequenceTracker.close();
// then
assertEquals(0, msgErrOutOfSeqCounter.getCount());
assertEquals(0, msgErrDuplicateCounter.getCount());
assertEquals(messagesLost, msgErrLossCounter.getCount());
}

@ParameterizedTest
@ValueSource(longs = {10L, 11L, 19L, 20L, 21L, 100L})
void shouldDetectMsgDuplication(long totalMessages) {
int messagesDuplicated = 0;
// when
for (long l = 0; l < totalMessages; l++) {
if (l % 2 == 1) {
messagesDuplicated++;
messageSequenceTracker.sequenceNumberReceived(l);
}
messageSequenceTracker.sequenceNumberReceived(l);
}
if (totalMessages % 2 == 0) {
messageSequenceTracker.sequenceNumberReceived(totalMessages);
}
if (totalMessages < 2 * messageSequenceTracker.getMaxTrackOutOfOrderSequenceNumbers()) {
messageSequenceTracker.close();
}

// then
assertEquals(0, msgErrOutOfSeqCounter.getCount());
assertEquals(messagesDuplicated, msgErrDuplicateCounter.getCount());
assertEquals(0, msgErrLossCounter.getCount());
}

@Test
void shouldDetectSingleMessageOutOfSequence() {
// when
for (long l = 0; l < 10L; l++) {
messageSequenceTracker.sequenceNumberReceived(l);
}
messageSequenceTracker.sequenceNumberReceived(10L);
messageSequenceTracker.sequenceNumberReceived(12L);
messageSequenceTracker.sequenceNumberReceived(11L);
for (long l = 13L; l < 100L; l++) {
messageSequenceTracker.sequenceNumberReceived(l);
}

// then
assertEquals(1, msgErrOutOfSeqCounter.getCount());
assertEquals(0, msgErrDuplicateCounter.getCount());
assertEquals(0, msgErrLossCounter.getCount());
}

@Test
void shouldDetectMultipleMessagesOutOfSequence() {
// when
for (long l = 0; l < 10L; l++) {
messageSequenceTracker.sequenceNumberReceived(l);
}
messageSequenceTracker.sequenceNumberReceived(10L);
messageSequenceTracker.sequenceNumberReceived(14L);
messageSequenceTracker.sequenceNumberReceived(13L);
messageSequenceTracker.sequenceNumberReceived(11L);
messageSequenceTracker.sequenceNumberReceived(12L);
for (long l = 15L; l < 100L; l++) {
messageSequenceTracker.sequenceNumberReceived(l);
}

// then
assertEquals(2, msgErrOutOfSeqCounter.getCount());
assertEquals(0, msgErrDuplicateCounter.getCount());
assertEquals(0, msgErrLossCounter.getCount());
}

@Test
void shouldDetectIndividualMessageLoss() {
// when
for (long l = 0; l < 100L; l++) {
if (l != 11L) {
messageSequenceTracker.sequenceNumberReceived(l);
}
}
messageSequenceTracker.close();

// then
assertEquals(0, msgErrOutOfSeqCounter.getCount());
assertEquals(0, msgErrDuplicateCounter.getCount());
assertEquals(1, msgErrLossCounter.getCount());
}

@Test
void shouldDetectGapAndMessageDuplication() {
// when
for (long l = 0; l < 100L; l++) {
if (l != 11L) {
messageSequenceTracker.sequenceNumberReceived(l);
}
if (l == 12L) {
messageSequenceTracker.sequenceNumberReceived(l);
}
}
messageSequenceTracker.close();

// then
assertEquals(0, msgErrOutOfSeqCounter.getCount());
assertEquals(1, msgErrDuplicateCounter.getCount());
assertEquals(1, msgErrLossCounter.getCount());
}

@Test
void shouldDetectGapAndMessageDuplicationTimes2() {
// when
for (long l = 0; l < 100L; l++) {
if (l != 11L) {
messageSequenceTracker.sequenceNumberReceived(l);
}
if (l == 12L) {
messageSequenceTracker.sequenceNumberReceived(l);
messageSequenceTracker.sequenceNumberReceived(l);
}
}
messageSequenceTracker.close();

// then
assertEquals(0, msgErrOutOfSeqCounter.getCount());
assertEquals(2, msgErrDuplicateCounter.getCount());
assertEquals(1, msgErrLossCounter.getCount());
}


@Test
void shouldDetectDelayedOutOfOrderDelivery() {
// when
for (long l = 0; l < 5 * messageSequenceTracker.getMaxTrackOutOfOrderSequenceNumbers(); l++) {
if (l != 10) {
messageSequenceTracker.sequenceNumberReceived(l);
}
if (l == messageSequenceTracker.getMaxTrackOutOfOrderSequenceNumbers() * 2) {
messageSequenceTracker.sequenceNumberReceived(10);
}
}
messageSequenceTracker.close();

// then
assertEquals(1, msgErrOutOfSeqCounter.getCount());
assertEquals(0, msgErrDuplicateCounter.getCount());
assertEquals(0, msgErrLossCounter.getCount());
}

@Test
void shouldDetectDelayedOutOfOrderDeliveryOf2ConsecutiveSequenceNumbers() {
// when
for (long l = 0; l < 5 * messageSequenceTracker.getMaxTrackOutOfOrderSequenceNumbers(); l++) {
if (l != 10 && l != 11) {
messageSequenceTracker.sequenceNumberReceived(l);
}
if (l == messageSequenceTracker.getMaxTrackOutOfOrderSequenceNumbers() * 2) {
messageSequenceTracker.sequenceNumberReceived(10);
messageSequenceTracker.sequenceNumberReceived(11);
}
}
messageSequenceTracker.close();

// then
assertEquals(2, msgErrOutOfSeqCounter.getCount());
assertEquals(0, msgErrDuplicateCounter.getCount());
assertEquals(0, msgErrLossCounter.getCount());
}
}
@ -33,7 +33,7 @@ Run a stdout activity named 'stdout-test', with definitions from activities/stdo

To enable, specify the type of the SSL implementation with either `jdk` or `openssl`.

[Additional parameters may need to be provided](../../../../driver-cql/src/main/resources/ssl.md).
See the ssl help topic for more details with `nb5 help ssl`.

- **host** - this is the name to bind to (local interface address)
- default: localhost

@ -45,7 +45,7 @@ Run a stdout activity named 'stdout-test', with definitions from activities/stdo

To enable, specify the type of the SSL implementation with either `jdk` or `openssl`.

[Additional parameters may need to be provided](../../../../driver-cql/src/main/resources/ssl.md).
See the ssl help topic for more details with `nb5 help ssl`.

- **host** - this is the name to bind to (local interface address)
- default: localhost
@ -1,5 +1,5 @@
/*
* Copyright (c) 2022 nosqlbench
* Copyright (c) 2022-2023 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -166,7 +166,7 @@ public abstract class BaseDriverAdapter<R extends Op, S> implements DriverAdapte
.add(Param.optional("instrument", Boolean.class))
.add(Param.optional(List.of("workload", "yaml"), String.class, "location of workload yaml file"))
.add(Param.optional("driver", String.class))
.add(Param.defaultTo("dryrun",false))
.add(Param.defaultTo("dryrun","none").setRegex("(op|jsonnet|none)"))
.asReadOnly();
}
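The dryrun parameter changed shape here, from a boolean to a constrained string. Assuming the builder's setRegex constrains accepted values (as the name suggests), the effect is roughly the hypothetical guard below, applied at configuration time rather than deep inside op mapping:

```java
// Hypothetical validation sketch -- not the real Param API internals.
static String validateDryrun(String value) {
    if (!value.matches("(op|jsonnet|none)")) {
        throw new IllegalArgumentException(
            "dryrun must be one of op, jsonnet, none; got: " + value);
    }
    return value;
}
```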

@ -302,7 +302,7 @@ in the workload construction guide.

```yaml
tags:
phase: main
block: main
```

*json:*
@ -311,7 +311,7 @@ tags:

{
"tags": {
"phase": "main"
"block": "main"
}
}
```
@ -331,7 +331,7 @@ Blocks are used to logically partition a workload for the purposes of grouping,
executing subsets and op sequences. Blocks can contain any of the defined elements above.
Every op template within a block automatically gets a tag with the name 'block' and the value of
the block name. This makes it easy to select a whole block at a time with a tag filter like
`tags=block:schema`.
`tags=block:"schema.*"`.

Blocks are not recursive. You may not put a block inside another block.
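Since every op template now carries an auto-injected block tag, a filter like `tags=block:"schema.*"` reduces to a regex match against that tag. A minimal sketch of the matching rule (the real logic lives in TagFilter; this only illustrates the idea):

```java
import java.util.Map;

// Sketch: the quoted value part of block:"schema.*" is applied as a regex
// against the op's auto-injected 'block' tag.
static boolean matchesBlockFilter(Map<String, String> opTags, String pattern) {
    String block = opTags.get("block");
    return block != null && block.matches(pattern);
}
// matchesBlockFilter(Map.of("block", "schema-tables"), "schema.*") -> true
// matchesBlockFilter(Map.of("block", "main"), "schema.*")          -> false
```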

@ -269,7 +269,7 @@ ops:
bindings:
binding1: NumberNameToString();
tags:
phase: schema
block: schema
params:
prepared: false
description: This is just an example operation
@ -292,7 +292,7 @@ ops:
"prepared": false
},
"tags": {
"phase": "schema"
"block": "schema"
}
}
}
@ -317,7 +317,7 @@ ops:
"prepared": false
},
"tags": {
"phase": "schema",
"block": "schema",
"name": "block0--special-op-name",
"block": "block0"
}
@ -351,7 +351,7 @@ blocks:
bindings:
binding1: NumberNameToString();
tags:
phase: schema
block: schema
params:
prepared: false
description: This is just an example operation
@ -386,7 +386,7 @@ blocks:
"prepared": false
},
"tags": {
"phase": "schema"
"block": "schema"
},
"ops": {
"op1": {
@ -416,7 +416,7 @@ blocks:
"prepared": false
},
"tags": {
"phase": "schema",
"block": "schema",
"docleveltag": "is-tagging-everything",
"name": "block-named-fred--special-op-name",
"block": "block-named-fred"
@ -77,7 +77,7 @@ public class RawYamlTemplateLoaderTest {
assertThat(schemaOnlyScenario.keySet())
.containsExactly("000");
assertThat(schemaOnlyScenario.values())
.containsExactly("run driver=blah tags=phase:schema");
.containsExactly("run driver=blah tags=block:\"schema.*\"");

assertThat(rawOpsDoc.getName()).isEqualTo("doc1");
assertThat(blocks).hasSize(1);
@ -7,7 +7,7 @@ scenarios:
- run driver=stdout alias=step1
- run driver=stdout alias=step2
schema-only:
- run driver=blah tags=phase:schema
- run driver=blah tags=block:"schema.*"

tags:
atagname: atagvalue
@ -1,4 +1,3 @@
# nb -v run driver=cql yaml=cql-iot tags=phase:schema host=dsehost
description: |
put workload description here
scenarios:
@ -19,6 +19,6 @@ However, there are other ways to feed an activity. All inputs are
modular within the nosqlbench runtime. To see what inputs are
available, you can simply run:

PROG --list-input-types
${PROG} --list-input-types

Any input listed this way should have its own documentation.
@ -14,7 +14,7 @@ All cycle logfiles have the *.cyclelog* suffix.
You can dump an rlefile to the screen to see the content in text form by
running a command like this:

PROG --export-cycle-log <filename> [spans|cycles]
${PROG} --export-cycle-log <filename> [spans|cycles]

You do not need to specify the extension. If you do not specify either
optional format at the end, then *spans* is assumed. It will print output
@ -40,7 +40,7 @@ If you need to modify and then re-use a cycle log, you can do this with
simple text tools. Once you have modified the file, you can import it back
to the native format with:

PROG --import-cycle-log <infile.txt> <outfile.cyclelog>
${PROG} --import-cycle-log <infile.txt> <outfile.cyclelog>

The importer recognizes both formats listed above.

@ -161,7 +161,7 @@ This puts NB on a footing to be "Modular Jar" compatible, which is a step toward
* auto-injected statement block and statement name tags.
- this means: You can now construct filters for specific blocks or statements simply by
knowing their name:
- `tags=block:schema` or `tags='main-.*'`
- `tags=block:"schema.*"` or `tags='main-.*'`
* safe usage of activity params and template vars is compatible, but may not be ambiguous. This
means that if you have a template variable in myworkload.yaml, it must be distinctly named
from any valid activity parameters, or an error is thrown. This eliminates a confusing source
@ -229,7 +229,7 @@ cqlgen - takes schema.cql tablestats -> workload.yaml
sstablegen

* yaml+nb version checks
- `min_version: "4.17.15"`
- `min_version: "5.17.1"`


* Mac M1 support
@ -1,5 +1,5 @@
/*
* Copyright (c) 2022 nosqlbench
* Copyright (c) 2022-2023 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -35,27 +35,27 @@ public enum RunState {
* Initial state after creation of a motor. This is the initial state upon instantiation of a motor, before
* it is called on to do any active logic besides what occurs in the constructor.
*/
Uninitialized("i⌀"),
Uninitialized("⌀"),

/**
* A thread has been invoked, but is initializing and preparing for its main control loop.
* This is signaled <EM>by the motor</EM> after {@link Runnable#run}, but before entering the main processing
* loop.
*/
Starting("s⏫"),
Starting("⏫"),

/**
* A thread is iterating within the main control loop.
* This is signaled <EM>by the motor</EM> once initialization in the main loop is complete and immediately
* before it enters its main processing loop.
*/
Running("R\u23F5"),
Running("\u23F5"),

/**
* <P>The thread has been requested to stop. This can be set by a managing thread which is not the
* motor thread, or by the motor thread. In either case, the motor thread is required to observe changes to this and initiate shutdown.</P>
*/
Stopping("s⏬"),
Stopping("⏬"),

/**
* The thread has stopped. This should only be set by the motor. This state will only be visible
@ -64,7 +64,7 @@ public enum RunState {
* <P>NOTE: When a motor is stopped or finished, its state will remain visible in state tracking until
* {@link Motor#getState()}.{@link MotorState#removeState()} is called.</P>
*/
Stopped("e\u23F9"),
Stopped("\u23F9"),

/**
* <P>A thread has exhausted its supply of values on the input (AKA cycles), thus has completed its work.
@ -73,12 +73,12 @@ public enum RunState {
* <P>NOTE: When a motor is stopped or finished, its state will remain visible in state tracking until
* {@link Motor#getState()}.{@link MotorState#removeState()} is called.</P>
*/
Finished("F⏯"),
Finished("⏯"),

/**
* If a motor has seen an exception, it goes into errored state before propagating the error.
*/
Errored("E⚠");
Errored("⚠");

private final String runcode;

@ -106,7 +106,7 @@ public enum RunState {
case Stopping, Finished, Errored -> true;// A motor has exhausted its input, and is finished with its work
default -> false;
};
case Stopping -> (target == Stopped||target==Finished); // A motor was stopped by request before exhausting input
case Stopping -> (target == Stopped||target==Finished||target==Errored); // A motor was stopped by request before exhausting input
case Finished -> (target == Running); // A motor was restarted?
case Errored -> target==Errored;
};
@ -1,5 +1,5 @@
/*
* Copyright (c) 2022 nosqlbench
* Copyright (c) 2022-2023 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -22,4 +22,12 @@ public interface Stoppable {
* completes, the request will cause the component to stop cooperatively.
*/
void requestStop();

static void stop(Object... candidates) {
for (Object candidate : candidates) {
if (candidate instanceof Stoppable stoppable) {
stoppable.requestStop();
}
}
}
}
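The new static helper quietly ignores anything that is not Stoppable, so call sites no longer need per-candidate instanceof checks. If requestStop() is the interface's only abstract method (as the visible portion suggests), Stoppable can even be written as a lambda in a quick demo:

```java
// Only the first candidate is Stoppable, so only it is asked to stop;
// the string and the integer are skipped by the instanceof filter.
Stoppable s = () -> System.out.println("stop requested");
Stoppable.stop(s, "not stoppable", 42);
```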

@ -495,7 +495,8 @@ public class SimpleActivity implements Activity, ProgressCapable, ActivityDefObs
logger.info(() -> "skipped mapping op '" + pop.getName() + "'");
continue;
}
boolean dryrun = pop.takeStaticConfigOr("dryrun", false);
String dryrunSpec = pop.takeStaticConfigOr("dryrun", "none");
boolean dryrun = dryrunSpec.equalsIgnoreCase("op");

DriverAdapter adapter = adapters.get(i);
OpMapper opMapper = adapter.getOpMapper();
@ -463,12 +463,7 @@ public class CoreMotor<D> implements ActivityDefObserver, Motor<D>, Stoppable {
public synchronized void requestStop() {
RunState currentState = motorState.get();
if (Objects.requireNonNull(currentState) == Running) {
if (input instanceof Stoppable) {
((Stoppable) input).requestStop();
}
if (action instanceof Stoppable) {
((Stoppable) action).requestStop();
}
Stoppable.stop(input, action);
motorState.enterState(Stopping);
} else {
logger.warn(() -> "attempted to stop motor " + this.getSlotId() + ": from non Running state:" + currentState);
@ -1,5 +1,5 @@
/*
* Copyright (c) 2022 nosqlbench
* Copyright (c) 2022-2023 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -20,6 +20,8 @@ import io.nosqlbench.engine.api.activityapi.core.RunState;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

import java.util.Arrays;

/**
* A value type which encodes the atomic state of a RunState tally.
*/
@ -39,21 +41,43 @@ public class RunStateImage {
}

public boolean is(RunState runState) {
return counts[runState.ordinal()]>0;
return counts[runState.ordinal()] > 0;
}

public boolean isOnly(RunState runState) {
for (int i = 0; i < counts.length; i++) {
if (counts[i]>0 && i!=runState.ordinal()) {
if (counts[i] > 0 && i != runState.ordinal()) {
return false;
}
}
return true;
}

public boolean isNoneOther(RunState... runStates) {
int[] scan = Arrays.copyOf(counts, counts.length);
for (RunState runState : runStates) {
scan[runState.ordinal()]=0;
}
for (int i : scan) {
if (i>0) {
return false;
}
}
return true;
}
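A worked example of isNoneOther, using the constructor shape from the test further down (counts indexed by RunState ordinal, timeout flag false):

```java
int[] counts = new int[RunState.values().length];
counts[RunState.Starting.ordinal()] = 2;
counts[RunState.Running.ordinal()] = 3;
RunStateImage image = new RunStateImage(counts, false);

// Zeroing the Starting and Running slots leaves an all-zero scan array:
boolean ok = image.isNoneOther(RunState.Starting, RunState.Running); // true
// isOnly(Running) is false here, since Starting still has a nonzero count.
```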

public RunState getMinState() {
for (int ord = 0; ord < counts.length - 1; ord++) {
if (counts[ord] > 0) {
return RunState.values()[ord];
}
}
throw new RuntimeException("There were zero states, so min state is undefined");
}

public RunState getMaxState() {
for (int ord = counts.length-1; ord >= 0; ord--) {
if (counts[ord]>0) {
for (int ord = counts.length - 1; ord >= 0; ord--) {
if (counts[ord] > 0) {
return RunState.values()[ord];
}
}
@ -63,7 +87,7 @@ public class RunStateImage {
public String toString() {
StringBuilder sb = new StringBuilder();
for (RunState runState : RunState.values()) {
sb.append(runState.getCode()).append(" ").append(counts[runState.ordinal()]).append(" ");
sb.append(runState.getCode()).append(":").append(counts[runState.ordinal()]).append(" ");
}
return sb.toString();
}

@ -80,6 +80,7 @@ public class StandardActivity<R extends Op, S> extends SimpleActivity implements
for (OpTemplate ot : opTemplates) {
ParsedOp incompleteOpDef = new ParsedOp(ot, NBConfiguration.empty(), List.of());
String driverName = incompleteOpDef.takeOptionalStaticValue("driver", String.class)
.or(() -> incompleteOpDef.takeOptionalStaticValue("type",String.class))
.or(() -> activityDef.getParams().getOptionalString("driver"))
.orElseThrow(() -> new OpConfigError("Unable to identify driver name for op template:\n" + ot));
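The driver lookup above is an Optional.or fallback chain: each source is consulted in order, and orElseThrow reports when none of them bind. The same shape in isolation (names here are illustrative):

```java
import java.util.Optional;

// Optional.or (Java 9+) defers each fallback until the prior one is empty.
static String resolveDriver(Optional<String> opDriver,
                            Optional<String> legacyType,
                            Optional<String> activityDriver) {
    return opDriver
        .or(() -> legacyType)
        .or(() -> activityDriver)
        .orElseThrow(() -> new IllegalArgumentException("no driver specified"));
}
```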

@ -16,15 +16,15 @@

package io.nosqlbench.engine.api.activityimpl.uniform;

import io.nosqlbench.api.config.standard.NBConfigModel;
import io.nosqlbench.api.config.standard.NBConfiguration;
import io.nosqlbench.api.config.standard.NBReconfigurable;
import io.nosqlbench.api.engine.activityimpl.ActivityDef;
import io.nosqlbench.engine.api.activityapi.core.ActionDispenser;
import io.nosqlbench.engine.api.activityapi.core.ActivityType;
import io.nosqlbench.engine.api.activityconfig.OpsLoader;
import io.nosqlbench.engine.api.activityconfig.yaml.OpsDocList;
import io.nosqlbench.api.engine.activityimpl.ActivityDef;
import io.nosqlbench.engine.api.activityimpl.SimpleActivity;
import io.nosqlbench.api.config.standard.NBConfigModel;
import io.nosqlbench.api.config.standard.NBConfiguration;
import io.nosqlbench.api.config.standard.NBReconfigurable;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

@ -39,7 +39,10 @@ public class StandardActivityType<A extends StandardActivity<?,?>> extends Simpl
private final Map<String,DriverAdapter> adapters = new HashMap<>();

public StandardActivityType(DriverAdapter<?,?> adapter, ActivityDef activityDef) {
super(activityDef);
super(activityDef
.deprecate("type","driver")
.deprecate("yaml", "workload")
);
this.adapters.put(adapter.getAdapterName(),adapter);
if (adapter instanceof ActivityDefAware) {
((ActivityDefAware) adapter).setActivityDef(activityDef);
@ -122,7 +122,8 @@ public class NBCLIScenarioParser {
if (nameparts.length==1) {
Map<String, String> namedScenario = scenarios.getNamedScenario(scenarioName);
if (namedScenario==null) {
throw new BasicError("Named step '" + scenarioName + "' was not found.");
throw new BasicError("Unable to find named scenario '" + scenarioName + "' in workload '" + workloadName
+ "', but you can pick from one of: " + String.join(", ", scenarios.getScenarioNames()));
}
namedSteps.putAll(namedScenario);
} else {
@ -138,7 +139,7 @@ public class NBCLIScenarioParser {
if (selectedScenario.containsKey(stepname)) {
namedSteps.put(stepname,selectedScenario.get(stepname));
} else {
throw new BasicError("Unable to find named scenario.step'" + scenarioName + "' in workload '" + workloadName
throw new BasicError("Unable to find named scenario.step '" + scenarioName + "' in workload '" + workloadName
+ "', but you can pick from one of: " + selectedScenario.keySet().stream().map(n -> nameparts[0].concat(".").concat(n)).collect(Collectors.joining(", ")));
}
}
@ -230,6 +231,12 @@ public class NBCLIScenarioParser {
String[] namedStepPieces = cmd.split(" ");
for (String commandFragment : namedStepPieces) {
Matcher matcher = WordAndMaybeAssignment.matcher(commandFragment);

if (commandFragment.equalsIgnoreCase("")) {
logger.debug("Command fragment discovered to be empty. Skipping this fragment for cmd: {}", cmd);
continue;
}

if (!matcher.matches()) {
throw new BasicError("Unable to recognize scenario cmd spec in '" + commandFragment + "'");
}
@ -1,5 +1,5 @@
/*
* Copyright (c) 2022 nosqlbench
* Copyright (c) 2022-2023 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -27,13 +27,17 @@ public class RunStateImageTest {
public void testMaxStateImage() {
int[] counts = new int[RunState.values().length];
counts[RunState.Running.ordinal()]=3;
counts[RunState.Starting.ordinal()]=2;
RunStateImage image = new RunStateImage(counts, false);
assertThat(image.is(RunState.Running)).isTrue();
assertThat(image.is(RunState.Starting)).isTrue();
assertThat(image.isTimeout()).isFalse();
assertThat(image.is(RunState.Errored)).isFalse();
assertThat(image.isOnly(RunState.Running)).isTrue();
assertThat(image.isNoneOther(RunState.Starting, RunState.Running)).isTrue();
RunState maxState = image.getMaxState();
assertThat(maxState).isEqualTo(RunState.values()[2]);
assertThat(maxState).isEqualTo(RunState.values()[RunState.Running.ordinal()]);
RunState minState = image.getMinState();
assertThat(minState).isEqualTo(RunState.values()[RunState.Starting.ordinal()]);
}

}
@ -153,19 +153,19 @@ public class TagFilterTest {
public void testLeadingSpaceTrimmedInQuotedTag() {

Map<String, String> itemtags = new HashMap<>() {{
put("phase", "main");
put("block", "main");
}};

TagFilter tf = new TagFilter("\"phase: main\"");
TagFilter tf = new TagFilter("\"block: main\"");
assertThat(tf.matches(itemtags).matched()).isTrue();
}

@Test
public void testAnyCondition() {
Map<String, String> itemtags = Map.of("phase", "main", "truck", "car");
TagFilter tf = new TagFilter("any(truck:car,phase:moon)");
Map<String, String> itemtags = Map.of("block", "main", "truck", "car");
TagFilter tf = new TagFilter("any(truck:car,block:moon)");
assertThat(tf.matches(itemtags).matched()).isTrue();
TagFilter tf2 = new TagFilter("any(car:truck,phase:moon)");
TagFilter tf2 = new TagFilter("any(car:truck,block:moon)");
assertThat(tf2.matches(itemtags).matched()).isFalse();
}
}
@ -1,8 +1,8 @@
Running Activities and Scenarios via CLI
========================================

PROG always runs a scenario script. However, there are multiple ways to tell
PROG what that script should be.
${PROG} always runs a scenario script. However, there are multiple ways to tell
${PROG} what that script should be.

Any argument in name=value format serves as a parameter to the
script or activity that precedes it.
@ -10,18 +10,18 @@ script or activity that precedes it.
To create a scenario script that simply runs a single activity to completion,
use this format:
~~~
PROG activity <param>=<value> [...]
${PROG} activity <param>=<value> [...]
~~~

To create a scenario script that runs multiple activities concurrently,
simply add more activities to the list:
~~~
PROG activity <param>=<value> [...] activity <param>=<value> [...]
${PROG} activity <param>=<value> [...] activity <param>=<value> [...]
~~~

To execute a scenario script directly, simply use the format:
~~~
PROG script <scriptname> [param=value [...]]
${PROG} script <scriptname> [param=value [...]]
~~~

Time & Size Units
@ -55,19 +55,19 @@ so parameters may be dropped into scripts ad-hoc.
By using the option --session-name <name>, you can name the session logfile
that will be (over)written with execution details.
~~~
PROG --session-name testsession42
${PROG} --session-name testsession42
~~~

## Metric Name

If you need to see what metrics are available for a particular activity type,
you can ask PROG to instantiate an activity of that type and discover the
you can ask ${PROG} to instantiate an activity of that type and discover the
metrics, dumping out a list. The following form of the command shows you how
to make a list that you can copy metric names from for scripting. If you provide
an example activity alias that matches one of your scripts, you can use it exactly
as it appears.
~~~
PROG --list-metrics driver=diag alias=anexample
${PROG} --list-metrics driver=diag alias=anexample
~~~
This will dump a list of metric names in the shortened format that is most suitable
for scenario script development. This format is required for the --list-metrics
@ -1,4 +1,4 @@
### Command-Line Options ###
# Command-Line Options

Help ( You're looking at it. )

@ -8,27 +8,31 @@ Short options, like '-v' represent simple options, like verbosity. Using multipl
level of the option, like '-vvv'.

Long options, like '--help' are top-level options that may only be used once. These modify general
behavior, or allow you to get more details on how to use PROG.
behavior, or allow you to get more details on how to use ${PROG}.

All other options are either commands, or named arguments to commands. Any single word without
dashes is a command that will be converted into script form. Any option that includes an equals sign
is a named argument to the previous command. The following example is a commandline with a command
*start*, and two named arguments to that command.

PROG start driver=diag alias=example
${PROG} start driver=diag alias=example

### Discovery options ###
## Discovery options

These options help you learn more about running PROG, and about the plugins that are present in your
These options help you learn more about running ${PROG}, and about the plugins that are present in your
particular version.

Show version, long form, with artifact coordinates.

--version

Get a list of additional help topics that have more detailed documentation:

PROG help topics
${PROG} help topics

Provide specific help for the named activity type:

PROG help <activity type>
${PROG} help <activity type>

List the available drivers:

@ -50,9 +54,9 @@ Provide the metrics that are available for scripting

--list-metrics <activity type> [ <activity name> ]

### Execution Options ###
## Execution Options

This is how you actually tell PROG what scenario to run. Each of these commands appends script logic
This is how you actually tell ${PROG} what scenario to run. Each of these commands appends script logic
to the scenario that will be executed. These are considered as commands, can occur in any order and
quantity. The only rule is that arguments in the arg=value form will apply to the preceding script
or activity.
@ -65,9 +69,7 @@ Add the named activity to the scenario, interpolating named parameters

activity [arg=value]...

### General options ###

These options modify how the scenario is run.
## Logging options

Specify a directory for scenario log files:

@ -111,12 +113,38 @@ Specify the logging pattern for logfile only:
# ANSI variants are auto promoted for console if --ansi=enable
# ANSI variants are auto demoted for logfile in any case

## Console Options

Increase console logging levels: (Default console logging level is *warning*)

-v (info)
-vv (debug)
-vvv (trace)

--progress console:1m (disables itself if -v options are used)

These levels affect *only* the console output level. Other logging level parameters affect logging
to the scenario log, stored by default in logs/...

Explicitly enable or disable ANSI logging support:
(ANSI support is enabled if the TERM environment variable is defined)

--ansi=enabled
--ansi=disabled

Adjust the progress reporting interval:

--progress console:1m

or

--progress logonly:5m

NOTE: The progress indicator on console is provided by default unless logging levels are turned up
or there is a script invocation on the command line.

## Metrics options

Specify a directory and enable CSV reporting of metrics:

--report-csv-to <dirname>
@ -158,17 +186,6 @@ Each activity can also override this value with the hdr_digits parameter. Be awa
increase in this number multiples the amount of detail tracked on the client by 10x, so use
caution.

Adjust the progress reporting interval:

--progress console:1m

or

--progress logonly:5m

NOTE: The progress indicator on console is provided by default unless logging levels are turned up
or there is a script invocation on the command line.

If you want to add in classic time decaying histogram metrics for your histograms and timers, you
may do so with this option:

@ -191,22 +208,6 @@ automatically. It also imports a base dashboard for nosqlbench and configures gr
export to share with a central DataStax grafana instance (grafana can be found on localhost:3000
with the default credentials admin/admin).

### Console Options ###

Increase console logging levels: (Default console logging level is *warning*)

-v (info)
-vv (debug)
-vvv (trace)

--progress console:1m (disables itself if -v options are used)

These levels affect *only* the console output level. Other logging level parameters affect logging
to the scenario log, stored by default in logs/...

Show version, long form, with artifact coordinates.

--version

### Summary Reporting

@ -144,9 +144,9 @@ naming scheme for phase control. This means that you have tagged each of
your statements or statement blocks with the appropriate phase tags from
schema, rampup, main, for example.

- `schematags=phase:schema` - The tag filter for schema statements.
- `schematags=block:"schema.*"` - The tag filter for schema statements.
Findmax will run a schema phase with 1 thread by default.
- `maintags=phase:main` - The tag filter for the main workload. This is
- `maintags=block:main` - The tag filter for the main workload. This is
the workload that is started and run in the background for all of the
sampling windows.
@ -48,7 +48,7 @@ schema_activitydef = params.withDefaults({
});
schema_activitydef.alias="findmax_schema";
schema_activitydef.threads="1";
schema_activitydef.tags="TEMPLATE(schematags,phase:schema)";
schema_activitydef.tags="TEMPLATE(schematags,block:\"schema.*\")";
print("Creating schema with schematags:" + schema_activitydef.tags);

scenario.run(schema_activitydef);
@ -63,7 +63,7 @@ activitydef = params.withDefaults({
activitydef.alias="findmax";
activitydef.cycles="1000000000";
activitydef.recycles="1000000000";
activitydef.tags="TEMPLATE(maintags,phase:main)";
activitydef.tags="TEMPLATE(maintags,block:main)";
print("Iterating main workload with tags:" + activitydef.tags);

@ -16,8 +16,8 @@
|
||||
|
||||
package io.nosqlbench.engine.cli;
|
||||
|
||||
import io.nosqlbench.engine.api.scenarios.NBCLIScenarioParser;
|
||||
import io.nosqlbench.api.errors.BasicError;
|
||||
import io.nosqlbench.engine.api.scenarios.NBCLIScenarioParser;
|
||||
import org.junit.jupiter.api.Test;
|
||||
|
||||
import java.nio.file.Path;
|
||||
@ -31,39 +31,39 @@ public class NBCLIScenarioParserTest {
|
||||
|
||||
@Test
|
||||
public void providePathForScenario() {
|
||||
NBCLIOptions opts = new NBCLIOptions(new String[]{ "local/example-scenarios" });
|
||||
NBCLIOptions opts = new NBCLIOptions(new String[]{"local/example-scenarios"});
|
||||
List<Cmd> cmds = opts.getCommands();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void defaultScenario() {
|
||||
NBCLIOptions opts = new NBCLIOptions(new String[]{ "scenario-test" });
|
||||
NBCLIOptions opts = new NBCLIOptions(new String[]{"scenario-test"});
|
||||
List<Cmd> cmds = opts.getCommands();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void defaultScenarioWithParams() {
|
||||
NBCLIOptions opts = new NBCLIOptions(new String[]{ "scenario-test", "cycles=100"});
|
||||
NBCLIOptions opts = new NBCLIOptions(new String[]{"scenario-test", "cycles=100"});
|
||||
List<Cmd> cmds = opts.getCommands();
|
||||
assertThat(cmds.get(0).getArg("cycles")).isEqualTo("100");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void namedScenario() {
|
||||
NBCLIOptions opts = new NBCLIOptions(new String[]{ "scenario-test", "schema-only"});
|
||||
NBCLIOptions opts = new NBCLIOptions(new String[]{"scenario-test", "schema-only"});
|
||||
List<Cmd> cmds = opts.getCommands();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void namedScenarioWithParams() {
|
||||
NBCLIOptions opts = new NBCLIOptions(new String[]{ "scenario-test", "schema-only", "cycles=100"});
|
||||
NBCLIOptions opts = new NBCLIOptions(new String[]{"scenario-test", "schema-only", "cycles=100"});
|
||||
List<Cmd> cmds = opts.getCommands();
|
||||
assertThat(cmds.get(0).getArg("cycles")).containsOnlyOnce("100");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testThatSilentFinalParametersPersist() {
|
||||
NBCLIOptions opts = new NBCLIOptions(new String[]{ "scenario-test", "type=foo"});
|
||||
NBCLIOptions opts = new NBCLIOptions(new String[]{"scenario-test", "type=foo"});
|
||||
List<Cmd> cmds = opts.getCommands();
|
||||
assertThat(cmds.get(0).getArg("driver")).isEqualTo("stdout");
|
||||
}
|
||||
@@ -71,25 +71,25 @@ public class NBCLIScenarioParserTest {
 @Test
 public void testThatVerboseFinalParameterThrowsError() {
 assertThatExceptionOfType(BasicError.class)
-.isThrownBy(() -> new NBCLIOptions(new String[]{ "scenario-test", "workload=canttouchthis"}));
+.isThrownBy(() -> new NBCLIOptions(new String[]{"scenario-test", "workload=canttouchthis"}));
 }

 @Test
 public void testThatMissingScenarioNameThrowsError() {
 assertThatExceptionOfType(BasicError.class)
-.isThrownBy(() -> new NBCLIOptions(new String[]{ "scenario-test", "missing-scenario"}));
+.isThrownBy(() -> new NBCLIOptions(new String[]{"scenario-test", "missing-scenario"}));
 }

 @Test
 public void testThatMultipleScenariosConcatenate() {
-NBCLIOptions opts = new NBCLIOptions(new String[]{ "scenario-test", "default", "default"});
+NBCLIOptions opts = new NBCLIOptions(new String[]{"scenario-test", "default", "default"});
 List<Cmd> cmds = opts.getCommands();
 assertThat(cmds.size()).isEqualTo(6);
 }

 @Test
 public void testThatTemplatesAreExpandedDefault() {
-NBCLIOptions opts = new NBCLIOptions(new String[]{ "scenario-test", "template-test"});
+NBCLIOptions opts = new NBCLIOptions(new String[]{"scenario-test", "template-test"});
 List<Cmd> cmds = opts.getCommands();
 assertThat(cmds.size()).isEqualTo(1);
 assertThat(cmds.get(0).getArg("driver")).isEqualTo("stdout");

@@ -99,31 +99,31 @@ public class NBCLIScenarioParserTest {

 @Test
 public void testThatTemplateParamsAreExpandedAndNotRemovedOverride() {
-NBCLIOptions opts = new NBCLIOptions(new String[]{ "scenario-test", "template-test", "cycles-test=20"});
+NBCLIOptions opts = new NBCLIOptions(new String[]{"scenario-test", "template-test", "cycles-test=20"});
 List<Cmd> cmds = opts.getCommands();
 assertThat(cmds.size()).isEqualTo(1);
 assertThat(cmds.get(0).getParams()).isEqualTo(Map.of(
-"alias","scenariotest_templatetest_withtemplate",
-"cycles","20",
-"cycles-test","20",
-"driver","stdout",
-"workload","scenario-test"
+"alias", "scenariotest_templatetest_withtemplate",
+"cycles", "20",
+"cycles-test", "20",
+"driver", "stdout",
+"workload", "scenario-test"
 ));
 }

 @Test
 public void testThatUndefValuesAreUndefined() {
-NBCLIOptions opts = new NBCLIOptions(new String[]{ "scenario-test", "schema-only", "cycles-test=20"});
+NBCLIOptions opts = new NBCLIOptions(new String[]{"scenario-test", "schema-only", "cycles-test=20"});
 List<Cmd> cmds = opts.getCommands();
 assertThat(cmds.size()).isEqualTo(1);
 assertThat(cmds.get(0).getParams()).isEqualTo(Map.of(
-"alias","scenariotest_schemaonly_000",
-"cycles-test","20",
-"driver","stdout",
-"tags","phase:schema",
-"workload","scenario-test"
+"alias", "scenariotest_schemaonly_schema",
+"cycles-test", "20",
+"driver", "stdout",
+"tags", "block:\"schema.*\"",
+"workload", "scenario-test"
 ));
-NBCLIOptions opts1 = new NBCLIOptions(new String[]{ "scenario-test", "schema-only", "doundef=20"});
+NBCLIOptions opts1 = new NBCLIOptions(new String[]{"scenario-test", "schema-only", "doundef=20"});
 List<Cmd> cmds1 = opts1.getCommands();
 assertThat(cmds1.size()).isEqualTo(1);
 assertThat(cmds1.get(0).getArg("cycles-test")).isNull();

@@ -140,7 +140,7 @@ public class NBCLIScenarioParserTest {
 Path absolute = rel.toAbsolutePath();
 assertThat(absolute).exists();

-NBCLIOptions opts = new NBCLIOptions(new String[]{ absolute.toString(), "schema-only", "cycles-test=20"});
+NBCLIOptions opts = new NBCLIOptions(new String[]{absolute.toString(), "schema-only", "cycles-test=20"});
 List<Cmd> cmds = opts.getCommands();
 assertThat(cmds.size()).isGreaterThan(0);
 }

@@ -150,7 +150,7 @@ public class NBCLIScenarioParserTest {
 //TODO: This might change?
 String urlScenario = "https://raw.githubusercontent.com/nosqlbench/nosqlbench/main/engine-cli/src/test/resources/activities/scenario-test.yaml";

-NBCLIOptions opts = new NBCLIOptions(new String[]{ urlScenario, "schema-only", "cycles-test=20"});
+NBCLIOptions opts = new NBCLIOptions(new String[]{urlScenario, "schema-only", "cycles-test=20"});
 List<Cmd> cmds = opts.getCommands();
 assertThat(cmds.size()).isGreaterThan(0);
 }

@@ -163,17 +163,17 @@ public class NBCLIScenarioParserTest {

 @Test
 public void testSubStepSelection() {
-NBCLIOptions opts = new NBCLIOptions(new String[]{ "scenario-test", "schema-only", "cycles-test=20"});
+NBCLIOptions opts = new NBCLIOptions(new String[]{"scenario-test", "schema-only", "cycles-test=20"});
 List<Cmd> cmds = opts.getCommands();
 assertThat(cmds.size()).isEqualTo(1);
 assertThat(cmds.get(0).getParams()).isEqualTo(Map.of(
-"alias","scenariotest_schemaonly_000",
-"cycles-test","20",
-"driver","stdout",
-"tags","phase:schema",
-"workload","scenario-test"
+"alias", "scenariotest_schemaonly_schema",
+"cycles-test", "20",
+"driver", "stdout",
+"tags", "block:\"schema.*\"",
+"workload", "scenario-test"
 ));
-NBCLIOptions opts1 = new NBCLIOptions(new String[]{ "local/example-scenarios", "namedsteps.one", "testparam1=testvalue2"});
+NBCLIOptions opts1 = new NBCLIOptions(new String[]{"local/example-scenarios", "namedsteps.one", "testparam1=testvalue2"});
 List<Cmd> cmds1 = opts1.getCommands();
 assertThat(cmds1.size()).isEqualTo(1);
 assertThat(cmds1.get(0).getArg("cycles-test")).isNull();

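Taken together, the tests above pin down the CLI expansion contract. As a reading aid, here is a minimal sketch of exercising it directly — it assumes the same classpath as the test class (NBCLIOptions and Cmd from this diff), and the expected values are the ones asserted above:

```java
import java.util.List;

// Hedged sketch using only the API exercised by the tests above: a named
// scenario expands to one Cmd per step, and CLI params like cycles-test=20
// override the TEMPLATE(...) defaults in the scenario definition.
public class ScenarioExpansionSketch {
    public static void main(String[] args) {
        NBCLIOptions opts = new NBCLIOptions(new String[]{"scenario-test", "schema-only", "cycles-test=20"});
        List<Cmd> cmds = opts.getCommands();
        // Per the assertions above: one command, alias scenariotest_schemaonly_schema,
        // tags block:"schema.*", driver stdout.
        cmds.forEach(cmd -> System.out.println(cmd.getParams()));
    }
}
```
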
@@ -1,6 +1,6 @@
 name: alternate-format-test
 scenarios:
 default:
-schema: run driver=cql protocol_version=v4 tags=block:schema threads==1 cycles=UNDEF
+schema: run driver=cql protocol_version=v4 tags=block:"schema.*" threads==1 cycles=UNDEF
 rampup: run driver=cql protocol_version=v4 tags=block:rampup cycles=10000
 main: run driver=cql protocol_version=v4 tags=block:main_mixed cycles=10000

@ -1,12 +1,13 @@
|
||||
min_version: "4.17.15"
|
||||
min_version: "5.17.1"
|
||||
|
||||
scenarios:
|
||||
default:
|
||||
schema: run driver==stdout workload===scenario-test tags=block:schema
|
||||
schema: run driver==stdout workload===scenario-test tags=block:"schema.*"
|
||||
rampup: run driver=stdout workload===scenario-test tags=block:rampup cycles=TEMPLATE(cycles1,10)
|
||||
main: run driver=stdout workload===scenario-test tags=block:"main.*" cycles=TEMPLATE(cycles2,10)
|
||||
schema-only:
|
||||
- "run driver=stdout workload=scenario-test tags=phase:schema doundef==undef"
|
||||
schema: run driver=stdout workload==scenario-test tags=block:"schema.*" doundef==undef
|
||||
|
||||
template-test:
|
||||
with-template: run driver=stdout cycles=TEMPLATE(cycles-test,10)
|
||||
|
||||
@@ -22,6 +23,6 @@ blocks:
 main:
 ops:
 insert: |
-insert into puppies (test) values (1) ;
+insert into puppies (test) values (1);
 select: |
 select * from puppies;

@@ -1,8 +1,8 @@
 # example-scenarios.yaml
 scenarios:
 default:
-- run cycles=3 alias=A driver=stdout
-- run cycles=5 alias=B driver=stdout
+one: run cycles=3 alias=A driver=stdout
+two: run cycles=5 alias=B driver=stdout
 namedsteps:
 one: run cycles=3 alias=A driver=stdout testparam1=testvalue1
 two: run cycles=5 alias=B driver=stdout

@@ -74,7 +74,7 @@ public class GrafanaRegionAnalyzer implements Runnable {
 //[2020-12-15T05:04:37.232Z[GMT] - 2020-12-15T05:04:37.232Z[GMT]]
 //span:interval
 //details:
-// params: ActivityDef:(4)/{keycount=5000000000L, hosts=node1, main-cycles=500, threads=1, workload=./keyvalue.yaml, cycles=2, stride=2, tags=phase:schema, password=cassandra, rf=3, pooling=16:16:500, driver=cql, rampup-cycles=5000000000, alias=keyvalue_default_schema, valuecount=5000000000L, errors=count, username=cassandra}
+// params: ActivityDef:(4)/{keycount=5000000000L, hosts=node1, main-cycles=500, threads=1, workload=./keyvalue.yaml, cycles=2, stride=2, tags=block:'schema.*', password=cassandra, rf=3, pooling=16:16:500, driver=cql, rampup-cycles=5000000000, alias=keyvalue_default_schema, valuecount=5000000000L, errors=count, username=cassandra}
 //labels:
 // layer: Activity
 // alias: keyvalue_default_schema

@@ -53,7 +53,7 @@
 "span:interval",
 "appname:nosqlbench"
 ],
-"text": "session: scenario_20201215_050302_981\n[2020-12-15T05:03:04.813Z[GMT] - 2020-12-15T05:03:04.813Z[GMT]]\nspan:interval\ndetails:\n params: ActivityDef:(4)/{keycount\u003d5B, hosts\u003dnode1, main-cycles\u003d1B, threads\u003d1, workload\u003d./keyvalue.yaml, cycles\u003d2, stride\u003d2, tags\u003dphase:schema, password\u003dcassandra, rf\u003d3, pooling\u003d16:16:500, driver\u003dcql, rampup-cycles\u003d5B, alias\u003dkeyvalue_default_schema, valuecount\u003d5B, errors\u003dcount, username\u003dcassandra}\nlabels:\n layer: Activity\n alias: keyvalue_default_schema\n driver: cql\n workload: ./keyvalue.yaml\n session: scenario_20201215_050302_981\n span: interval\n appname: nosqlbench\n",
+"text": "session: scenario_20201215_050302_981\n[2020-12-15T05:03:04.813Z[GMT] - 2020-12-15T05:03:04.813Z[GMT]]\nspan:interval\ndetails:\n params: ActivityDef:(4)/{keycount\u003d5B, hosts\u003dnode1, main-cycles\u003d1B, threads\u003d1, workload\u003d./keyvalue.yaml, cycles\u003d2, stride\u003d2, tags\u003dblock:\"schema.*\", password\u003dcassandra, rf\u003d3, pooling\u003d16:16:500, driver\u003dcql, rampup-cycles\u003d5B, alias\u003dkeyvalue_default_schema, valuecount\u003d5B, errors\u003dcount, username\u003dcassandra}\nlabels:\n layer: Activity\n alias: keyvalue_default_schema\n driver: cql\n workload: ./keyvalue.yaml\n session: scenario_20201215_050302_981\n span: interval\n appname: nosqlbench\n",
 "time": 1608008584813,
 "timeEnd": 1608008588900,
 "updated": 1608008588918,

@@ -81,7 +81,7 @@
 "span:interval",
 "appname:nosqlbench"
 ],
-"text": "session: scenario_20201215_050355_270\n[2020-12-15T05:03:57.142Z[GMT] - 2020-12-15T05:03:57.142Z[GMT]]\nspan:interval\ndetails:\n params: ActivityDef:(4)/{keycount\u003d5000000000, hosts\u003dnode1, main-cycles\u003d5000000000, threads\u003d1, workload\u003d./keyvalue.yaml, cycles\u003d2, stride\u003d2, tags\u003dphase:schema, password\u003dcassandra, rf\u003d3, pooling\u003d16:16:500, driver\u003dcql, rampup-cycles\u003d5000000000, alias\u003dkeyvalue_default_schema, valuecount\u003d5000000000, errors\u003dcount, username\u003dcassandra}\nlabels:\n layer: Activity\n alias: keyvalue_default_schema\n driver: cql\n workload: ./keyvalue.yaml\n session: scenario_20201215_050355_270\n span: interval\n appname: nosqlbench\n",
+"text": "session: scenario_20201215_050355_270\n[2020-12-15T05:03:57.142Z[GMT] - 2020-12-15T05:03:57.142Z[GMT]]\nspan:interval\ndetails:\n params: ActivityDef:(4)/{keycount\u003d5000000000, hosts\u003dnode1, main-cycles\u003d5000000000, threads\u003d1, workload\u003d./keyvalue.yaml, cycles\u003d2, stride\u003d2, tags\u003dblock:\"schema.*\", password\u003dcassandra, rf\u003d3, pooling\u003d16:16:500, driver\u003dcql, rampup-cycles\u003d5000000000, alias\u003dkeyvalue_default_schema, valuecount\u003d5000000000, errors\u003dcount, username\u003dcassandra}\nlabels:\n layer: Activity\n alias: keyvalue_default_schema\n driver: cql\n workload: ./keyvalue.yaml\n session: scenario_20201215_050355_270\n span: interval\n appname: nosqlbench\n",
 "time": 1608008637142,
 "timeEnd": 1608008641044,
 "updated": 1608008641063,

@@ -109,7 +109,7 @@
 "span:interval",
 "appname:nosqlbench"
 ],
-"text": "session: scenario_20201215_050435_240\n[2020-12-15T05:04:37.232Z[GMT] - 2020-12-15T05:04:37.232Z[GMT]]\nspan:interval\ndetails:\n params: ActivityDef:(4)/{keycount\u003d5000000000L, hosts\u003dnode1, main-cycles\u003d500, threads\u003d1, workload\u003d./keyvalue.yaml, cycles\u003d2, stride\u003d2, tags\u003dphase:schema, password\u003dcassandra, rf\u003d3, pooling\u003d16:16:500, driver\u003dcql, rampup-cycles\u003d5000000000, alias\u003dkeyvalue_default_schema, valuecount\u003d5000000000L, errors\u003dcount, username\u003dcassandra}\nlabels:\n layer: Activity\n alias: keyvalue_default_schema\n driver: cql\n workload: ./keyvalue.yaml\n session: scenario_20201215_050435_240\n span: interval\n appname: nosqlbench\n",
+"text": "session: scenario_20201215_050435_240\n[2020-12-15T05:04:37.232Z[GMT] - 2020-12-15T05:04:37.232Z[GMT]]\nspan:interval\ndetails:\n params: ActivityDef:(4)/{keycount\u003d5000000000L, hosts\u003dnode1, main-cycles\u003d500, threads\u003d1, workload\u003d./keyvalue.yaml, cycles\u003d2, stride\u003d2, tags\u003dblock:\"schema.*\", password\u003dcassandra, rf\u003d3, pooling\u003d16:16:500, driver\u003dcql, rampup-cycles\u003d5000000000, alias\u003dkeyvalue_default_schema, valuecount\u003d5000000000L, errors\u003dcount, username\u003dcassandra}\nlabels:\n layer: Activity\n alias: keyvalue_default_schema\n driver: cql\n workload: ./keyvalue.yaml\n session: scenario_20201215_050435_240\n span: interval\n appname: nosqlbench\n",
 "time": 1608008677232,
 "timeEnd": 1608008681038,
 "updated": 1608008681058,

@@ -137,7 +137,7 @@
 "span:interval",
 "appname:nosqlbench"
 ],
-"text": "session: scenario_20201215_050435_240\n[2020-12-15T05:04:41.120Z[GMT] - 2020-12-15T05:04:41.120Z[GMT]]\nspan:interval\ndetails:\n params: ActivityDef:(3)/{keycount\u003d5000000000L, hosts\u003dnode1, main-cycles\u003d500, threads\u003d960, workload\u003d./keyvalue.yaml, cycles\u003d5000000000, stride\u003d1, tags\u003dphase:rampup, password\u003dcassandra, rf\u003d3, pooling\u003d16:16:500, driver\u003dcql, rampup-cycles\u003d5000000000, alias\u003dkeyvalue_default_rampup, valuecount\u003d5000000000L, errors\u003dcount, username\u003dcassandra}\nlabels:\n layer: Activity\n alias: keyvalue_default_rampup\n driver: cql\n workload: ./keyvalue.yaml\n session: scenario_20201215_050435_240\n span: interval\n appname: nosqlbench\n",
+"text": "session: scenario_20201215_050435_240\n[2020-12-15T05:04:41.120Z[GMT] - 2020-12-15T05:04:41.120Z[GMT]]\nspan:interval\ndetails:\n params: ActivityDef:(3)/{keycount\u003d5000000000L, hosts\u003dnode1, main-cycles\u003d500, threads\u003d960, workload\u003d./keyvalue.yaml, cycles\u003d5000000000, stride\u003d1, tags\u003dblock:rampup, password\u003dcassandra, rf\u003d3, pooling\u003d16:16:500, driver\u003dcql, rampup-cycles\u003d5000000000, alias\u003dkeyvalue_default_rampup, valuecount\u003d5000000000L, errors\u003dcount, username\u003dcassandra}\nlabels:\n layer: Activity\n alias: keyvalue_default_rampup\n driver: cql\n workload: ./keyvalue.yaml\n session: scenario_20201215_050435_240\n span: interval\n appname: nosqlbench\n",
 "time": 1608008681120,
 "timeEnd": 1608042107780,
 "updated": 1608042107859,

@@ -165,7 +165,7 @@
 "span:interval",
 "appname:nosqlbench"
 ],
-"text": "session: scenario_20201215_050435_240\n[2020-12-15T14:21:47.918Z[GMT] - 2020-12-15T14:21:47.918Z[GMT]]\nspan:interval\ndetails:\n params: ActivityDef:(3)/{keycount\u003d5000000000L, hosts\u003dnode1, main-cycles\u003d500, threads\u003d500, workload\u003d./keyvalue.yaml, cycles\u003d500, stride\u003d10, tags\u003dphase:main, password\u003dcassandra, rf\u003d3, pooling\u003d16:16:500, driver\u003dcql, rampup-cycles\u003d5000000000, alias\u003dkeyvalue_default_main, valuecount\u003d5000000000L, errors\u003dcount, username\u003dcassandra}\nlabels:\n layer: Activity\n alias: keyvalue_default_main\n driver: cql\n workload: ./keyvalue.yaml\n session: scenario_20201215_050435_240\n span: interval\n appname: nosqlbench\n",
+"text": "session: scenario_20201215_050435_240\n[2020-12-15T14:21:47.918Z[GMT] - 2020-12-15T14:21:47.918Z[GMT]]\nspan:interval\ndetails:\n params: ActivityDef:(3)/{keycount\u003d5000000000L, hosts\u003dnode1, main-cycles\u003d500, threads\u003d500, workload\u003d./keyvalue.yaml, cycles\u003d500, stride\u003d10, tags\u003dblock:main, password\u003dcassandra, rf\u003d3, pooling\u003d16:16:500, driver\u003dcql, rampup-cycles\u003d5000000000, alias\u003dkeyvalue_default_main, valuecount\u003d5000000000L, errors\u003dcount, username\u003dcassandra}\nlabels:\n layer: Activity\n alias: keyvalue_default_main\n driver: cql\n workload: ./keyvalue.yaml\n session: scenario_20201215_050435_240\n span: interval\n appname: nosqlbench\n",
 "time": 1608042107918,
 "timeEnd": 1608042108099,
 "updated": 1608042108117,

@@ -193,7 +193,7 @@
 "span:interval",
 "appname:nosqlbench"
 ],
-"text": "session: scenario_20201215_050435_240\n[2020-12-15T14:21:47.918Z[GMT] - 2020-12-15T14:21:47.918Z[GMT]]\nspan:interval\ndetails:\n params: ActivityDef:(3)/{keycount\u003d5000000000L, hosts\u003dnode1, main-cycles\u003d500, threads\u003d500, workload\u003d./keyvalue.yaml, cycles\u003d500, stride\u003d10, tags\u003dphase:main, password\u003dcassandra, rf\u003d3, pooling\u003d16:16:500, driver\u003dcql, rampup-cycles\u003d5000000000, alias\u003dkeyvalue_default_main, valuecount\u003d5000000000L, errors\u003dcount, username\u003dcassandra}\nlabels:\n layer: Activity\n alias: keyvalue_default_main\n driver: cql\n workload: ./keyvalue.yaml\n session: scenario_20201215_050435_240\n span: interval\n appname: nosqlbench\n",
+"text": "session: scenario_20201215_050435_240\n[2020-12-15T14:21:47.918Z[GMT] - 2020-12-15T14:21:47.918Z[GMT]]\nspan:interval\ndetails:\n params: ActivityDef:(3)/{keycount\u003d5000000000L, hosts\u003dnode1, main-cycles\u003d500, threads\u003d500, workload\u003d./keyvalue.yaml, cycles\u003d500, stride\u003d10, tags\u003dblock:main, password\u003dcassandra, rf\u003d3, pooling\u003d16:16:500, driver\u003dcql, rampup-cycles\u003d5000000000, alias\u003dkeyvalue_default_main, valuecount\u003d5000000000L, errors\u003dcount, username\u003dcassandra}\nlabels:\n layer: Activity\n alias: keyvalue_default_main\n driver: cql\n workload: ./keyvalue.yaml\n session: scenario_20201215_050435_240\n span: interval\n appname: nosqlbench\n",
 "time": 1608042107918,
 "timeEnd": 1608042108127,
 "updated": 1608042108144,

@@ -221,7 +221,7 @@
 "span:interval",
 "appname:nosqlbench"
 ],
-"text": "session: scenario_20201215_050435_240\n[2020-12-15T05:04:41.120Z[GMT] - 2020-12-15T05:04:41.120Z[GMT]]\nspan:interval\ndetails:\n params: ActivityDef:(3)/{keycount\u003d5000000000L, hosts\u003dnode1, main-cycles\u003d500, threads\u003d960, workload\u003d./keyvalue.yaml, cycles\u003d5000000000, stride\u003d1, tags\u003dphase:rampup, password\u003dcassandra, rf\u003d3, pooling\u003d16:16:500, driver\u003dcql, rampup-cycles\u003d5000000000, alias\u003dkeyvalue_default_rampup, valuecount\u003d5000000000L, errors\u003dcount, username\u003dcassandra}\nlabels:\n layer: Activity\n alias: keyvalue_default_rampup\n driver: cql\n workload: ./keyvalue.yaml\n session: scenario_20201215_050435_240\n span: interval\n appname: nosqlbench\n",
+"text": "session: scenario_20201215_050435_240\n[2020-12-15T05:04:41.120Z[GMT] - 2020-12-15T05:04:41.120Z[GMT]]\nspan:interval\ndetails:\n params: ActivityDef:(3)/{keycount\u003d5000000000L, hosts\u003dnode1, main-cycles\u003d500, threads\u003d960, workload\u003d./keyvalue.yaml, cycles\u003d5000000000, stride\u003d1, tags\u003dblock:rampup, password\u003dcassandra, rf\u003d3, pooling\u003d16:16:500, driver\u003dcql, rampup-cycles\u003d5000000000, alias\u003dkeyvalue_default_rampup, valuecount\u003d5000000000L, errors\u003dcount, username\u003dcassandra}\nlabels:\n layer: Activity\n alias: keyvalue_default_rampup\n driver: cql\n workload: ./keyvalue.yaml\n session: scenario_20201215_050435_240\n span: interval\n appname: nosqlbench\n",
 "time": 1608008681120,
 "timeEnd": 1608042108127,
 "updated": 1608042108167,

@@ -249,7 +249,7 @@
 "span:interval",
 "appname:nosqlbench"
 ],
-"text": "session: scenario_20201215_050435_240\n[2020-12-15T05:04:37.232Z[GMT] - 2020-12-15T05:04:37.232Z[GMT]]\nspan:interval\ndetails:\n params: ActivityDef:(4)/{keycount\u003d5000000000L, hosts\u003dnode1, main-cycles\u003d500, threads\u003d1, workload\u003d./keyvalue.yaml, cycles\u003d2, stride\u003d2, tags\u003dphase:schema, password\u003dcassandra, rf\u003d3, pooling\u003d16:16:500, driver\u003dcql, rampup-cycles\u003d5000000000, alias\u003dkeyvalue_default_schema, valuecount\u003d5000000000L, errors\u003dcount, username\u003dcassandra}\nlabels:\n layer: Activity\n alias: keyvalue_default_schema\n driver: cql\n workload: ./keyvalue.yaml\n session: scenario_20201215_050435_240\n span: interval\n appname: nosqlbench\n",
+"text": "session: scenario_20201215_050435_240\n[2020-12-15T05:04:37.232Z[GMT] - 2020-12-15T05:04:37.232Z[GMT]]\nspan:interval\ndetails:\n params: ActivityDef:(4)/{keycount\u003d5000000000L, hosts\u003dnode1, main-cycles\u003d500, threads\u003d1, workload\u003d./keyvalue.yaml, cycles\u003d2, stride\u003d2, tags\u003dblock:\"schema.*\", password\u003dcassandra, rf\u003d3, pooling\u003d16:16:500, driver\u003dcql, rampup-cycles\u003d5000000000, alias\u003dkeyvalue_default_schema, valuecount\u003d5000000000L, errors\u003dcount, username\u003dcassandra}\nlabels:\n layer: Activity\n alias: keyvalue_default_schema\n driver: cql\n workload: ./keyvalue.yaml\n session: scenario_20201215_050435_240\n span: interval\n appname: nosqlbench\n",
 "time": 1608008677232,
 "timeEnd": 1608042108127,
 "updated": 1608042108190,

@@ -79,31 +79,33 @@ public class ActivityExecutor implements ActivityController, ParameterMap.Listen
 // TODO: Doc how uninitialized activities do not propagate parameter map changes and how
 // TODO: this is different from preventing modification to uninitialized activities

 // TODO: Determine whether this should really be synchronized

 /**
 * Simply stop the motors
 */
-public void stopActivity() {
+public void stopActivity() {
 logger.info(() -> "stopping activity in progress: " + this.getActivityDef().getAlias());

 activity.setRunState(RunState.Stopping);
 motors.forEach(Motor::requestStop);
-tally.awaitNoneOther(RunState.Stopped, RunState.Finished);
+tally.awaitNoneOther(RunState.Stopped, RunState.Finished, RunState.Errored);

 shutdownExecutorService(Integer.MAX_VALUE);
-tally.awaitNoneOther(RunState.Stopped, RunState.Finished);
+tally.awaitNoneOther(RunState.Stopped, RunState.Finished, RunState.Errored);
 activity.setRunState(RunState.Stopped);

 logger.info(() -> "stopped: " + this.getActivityDef().getAlias() + " with " + motors.size() + " slots");

 Annotators.recordAnnotation(Annotation.newBuilder()
-.session(sessionId)
-.interval(this.startedAt, this.stoppedAt)
-.layer(Layer.Activity)
-.label("alias", getActivityDef().getAlias())
-.label("driver", getActivityDef().getActivityType())
-.label("workload", getActivityDef().getParams().getOptionalString("workload").orElse("none"))
-.detail("params", getActivityDef().toString())
-.build()
+.session(sessionId)
+.interval(this.startedAt, this.stoppedAt)
+.layer(Layer.Activity)
+.label("alias", getActivityDef().getAlias())
+.label("driver", getActivityDef().getActivityType())
+.label("workload", getActivityDef().getParams().getOptionalString("workload").orElse("none"))
+.detail("params", getActivityDef().toString())
+.build()
 );
 }

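Worth pausing on the awaitNoneOther changes above: without RunState.Errored in the allowed set, a motor thread that died with an exception would leave the shutdown path waiting forever. Here is a toy model of that await idea, purely illustrative — the real RunStateTally is not shown in this diff, so its actual shape is an assumption:

```java
import java.util.Arrays;
import java.util.EnumSet;
import java.util.Set;

// Toy model of the await-until-quiesced idea. Not the real RunStateTally API:
// we simply block until every observed state is inside the allowed set, which
// is why Errored must be allowed once failed threads are a possibility.
public class ToyTally {
    enum RunState { Uninitialized, Starting, Running, Stopping, Stopped, Finished, Errored }

    private final Set<RunState> observed = EnumSet.noneOf(RunState.class);

    public synchronized void report(RunState state) {
        observed.add(state);
        notifyAll();
    }

    public synchronized void awaitNoneOther(RunState... allowed) throws InterruptedException {
        Set<RunState> ok = EnumSet.noneOf(RunState.class);
        ok.addAll(Arrays.asList(allowed));
        while (!ok.containsAll(observed)) {
            wait(); // wakes on report(); an Errored report would otherwise park here forever
        }
    }
}
```
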
@@ -123,14 +125,14 @@ public class ActivityExecutor implements ActivityController, ParameterMap.Listen
 logger.info(() -> "stopped: " + this.getActivityDef().getAlias() + " with " + motors.size() + " slots");

 Annotators.recordAnnotation(Annotation.newBuilder()
-.session(sessionId)
-.interval(this.startedAt, this.stoppedAt)
-.layer(Layer.Activity)
-.label("alias", getActivityDef().getAlias())
-.label("driver", getActivityDef().getActivityType())
-.label("workload", getActivityDef().getParams().getOptionalString("workload").orElse("none"))
-.detail("params", getActivityDef().toString())
-.build()
+.session(sessionId)
+.interval(this.startedAt, this.stoppedAt)
+.layer(Layer.Activity)
+.label("alias", getActivityDef().getAlias())
+.label("driver", getActivityDef().getActivityType())
+.label("workload", getActivityDef().getParams().getOptionalString("workload").orElse("none"))
+.detail("params", getActivityDef().toString())
+.build()
 );
 }

@@ -183,8 +185,9 @@ public class ActivityExecutor implements ActivityController, ParameterMap.Listen
 /**
 * Shutdown the activity executor, with a grace period for the motor threads.
 *
-* @param initialMillisToWait milliseconds to wait after graceful shutdownActivity request, before forcing
-* everything to stop
+* @param initialMillisToWait
+* milliseconds to wait after graceful shutdownActivity request, before forcing
+* everything to stop
 */
 public synchronized void forceStopScenarioAndThrow(int initialMillisToWait, boolean rethrow) {
 Exception exception = forceStopActivity(initialMillisToWait);

@@ -210,10 +213,10 @@ public class ActivityExecutor implements ActivityController, ParameterMap.Listen
 adjustMotorCountToThreadParam(activity.getActivityDef());
 }
 motors.stream()
-.filter(m -> (m instanceof ActivityDefObserver))
+.filter(m -> (m instanceof ActivityDefObserver))
 // .filter(m -> m.getSlotStateTracker().getSlotState() != RunState.Uninitialized)
 // .filter(m -> m.getSlotStateTracker().getSlotState() != RunState.Starting)
-.forEach(m -> ((ActivityDefObserver) m).onActivityDefUpdate(activityDef));
+.forEach(m -> ((ActivityDefObserver) m).onActivityDefUpdate(activityDef));
 }
 }

@@ -227,14 +230,15 @@ public class ActivityExecutor implements ActivityController, ParameterMap.Listen

 private String getSlotStatus() {
 return motors.stream()
-.map(m -> m.getState().get().getCode())
-.collect(Collectors.joining(",", "[", "]"));
+.map(m -> m.getState().get().getCode())
+.collect(Collectors.joining(",", "[", "]"));
 }

 /**
 * Stop extra motors, start missing motors
 *
-* @param activityDef the activityDef for this activity instance
+* @param activityDef
+* the activityDef for this activity instance
 */
 private void adjustMotorCountToThreadParam(ActivityDef activityDef) {
 logger.trace(() -> ">-pre-adjust->" + getSlotStatus());

@@ -276,7 +280,7 @@ public class ActivityExecutor implements ActivityController, ParameterMap.Listen
 }
 }

-private void alignMotorStateToIntendedActivityState() {
+private synchronized void alignMotorStateToIntendedActivityState() {
 RunState intended = activity.getRunState();
 logger.trace(() -> "ADJUSTING to INTENDED " + intended);
 switch (intended) {

@@ -285,17 +289,17 @@ public class ActivityExecutor implements ActivityController, ParameterMap.Listen
 case Running:
 case Starting:
 motors.stream()
-.filter(m -> m.getState().get() != RunState.Running)
-.filter(m -> m.getState().get() != RunState.Finished)
-.filter(m -> m.getState().get() != RunState.Starting)
-.forEach(m -> {
-executorService.execute(m);
-});
+.filter(m -> m.getState().get() != RunState.Running)
+.filter(m -> m.getState().get() != RunState.Finished)
+.filter(m -> m.getState().get() != RunState.Starting)
+.forEach(m -> {
+executorService.execute(m);
+});
 break;
 case Stopped:
 motors.stream()
-.filter(m -> m.getState().get() != RunState.Stopped)
-.forEach(Motor::requestStop);
+.filter(m -> m.getState().get() != RunState.Stopped)
+.forEach(Motor::requestStop);
 break;
 case Finished:
 case Stopping:

@@ -311,26 +315,28 @@ public class ActivityExecutor implements ActivityController, ParameterMap.Listen
 private void awaitAlignmentOfMotorStateToActivityState() {

 logger.debug(() -> "awaiting state alignment from " + activity.getRunState());
+RunStateImage states = null;
 switch (activity.getRunState()) {
 case Starting:
 case Running:
-tally.awaitNoneOther(RunState.Running, RunState.Finished);
+states = tally.awaitNoneOther(RunState.Running, RunState.Finished);
 break;
 case Errored:
 case Stopping:
 case Stopped:
-tally.awaitNoneOther(RunState.Stopped, RunState.Finished, RunState.Errored);
+states = tally.awaitNoneOther(RunState.Stopped, RunState.Finished, RunState.Errored);
 break;
 case Uninitialized:
 break;
 case Finished:
-tally.awaitNoneOther(RunState.Finished);
+states = tally.awaitNoneOther(RunState.Finished);
 break;
 default:
 throw new RuntimeException("Unmatched run state:" + activity.getRunState());
 }
-logger.debug("activity and threads are aligned to state " + activity.getRunState() + " for " + this.getActivity().getAlias());

+RunState previousState = activity.getRunState();
+activity.setRunState(states.getMaxState());
+logger.debug("activity and threads are aligned to state " + previousState + " for " + this.getActivity().getAlias() + ", and advanced to " + activity.getRunState());
 }

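A side effect worth noting in the hunk above: instead of only logging, the executor now captures the awaited RunStateImage and advances the activity to its max observed state. RunStateImage itself is not shown in this diff, so the following is a hedged sketch of the "max state" idea under the assumption that run states are declared in lifecycle order:

```java
import java.util.Collections;
import java.util.List;

// Illustrative only: if RunState constants are declared in lifecycle order,
// the maximum (by enum natural ordering) of the observed states summarizes
// how far the activity has actually progressed.
public class MaxStateSketch {
    enum RunState { Uninitialized, Starting, Running, Stopping, Stopped, Finished, Errored }

    static RunState maxState(List<RunState> observed) {
        return Collections.max(observed); // enum natural order = declaration order
    }

    public static void main(String[] args) {
        System.out.println(maxState(List.of(RunState.Stopped, RunState.Finished))); // Finished
    }
}
```
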
@@ -349,7 +355,7 @@ public class ActivityExecutor implements ActivityController, ParameterMap.Listen
 return activity;
 }

-public void notifyException(Thread t, Throwable e) {
+public synchronized void notifyException(Thread t, Throwable e) {
 logger.debug(() -> "Uncaught exception in activity thread forwarded to activity executor: " + e.getMessage());
 this.exception = new RuntimeException("Error in activity thread " + t.getName(), e);
 this.requestStopMotors();

@@ -391,12 +397,16 @@ public class ActivityExecutor implements ActivityController, ParameterMap.Listen
 // instantiate and configure fixtures that need to be present
 // before threads start running such as metrics instruments
 activity.initActivity();
+startMotorExecutorService();
+startRunningActivityThreads();
+awaitMotorsAtLeastRunning();
 logger.debug("STARTED " + activityDef.getAlias());
 awaitActivityCompletion();
-activity.shutdownActivity();
-activity.closeAutoCloseables();
 } catch (Exception e) {
 this.exception = e;
+} finally {
+activity.shutdownActivity();
+activity.closeAutoCloseables();
 }
 ExecutionResult result = new ExecutionResult(startedAt, stoppedAt, "", exception);
 return result;

@@ -420,7 +430,10 @@ public class ActivityExecutor implements ActivityController, ParameterMap.Listen
 }

 public synchronized void startActivity() {
-// we need an executor service to run motor threads on
+RunStateImage startable = tally.awaitNoneOther(1000L, RunState.Uninitialized, RunState.Stopped);
+if (startable.isTimeout()) {
+throw new RuntimeException("Unable to start activity '" + getActivity().getAlias() + "' which is in state " + startable);
+}
 startMotorExecutorService();
 startRunningActivityThreads();
 awaitMotorsAtLeastRunning();

@@ -471,10 +484,10 @@ public class ActivityExecutor implements ActivityController, ParameterMap.Listen

 private void startMotorExecutorService() {
 this.executorService = new ThreadPoolExecutor(
-0, Integer.MAX_VALUE,
-0L, TimeUnit.SECONDS,
-new SynchronousQueue<>(),
-new IndexedThreadFactory(activity.getAlias(), new ActivityExceptionHandler(this))
+0, Integer.MAX_VALUE,
+0L, TimeUnit.SECONDS,
+new SynchronousQueue<>(),
+new IndexedThreadFactory(activity.getAlias(), new ActivityExceptionHandler(this))
 );
 }

@@ -491,14 +504,14 @@ public class ActivityExecutor implements ActivityController, ParameterMap.Listen

 logger.info(() -> "starting activity " + activity.getAlias() + " for cycles " + activity.getCycleSummary());
 Annotators.recordAnnotation(Annotation.newBuilder()
-.session(sessionId)
-.now()
-.layer(Layer.Activity)
-.label("alias", getActivityDef().getAlias())
-.label("driver", getActivityDef().getActivityType())
-.label("workload", getActivityDef().getParams().getOptionalString("workload").orElse("none"))
-.detail("params", getActivityDef().toString())
-.build()
+.session(sessionId)
+.now()
+.layer(Layer.Activity)
+.label("alias", getActivityDef().getAlias())
+.label("driver", getActivityDef().getActivityType())
+.label("workload", getActivityDef().getParams().getOptionalString("workload").orElse("none"))
+.detail("params", getActivityDef().toString())
+.build()
 );

 activitylogger.debug("START/before alias=(" + activity.getAlias() + ")");

@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2022 nosqlbench
+* Copyright (c) 2022-2023 nosqlbench
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.

@@ -41,6 +41,7 @@ public class ActivityLoader {
 }

 public synchronized Activity loadActivity(ActivityDef activityDef) {
+activityDef= activityDef.deprecate("yaml","workload").deprecate("type","driver");
 Activity activity = new StandardActivityType(activityDef).getAssembledActivity(activityDef, activityMap);
 activityMap.put(activity.getAlias(),activity);
 logger.debug("Resolved activity for alias '" + activityDef.getAlias() + "'");

|
@ -86,8 +86,6 @@ public class ScenarioController {
|
||||
Future<ExecutionResult> startedActivity = activitiesExecutor.submit(executor);
|
||||
ActivityRuntimeInfo activityRuntimeInfo = new ActivityRuntimeInfo(activity, startedActivity, executor);
|
||||
this.activityInfoMap.put(activity.getAlias(), activityRuntimeInfo);
|
||||
executor.startActivity();
|
||||
scenariologger.debug("STARTED " + activityDef.getAlias());
|
||||
}
|
||||
return this.activityInfoMap.get(activityDef.getAlias());
|
||||
}
|
||||
|
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2022 nosqlbench
+* Copyright (c) 2022-2023 nosqlbench
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.

@@ -45,45 +45,48 @@ import static org.assertj.core.api.Assertions.fail;
 class ActivityExecutorTest {
 private static final Logger logger = LogManager.getLogger(ActivityExecutorTest.class);

-@Test
-synchronized void testRestart() {
-ActivityDef activityDef = ActivityDef.parseActivityDef("driver=diag;alias=test;cycles=1000;op=initdelay:initdelay=5000;");
-new ActivityTypeLoader().load(activityDef);
-
-final Activity activity = new DelayedInitActivity(activityDef);
-InputDispenser inputDispenser = new CoreInputDispenser(activity);
-ActionDispenser adisp = new CoreActionDispenser(activity);
-OutputDispenser tdisp = CoreServices.getOutputDispenser(activity).orElse(null);
-
-final MotorDispenser<?> mdisp = new CoreMotorDispenser(activity, inputDispenser, adisp, tdisp);
-activity.setActionDispenserDelegate(adisp);
-activity.setOutputDispenserDelegate(tdisp);
-activity.setInputDispenserDelegate(inputDispenser);
-activity.setMotorDispenserDelegate(mdisp);
-
-final ExecutorService executor = Executors.newCachedThreadPool();
-ActivityExecutor activityExecutor = new ActivityExecutor(activity, "test-restart");
-final Future<ExecutionResult> future = executor.submit(activityExecutor);
-try {
-activityDef.setThreads(1);
-activityExecutor.startActivity();
-activityExecutor.stopActivity();
-activityExecutor.startActivity();
-activityExecutor.startActivity();
-future.get();
-Thread.sleep(500L);
-} catch (Exception e) {
-throw new RuntimeException(e);
-}
-executor.shutdown();
-assertThat(inputDispenser.getInput(10).getInputSegment(3)).isNull();
-
-}
+// TODO: Design review of this mechanism
+// @Test
+// synchronized void testRestart() {
+// ActivityDef activityDef = ActivityDef.parseActivityDef("driver=diag;alias=test-restart;cycles=1000;cyclerate=10;op=initdelay:initdelay=5000;");
+// new ActivityTypeLoader().load(activityDef);
+//
+// final Activity activity = new DelayedInitActivity(activityDef);
+// InputDispenser inputDispenser = new CoreInputDispenser(activity);
+// ActionDispenser adisp = new CoreActionDispenser(activity);
+// OutputDispenser tdisp = CoreServices.getOutputDispenser(activity).orElse(null);
+//
+// final MotorDispenser<?> mdisp = new CoreMotorDispenser(activity, inputDispenser, adisp, tdisp);
+// activity.setActionDispenserDelegate(adisp);
+// activity.setOutputDispenserDelegate(tdisp);
+// activity.setInputDispenserDelegate(inputDispenser);
+// activity.setMotorDispenserDelegate(mdisp);
+//
+// final ExecutorService executor = Executors.newCachedThreadPool();
+// ActivityExecutor activityExecutor = new ActivityExecutor(activity, "test-restart");
+// final Future<ExecutionResult> future = executor.submit(activityExecutor);
+// try {
+// activityDef.setThreads(1);
+// activityExecutor.startActivity();
+// Thread.sleep(100L);
+// activityExecutor.stopActivity();
+// Thread.sleep(100L);
+// activityExecutor.startActivity();
+// Thread.sleep(100L);
+// activityExecutor.stopActivity();
+// future.get();
+// } catch (Exception e) {
+// throw new RuntimeException(e);
+// }
+// executor.shutdown();
+// assertThat(inputDispenser.getInput(10).getInputSegment(3)).isNotNull();
+//
+// }

 @Test
 synchronized void testDelayedStartSanity() {

-final ActivityDef activityDef = ActivityDef.parseActivityDef("driver=diag;alias=test;cycles=1000;initdelay=5000;");
+final ActivityDef activityDef = ActivityDef.parseActivityDef("driver=diag;alias=test-delayed-start;cycles=1000;initdelay=2000;");
 new ActivityTypeLoader().load(activityDef);

 final Activity activity = new DelayedInitActivity(activityDef);

@@ -119,7 +122,7 @@ class ActivityExecutorTest {
 @Test
 synchronized void testNewActivityExecutor() {

-ActivityDef activityDef = ActivityDef.parseActivityDef("driver=diag;alias=test;cycles=1000;initdelay=5000;");
+ActivityDef activityDef = ActivityDef.parseActivityDef("driver=diag;alias=test-dynamic-params;cycles=1000;initdelay=5000;");
 new ActivityTypeLoader().load(activityDef);

 getActivityMotorFactory(motorActionDelay(999), new AtomicInput(activityDef));

@@ -140,7 +143,7 @@ class ActivityExecutorTest {
 activityDef.setThreads(5);
 activityExecutor.startActivity();

-int[] speeds = new int[]{1, 2000, 5, 2000, 2, 2000};
+int[] speeds = new int[]{1, 50, 5, 50, 2, 50};
 for (int offset = 0; offset < speeds.length; offset += 2) {
 int threadTarget = speeds[offset];
 int threadTime = speeds[offset + 1];

@@ -158,7 +161,7 @@ class ActivityExecutorTest {
 // Used for slowing the roll due to state transitions in test.
 try {
 activityExecutor.stopActivity();
-Thread.sleep(2000L);
+// Thread.sleep(2000L);
 } catch (Exception e) {
 fail("Not expecting exception", e);
 }

@@ -1,36 +0,0 @@
-/*
-* Copyright (c) 2022 nosqlbench
-*
-* Licensed under the Apache License, Version 2.0 (the "License");
-* you may not use this file except in compliance with the License.
-* You may obtain a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-
-package io.nosqlbench.engine.core.experimental;
-
-import org.junit.jupiter.api.Test;
-
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.CompletionStage;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-
-public class CompletableTests {
-
-@Test
-public void testCompletionStages() {
-CompletableFuture<Object> f = new CompletableFuture<>();
-ExecutorService executorService = Executors.newCachedThreadPool();
-CompletableFuture<Object> objectCompletableFuture = f.completeAsync(() -> "foo", executorService);
-boolean bar = objectCompletableFuture.complete("bar");
-
-}
-}

@@ -33,7 +33,7 @@ You can mark statements as schema phase statements by adding this set of
 tags to the statements, either directly, or by block:

 tags:
-phase: schema
+block: schema

 ## Rampup phase

@@ -64,7 +64,7 @@ You can mark statements as rampup phase statements by adding this set of
 tags to the statements, either directly, or by block:

 tags:
-phase: rampup
+block: rampup

 ## Main phase

@@ -76,4 +76,4 @@ You can mark statement as schema phase statements by adding this set of
 tags to the statements, either directly, or by block:

 tags:
-phase: main
+block: main

@@ -15,7 +15,7 @@ command line, go ahead and execute the following command, replacing
 the `host=<host-or-ip>` with that of one of your database nodes.

 ```text
-./nb run driver=cql workload=cql-keyvalue tags=phase:schema host=<host-or-ip>
+./nb run driver=cql workload=cql-keyvalue tags=block:"schema.*" host=<host-or-ip>
 ```

 This command is creating the following schema in your database:

@@ -45,8 +45,8 @@ defines the activity.
 In this example, we use `cql-keyvalue` which is a pre-built workload that
 is packaged with nosqlbench.

-`tags=phase:schema` tells nosqlbench to run the yaml block that has
-the `phase:schema` defined as one of its tags.
+`tags=block:"schema.*"` tells nosqlbench to run the yaml block that has
+the `block:"schema.*"` defined as one of its tags.

 In this example, that is the DDL portion of the `cql-keyvalue`
 workload. `host=...` tells nosqlbench how to connect to your database,

@@ -68,7 +68,7 @@ statements.

 Go ahead and execute the following command:

-./nb run driver=stdout workload=cql-keyvalue tags=phase:rampup cycles=10
+./nb run driver=stdout workload=cql-keyvalue tags=block:rampup cycles=10

 You should see 10 of the following statements in your console

@@ -91,12 +91,12 @@ be the same from run to run.
 Now we are ready to write some data to our database. Go ahead and execute
 the following from your command line:

-./nb run driver=cql workload=cql-keyvalue tags=phase:rampup host=<host-or-ip> cycles=100k --progress console:1s
+./nb run driver=cql workload=cql-keyvalue tags=block:rampup host=<host-or-ip> cycles=100k --progress console:1s

 Note the differences between this and the command that we used to generate
 the schema.

-`tags=phase:rampup` is running the yaml block in `cql-keyvalue` that has
+`tags=block:rampup` is running the yaml block in `cql-keyvalue` that has
 only INSERT statements.

 `cycles=100k` will run a total of 100,000 operations, in this case,

@@ -139,7 +139,7 @@ Now that we have a base dataset of 100k rows in the database, we will now
 run a mixed read / write workload, by default this runs a 50% read / 50%
 write workload.

-./nb run driver=cql workload=cql-keyvalue tags=phase:main host=<host-or-ip> cycles=100k cyclerate=5000 threads=50 --progress console:1s
+./nb run driver=cql workload=cql-keyvalue tags=block:main host=<host-or-ip> cycles=100k cyclerate=5000 threads=50 --progress console:1s

 You should see output that looks like this:

@@ -174,7 +174,7 @@ cql-keyvalue: 100.00%/Finished (details: min=0 cycle=100000 max=100000)

 We have a few new command line options here:

-`tags=phase:main` is using a new block in our activity's yaml that
+`tags=block:main` is using a new block in our activity's yaml that
 contains both read and write queries.

 `threads=50` is an important one. The default for nosqlbench is to run

@@ -103,8 +103,8 @@ semicolon, then a newline is also added immediately after.

 ~~~text
 ./nb \
-start driver=stdout alias=a cycles=100K workload=cql-iot tags=phase:main\
-start driver=stdout alias=b cycles=200K workload=cql-iot tags=phase:main\
+start driver=stdout alias=a cycles=100K workload=cql-iot tags=block:main\
+start driver=stdout alias=b cycles=200K workload=cql-iot tags=block:main\
 waitmillis 10000 \
 await one \
 stop two

@@ -46,9 +46,9 @@ built-ins.

 Each built-in contains the following tags that can be used to break the workload up into uniform phases:

-- schema - selected with `tags=phase:schema`
-- rampup - selected with `tags=phase:rampup`
-- main - selected with `tags=phase:main`
+- schema - selected with `tags=block:"schema.*"`
+- rampup - selected with `tags=block:rampup`
+- main - selected with `tags=block:main`

 ### Parameters

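A quick aside on the quoted form in the tag list above: the doc and test changes throughout this diff suggest — an assumption on my part, not stated explicitly here — that a quoted tag value such as block:"schema.*" is matched as a regular expression against block names, which is why one filter can select several schema blocks at once. In plain Java terms:

```java
import java.util.List;

// Illustrative only: match block names against the pattern carried inside
// tags=block:"schema.*", the way a regex-based tag filter would.
public class TagRegexSketch {
    public static void main(String[] args) {
        List<String> blocks = List.of("schema", "schema_astra", "rampup", "main");
        blocks.stream()
              .filter(name -> name.matches("schema.*"))
              .forEach(System.out::println); // prints: schema, schema_astra
    }
}
```
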
@@ -26,7 +26,7 @@

 <properties>

-<revision>5.17.1-SNAPSHOT</revision>
+<revision>5.17.2-SNAPSHOT</revision>
 <!-- Set this level to override the logging level for tests during build -->
 <project.testlevel>INFO</project.testlevel>
 <!-- Set this level to override the logging level for tests logging configuration during build -->

@@ -40,6 +40,7 @@
 <maven.compiler.source>17</maven.compiler.source>
 <maven.compiler.target>17</maven.compiler.target>

+<PROG>nb5</PROG>
 </properties>

 <name>${project.artifactId}</name>

@@ -87,7 +88,7 @@
 <dependency>
 <groupId>org.snakeyaml</groupId>
 <artifactId>snakeyaml-engine</artifactId>
-<version>2.5</version>
+<version>2.6</version>
 </dependency>

 <dependency>

@@ -205,7 +206,7 @@
 <dependency>
 <groupId>io.netty</groupId>
 <artifactId>netty-handler</artifactId>
-<version>4.1.86.Final</version>
+<version>4.1.87.Final</version>
 </dependency>

 <dependency>

@@ -472,6 +473,14 @@
 </dependencies>

 <build>
+<resources>
+<resource>
+<directory>src/main/resources</directory>
+<excludes>
+<exclude>log4j2.xml</exclude>
+</excludes>
+</resource>
+</resources>
 <testResources>
 <testResource>
 <directory>src/test/resources</directory>

@@ -480,7 +489,7 @@
 </excludes>
 <filtering>true</filtering>
 </testResource>
-<testResource>
+<testResource> <!-- Not sure why the complementary configs are here.. -->
 <directory>src/test/resources</directory>
 <includes>
 <include>log4j2-test.xml</include>

@@ -500,7 +509,6 @@
 <plugin>
 <groupId>org.apache.maven.plugins</groupId>
 <artifactId>maven-resources-plugin</artifactId>
-<version>3.2.0</version>
 <configuration>
 <propertiesEncoding>ISO-8859-1</propertiesEncoding>
 </configuration>

@@ -534,7 +542,6 @@
 <plugin>
 <groupId>org.apache.maven.plugins</groupId>
 <artifactId>maven-compiler-plugin</artifactId>
-<version>3.10.1</version>
 <configuration>
 <debug>true</debug>
 <target>17</target>

@@ -550,7 +557,6 @@
 <plugin>
 <groupId>org.apache.maven.plugins</groupId>
 <artifactId>maven-surefire-plugin</artifactId>
-<version>3.0.0-M6</version>
 <configuration>
 <argLine>-ea ${argLine}</argLine>
 <systemPropertyVariables>

@@ -575,7 +581,6 @@
 <plugin>
 <groupId>org.jacoco</groupId>
 <artifactId>jacoco-maven-plugin</artifactId>
-<version>0.8.8</version>
 <executions>
 <execution>
 <id>prepare-agent</id>

@@ -626,7 +631,6 @@
 <plugin>
 <groupId>org.apache.maven.plugins</groupId>
 <artifactId>maven-failsafe-plugin</artifactId>
-<version>3.0.0-M6</version>
 <executions>
 <execution>
 <id>run-tests</id>

@@ -654,7 +658,6 @@
 <plugin>
 <groupId>org.apache.maven.plugins</groupId>
 <artifactId>maven-javadoc-plugin</artifactId>
-<version>3.4.1</version>
 <configuration>
 <release>17</release>
 <doctitle>${javadoc.name}</doctitle>

@@ -685,7 +688,6 @@
 <plugin>
 <groupId>org.apache.maven.plugins</groupId>
 <artifactId>maven-source-plugin</artifactId>
-<version>3.2.1</version>
 <executions>
 <execution>
 <id>attach-sources</id>

@@ -701,7 +703,6 @@
 <plugin>
 <groupId>org.sonatype.plugins</groupId>
 <artifactId>nexus-staging-maven-plugin</artifactId>
-<version>1.6.13</version>
 <extensions>true</extensions>
 <configuration>
 <serverId>ossrh</serverId>

@@ -713,7 +714,6 @@
 <plugin>
 <groupId>org.apache.maven.plugins</groupId>
 <artifactId>maven-gpg-plugin</artifactId>
-<version>3.0.1</version>
 </plugin>

 <plugin>

@@ -743,7 +743,6 @@
 <plugin>
 <groupId>org.apache.maven.plugins</groupId>
 <artifactId>maven-enforcer-plugin</artifactId>
-<version>3.0.0-M3</version>
 <executions>
 <execution>
 <id>enforce-java</id>

@@ -767,18 +766,18 @@
 <plugin>
 <groupId>org.apache.maven.plugins</groupId>
 <artifactId>maven-assembly-plugin</artifactId>
-<version>3.3.0</version>
+<version>3.4.2</version>
 </plugin>

 <plugin>
 <groupId>org.apache.maven.plugins</groupId>
 <artifactId>maven-release-plugin</artifactId>
-<version>3.0.0-M6</version>
+<version>3.0.0-M7</version>
 </plugin>
 <plugin>
 <groupId>org.apache.maven.plugins</groupId>
 <artifactId>maven-gpg-plugin</artifactId>
-<version>1.6</version>
+<version>3.0.1</version>
 </plugin>
 <plugin>
 <groupId>org.apache.maven.plugins</groupId>

@@ -788,27 +787,27 @@
 <plugin>
 <groupId>org.apache.maven.plugins</groupId>
 <artifactId>maven-surefire-plugin</artifactId>
-<version>3.0.0-M4</version>
+<version>3.0.0-M8</version>
 </plugin>
 <plugin>
 <groupId>org.apache.maven.plugins</groupId>
 <artifactId>maven-failsafe-plugin</artifactId>
-<version>3.0.0-M4</version>
+<version>3.0.0-M8</version>
 </plugin>
 <plugin>
 <groupId>org.apache.maven.plugins</groupId>
 <artifactId>maven-javadoc-plugin</artifactId>
-<version>3.1.1</version>
+<version>3.4.1</version>
 </plugin>
 <plugin>
 <groupId>org.apache.maven.plugins</groupId>
 <artifactId>maven-source-plugin</artifactId>
-<version>3.0.1</version>
+<version>3.2.1</version>
 </plugin>
 <plugin>
 <groupId>org.sonatype.plugins</groupId>
 <artifactId>nexus-staging-maven-plugin</artifactId>
-<version>1.6.8</version>
+<version>1.6.13</version>
 </plugin>
 <plugin>
 <groupId>org.antlr</groupId>

@@ -818,43 +817,43 @@
 <plugin>
 <groupId>org.codehaus.mojo</groupId>
 <artifactId>exec-maven-plugin</artifactId>
-<version>1.6.0</version>
+<version>3.1.0</version>
 </plugin>
 <plugin>
 <groupId>org.apache.maven.plugins</groupId>
 <artifactId>maven-enforcer-plugin</artifactId>
-<version>3.0.0-M3</version>
+<version>3.2.1</version>
 </plugin>
 <plugin>
 <groupId>org.apache.maven.plugins</groupId>
 <artifactId>maven-clean-plugin</artifactId>
-<version>3.1.0</version>
+<version>3.2.0</version>
 </plugin>
 <!-- see http://maven.apache.org/ref/current/maven-core/default-bindings.html#Plugin_bindings_for_jar_packaging -->
 <plugin>
 <groupId>org.apache.maven.plugins</groupId>
 <artifactId>maven-resources-plugin</artifactId>
-<version>3.2.0</version>
+<version>3.3.0</version>
 </plugin>
 <plugin>
 <groupId>org.jacoco</groupId>
-<artifactId>org.jacoco.ant</artifactId>
+<artifactId>jacoco-maven-plugin</artifactId>
 <version>${jacoco.version}</version>
 </plugin>
 <plugin>
 <groupId>org.apache.maven.plugins</groupId>
 <artifactId>maven-jar-plugin</artifactId>
-<version>3.1.1</version>
+<version>3.3.0</version>
 </plugin>
 <plugin>
 <groupId>org.apache.maven.plugins</groupId>
 <artifactId>maven-install-plugin</artifactId>
-<version>3.0.0-M1</version>
+<version>3.1.0</version>
 </plugin>
 <plugin>
 <groupId>org.apache.maven.plugins</groupId>
 <artifactId>maven-deploy-plugin</artifactId>
-<version>3.0.0-M1</version>
+<version>3.0.0</version>
 </plugin>
 </plugins>
 </pluginManagement>

@@ -76,6 +76,7 @@ public class NBIO implements NBPathsAPI.Facets {
 return Arrays.asList(split);
 }

+
 public static CSVParser readFileCSV(String filename, String... searchPaths) {
 return NBIO.readFileDelimCSV(filename, ',', searchPaths);
 }

@@ -18,6 +18,7 @@ package io.nosqlbench.api.engine.activityimpl;

 import io.nosqlbench.api.config.NBNamedElement;
 import io.nosqlbench.api.engine.util.Unit;
+import io.nosqlbench.api.errors.BasicError;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;

@@ -213,4 +214,22 @@ public class ActivityDef implements NBNamedElement {
 public String getName() {
 return getAlias();
 }
+
+public ActivityDef deprecate(String deprecatedName, String newName) {
+Object deprecatedParam = this.parameterMap.get(deprecatedName);
+if (deprecatedParam==null) {
+return this;
+}
+if (deprecatedParam instanceof CharSequence chars) {
+if (this.parameterMap.containsKey(newName)) {
+throw new BasicError("You have specified activity param '" + deprecatedName + "' in addition to the valid name '" + newName +"'. Remove '" + deprecatedName + "'.");
+} else {
+logger.warn("Auto replacing deprecated activity param '" + deprecatedName + "="+ chars +"' with new '" + newName +"="+ chars +"'.");
+parameterMap.put(newName,parameterMap.remove(deprecatedName));
+}
+} else {
+throw new BasicError("Can't replace deprecated name with value of type " + deprecatedName.getClass().getCanonicalName());
+}
+return this;
+}
 }

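To make the new deprecation shim above concrete, here is a minimal usage sketch from the caller's side. It assumes ActivityDef and BasicError from this diff are on the classpath, and it applies the same renames that the ActivityLoader change earlier in this diff performs (yaml to workload, type to driver):

```java
public class DeprecateSketch {
    public static void main(String[] args) {
        // Hedged sketch of ActivityDef.deprecate(..) as added above.
        ActivityDef def = ActivityDef.parseActivityDef("type=diag;alias=renamed_example;");
        def = def.deprecate("yaml", "workload").deprecate("type", "driver");
        // "type=diag" now travels as "driver=diag", with a warning logged;
        // specifying both "type" and "driver" at once throws BasicError
        // per the guard in the method body.
        System.out.println(def);
    }
}
```
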
nbr/pom.xml
@@ -82,9 +82,19 @@
 <includes>
 <include>version.properties</include>
 <include>nb_version_info.md</include>
+<include>log4j2.xml</include>
 </includes>
 <filtering>true</filtering>
 </resource>
+<resource>
+<directory>src/main/resources</directory>
+<includes>
+<include>examples/**</include>
+<include>scripts/**</include>
+<include>**/*.md</include>
+</includes>
+<filtering>false</filtering>
+</resource>
 </resources>

 <plugins>

@@ -33,9 +33,6 @@ import java.nio.file.StandardOpenOption;
 import java.util.Date;
 import java.util.Locale;
 import java.util.Map;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
 import java.util.function.Function;
 import java.util.zip.ZipEntry;
 import java.util.zip.ZipOutputStream;

@@ -65,10 +62,7 @@ public class BundledMarkdownZipExporter {
 }
 }

-ExecutorService executorService = Executors.newSingleThreadExecutor();
-Future<Map<String, StringBuilder>> future = executorService.submit(new VirtDataGenDocsApp(null));
-Map<String, StringBuilder> builderMap = future.get();
-executorService.shutdown();
+Map<String, StringBuilder> builderMap= new VirtDataGenDocsApp(null).call();
 String bindingsPrefix ="bindings/";
 for(Map.Entry<String, StringBuilder> entry : builderMap.entrySet())
 {

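The replacement above trades a single-use executor for a direct invocation. As a general pattern note — plain JDK, nothing nosqlbench-specific — a Callable whose result is consumed immediately on the calling thread needs none of the submit/get/shutdown ceremony:

```java
import java.util.concurrent.Callable;

public class DirectCallSketch {
    public static void main(String[] args) throws Exception {
        // Calling call() directly runs the task synchronously on this thread,
        // avoiding the ExecutorService and Future plumbing removed above.
        Callable<String> docs = () -> "generated-docs";
        String result = docs.call();
        System.out.println(result);
    }
}
```
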