Compare commits

3 Commits: letsencryp... → refactor_b

| Author | SHA1 | Date |
|---|---|---|
| | 9bfa6db64a | |
| | cfd3cf78f8 | |
| | c85323baa4 | |
42  .eslintrc.js
@@ -15,10 +15,9 @@ module.exports = {
  overrides: [
    {
      files: ['cli.{,c,m}js', '*-cli.{,c,m}js', '**/*cli*/**/*.{,c,m}js', '**/scripts/**.{,c,m}js'],
      files: ['cli.{,c,m}js', '*-cli.{,c,m}js', '**/*cli*/**/*.{,c,m}js'],
      rules: {
        'n/no-process-exit': 'off',
        'n/shebang': 'off',
        'no-console': 'off',
      },
    },
@@ -29,7 +28,7 @@ module.exports = {
      },
    },
    {
      files: ['*.{integ,spec,test}.{,c,m}js'],
      files: ['*.{spec,test}.{,c,m}js'],
      rules: {
        'n/no-unpublished-require': 'off',
        'n/no-unpublished-import': 'off',
@@ -47,38 +46,6 @@ module.exports = {
      ],
    },
    },
    {
      files: ['@xen-orchestra/lite/**/*.{vue,ts}'],
      parserOptions: {
        sourceType: 'module',
      },
      plugins: ['import'],
      extends: [
        'plugin:import/recommended',
        'plugin:import/typescript',
        'plugin:vue/vue3-recommended',
        '@vue/eslint-config-typescript/recommended',
        '@vue/eslint-config-prettier',
      ],
      settings: {
        'import/resolver': {
          typescript: true,
          'eslint-import-resolver-custom-alias': {
            alias: {
              '@': './src',
            },
            extensions: ['.ts'],
            packages: ['@xen-orchestra/lite'],
          },
        },
      },
      rules: {
        'no-void': 'off',
        'n/no-missing-import': 'off', // using 'import' plugin instead to support TS aliases
        '@typescript-eslint/no-explicit-any': 'off',
        'vue/require-default-prop': 'off', // https://github.com/vuejs/eslint-plugin-vue/issues/2051
      },
    },
  ],

  parserOptions: {
@@ -101,11 +68,6 @@ module.exports = {

    'no-console': ['error', { allow: ['warn', 'error'] }],

    // this rule can prevent race condition bugs like parallel `a += await foo()`
    //
    // as it has a lot of false positives, it is only enabled as a warning for now
    'require-atomic-updates': 'warn',

    strict: 'error',
  },
}
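The `require-atomic-updates` comment above deserves a concrete illustration. Below is a minimal, hypothetical sketch (not code from this repository) of the race that rule flags:

```js
let total = 0

async function fetchIncrement() {
  return 1 // stands in for any async source
}

async function add() {
  // `total` is read here, before the await resolves...
  total += await fetchIncrement()
  // ...so two concurrent add() calls both read the old value,
  // and one increment is silently lost
}

async function main() {
  await Promise.all([add(), add()])
  console.log(total) // prints 1, not the expected 2
}

main()
```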
48  .github/ISSUE_TEMPLATE/bug_report.md  (vendored, new file)
@@ -0,0 +1,48 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: 'status: triaging :triangular_flag_on_post:, type: bug :bug:'
assignees: ''
---

1. ⚠️ **If you don't follow this template, the issue will be closed**.
2. ⚠️ **If your issue can't be easily reproduced, please report it [on the forum first](https://xcp-ng.org/forum/category/12/xen-orchestra)**.

Are you using XOA or XO from the sources?

If XOA:

- which release channel? (`stable` vs `latest`)
- please consider creating a support ticket in [your dedicated support area](https://xen-orchestra.com/#!/member/support)

If XO from the sources:

- Provide **your commit number**. If it's older than a week, we won't investigate
- Don't forget to [read this first](https://xen-orchestra.com/docs/community.html)
- As well as follow [this guide](https://xen-orchestra.com/docs/community.html#report-a-bug)

**Describe the bug**
A clear and concise description of what the bug is.

**To Reproduce**
Steps to reproduce the behavior:

1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error

**Expected behavior**
A clear and concise description of what you expected to happen.

**Screenshots**
If applicable, add screenshots to help explain your problem.

**Environment (please provide the following information):**

- Node: [e.g. 16.12.1]
- hypervisor: [e.g. XCP-ng 8.2.0]

**Additional context**
Add any other context about the problem here.
119  .github/ISSUE_TEMPLATE/bug_report.yml  (vendored)
@@ -1,119 +0,0 @@
name: Bug Report
description: Create a report to help us improve
labels: ['type: bug :bug:', 'status: triaging :triangular_flag_on_post:']
body:
  - type: markdown
    attributes:
      value: |
        1. ⚠️ **If you don't follow this template, the issue will be closed**.
        2. ⚠️ **If your issue can't be easily reproduced, please report it [on the forum first](https://xcp-ng.org/forum/category/12/xen-orchestra)**.
  - type: markdown
    attributes:
      value: '## Are you using XOA or XO from the sources?'
  - type: dropdown
    id: xo-origin
    attributes:
      label: Are you using XOA or XO from the sources?
      options:
        - XOA
        - XO from the sources
        - both
    validations:
      required: false
  - type: markdown
    attributes:
      value: '### If XOA:'
  - type: dropdown
    id: xoa-channel
    attributes:
      label: Which release channel?
      description: please consider creating a support ticket in [your dedicated support area](https://xen-orchestra.com/#!/member/support)
      options:
        - stable
        - latest
        - both
    validations:
      required: false
  - type: markdown
    attributes:
      value: '### If XO from the sources:'
  - type: markdown
    attributes:
      value: |
        - Don't forget to [read this first](https://xen-orchestra.com/docs/community.html)
        - As well as follow [this guide](https://xen-orchestra.com/docs/community.html#report-a-bug)
  - type: input
    id: xo-sources-commit-number
    attributes:
      label: Provide your commit number
      description: If it's older than a week, we won't investigate
      placeholder: e.g. 579f0
    validations:
      required: false
  - type: markdown
    attributes:
      value: '## Bug description:'
  - type: textarea
    id: bug-description
    attributes:
      label: Describe the bug
      description: A clear and concise description of what the bug is
    validations:
      required: true
  - type: textarea
    id: error-message
    attributes:
      label: Error message
      render: Markdown
    validations:
      required: false
  - type: textarea
    id: steps
    attributes:
      label: To reproduce
      description: 'Steps to reproduce the behavior:'
      value: |
        1. Go to '...'
        2. Click on '...'
        3. Scroll down to '...'
        4. See error
    validations:
      required: false
  - type: textarea
    id: expected-behavior
    attributes:
      label: Expected behavior
      description: A clear and concise description of what you expected to happen
    validations:
      required: false
  - type: textarea
    id: screenshots
    attributes:
      label: Screenshots
      description: If applicable, add screenshots to help explain your problem
    validations:
      required: false
  - type: markdown
    attributes:
      value: '## Environment (please provide the following information):'
  - type: input
    id: node-version
    attributes:
      label: Node
      placeholder: e.g. 16.12.1
    validations:
      required: true
  - type: input
    id: hypervisor-version
    attributes:
      label: Hypervisor
      placeholder: e.g. XCP-ng 8.2.0
    validations:
      required: true
  - type: textarea
    id: additional-context
    attributes:
      label: Additional context
      description: Add any other context about the problem here
    validations:
      required: false
4  .gitignore  (vendored)
@@ -30,12 +30,8 @@ pnpm-debug.log.*
yarn-error.log
yarn-error.log.*
.env
*.tsbuildinfo

# code coverage
.nyc_output/
coverage/
.turbo/

# https://node-tap.org/dot-tap-folder/
.tap/
@@ -1,11 +1,8 @@
'use strict'

module.exports = {
  arrowParens: 'avoid',
  jsxSingleQuote: true,
  semi: false,
  singleQuote: true,
  trailingComma: 'es5',

  // 2020-11-24: Requested by nraynaud and approved by the rest of the team
  //
@@ -33,7 +33,7 @@
    "test": "node--test"
  },
  "devDependencies": {
    "sinon": "^17.0.1",
    "sinon": "^15.0.1",
    "tap": "^16.3.0",
    "test": "^3.2.1"
  }
@@ -13,15 +13,12 @@ describe('decorateWith', () => {
    const expectedFn = Function.prototype
    const newFn = () => {}

    const decorator = decorateWith(
      function wrapper(fn, ...args) {
        assert.deepStrictEqual(fn, expectedFn)
        assert.deepStrictEqual(args, expectedArgs)
    const decorator = decorateWith(function wrapper(fn, ...args) {
      assert.deepStrictEqual(fn, expectedFn)
      assert.deepStrictEqual(args, expectedArgs)

        return newFn
      },
      ...expectedArgs
    )
      return newFn
    }, ...expectedArgs)

    const descriptor = {
      configurable: true,
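For readers unfamiliar with `@vates/decorate-with`, the behaviour the test above exercises can be summarised by a plausible sketch of the decorator — an inference from the test, not the actual implementation:

```js
// decorateWith(wrapper, ...args) returns a legacy method decorator:
// the decorated method `fn` is replaced by `wrapper(fn, ...args)`
const decorateWith =
  (wrapper, ...args) =>
  (target, name, descriptor) => ({
    ...descriptor,
    value: wrapper(descriptor.value, ...args),
  })
```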
@@ -14,7 +14,7 @@
    "url": "https://vates.fr"
  },
  "license": "ISC",
  "version": "0.1.5",
  "version": "0.1.4",
  "engines": {
    "node": ">=8.10"
  },
@@ -23,13 +23,13 @@
    "test": "node--test"
  },
  "dependencies": {
    "@vates/multi-key-map": "^0.2.0",
    "@vates/multi-key-map": "^0.1.0",
    "@xen-orchestra/async-map": "^0.1.2",
    "@xen-orchestra/log": "^0.6.0",
    "ensure-array": "^1.0.0"
  },
  "devDependencies": {
    "sinon": "^17.0.1",
    "sinon": "^15.0.1",
    "test": "^3.2.1"
  }
}
@@ -1,7 +1,9 @@
import LRU from 'lru-cache'
import Fuse from 'fuse-native'
import { VhdSynthetic } from 'vhd-lib'
import { Disposable, fromCallback } from 'promise-toolbox'
'use strict'

const LRU = require('lru-cache')
const Fuse = require('fuse-native')
const { VhdSynthetic } = require('vhd-lib')
const { Disposable, fromCallback } = require('promise-toolbox')

// build a stat object from https://github.com/fuse-friends/fuse-native/blob/master/test/fixtures/stat.js
const stat = st => ({
@@ -14,7 +16,7 @@ const stat = st => ({
  gid: st.gid !== undefined ? st.gid : process.getgid(),
})

export const mount = Disposable.factory(async function* mount(handler, diskPath, mountDir) {
exports.mount = Disposable.factory(async function* mount(handler, diskPath, mountDir) {
  const vhd = yield VhdSynthetic.fromVhdChain(handler, diskPath)

  const cache = new LRU({
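`mount` being a `Disposable.factory` means callers consume it through promise-toolbox's `Disposable` helpers. A hedged usage sketch — the handler and paths are hypothetical, and the `Disposable.use` call follows promise-toolbox's documented disposable semantics:

```js
const { Disposable } = require('promise-toolbox')
const { mount } = require('@vates/fuse-vhd')

async function withMountedVhd(handler) {
  // `handler` would be a remote handler as used elsewhere in xen-orchestra;
  // 'vm.vhd' and '/mnt/vhd' are placeholder paths
  await Disposable.use(mount(handler, 'vm.vhd', '/mnt/vhd'), async () => {
    // the synthetic VHD stays mounted under /mnt/vhd for the duration
    // of this callback and is unmounted automatically afterwards
  })
}
```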
@@ -1,6 +1,6 @@
{
  "name": "@vates/fuse-vhd",
  "version": "2.0.0",
  "version": "1.0.0",
  "license": "ISC",
  "private": false,
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/fuse-vhd",
@@ -15,14 +15,13 @@
    "url": "https://vates.fr"
  },
  "engines": {
    "node": ">=14"
    "node": ">=10.0"
  },
  "main": "./index.mjs",
  "dependencies": {
    "fuse-native": "^2.2.6",
    "lru-cache": "^7.14.0",
    "promise-toolbox": "^0.21.0",
    "vhd-lib": "^4.9.0"
    "vhd-lib": "^4.4.0"
  },
  "scripts": {
    "postversion": "npm publish --access public"
@@ -17,14 +17,4 @@ map.get(['foo', 'bar']) // 2
map.get(['bar', 'foo']) // 3
map.get([OBJ]) // 4
map.get([{}]) // undefined

map.delete([])

for (const [key, value] of map.entries()) {
  console.log(key, value)
}

for (const value of map.values()) {
  console.log(value)
}
```

@@ -35,16 +35,6 @@ map.get(['foo', 'bar']) // 2
map.get(['bar', 'foo']) // 3
map.get([OBJ]) // 4
map.get([{}]) // undefined

map.delete([])

for (const [key, value] of map.entries()) {
  console.log(key, value)
}

for (const value of map.values()) {
  console.log(value)
}
```
## Contributions
@@ -36,31 +36,14 @@ function del(node, i, keys) {
  return node
}

function* entries(node, key) {
  if (node !== undefined) {
    if (node instanceof Node) {
      const { value } = node
      if (value !== undefined) {
        yield [key, node.value]
      }

      for (const [childKey, child] of node.children.entries()) {
        yield* entries(child, key.concat(childKey))
      }
    } else {
      yield [key, node]
    }
  }
}

function get(node, i, keys) {
  return i === keys.length
    ? node instanceof Node
      ? node.value
      : node
    : node instanceof Node
      ? get(node.children.get(keys[i]), i + 1, keys)
      : undefined
    ? get(node.children.get(keys[i]), i + 1, keys)
    : undefined
}

function set(node, i, keys, value) {
@@ -86,22 +69,6 @@ function set(node, i, keys, value) {
  return node
}

function* values(node) {
  if (node !== undefined) {
    if (node instanceof Node) {
      const { value } = node
      if (value !== undefined) {
        yield node.value
      }
      for (const child of node.children.values()) {
        yield* values(child)
      }
    } else {
      yield node
    }
  }
}

exports.MultiKeyMap = class MultiKeyMap {
  constructor() {
    // each node is either a value or a Node if it contains children
@@ -112,10 +79,6 @@ exports.MultiKeyMap = class MultiKeyMap {
    this._root = del(this._root, 0, keys)
  }

  entries() {
    return entries(this._root, [])
  }

  get(keys) {
    return get(this._root, 0, keys)
  }
@@ -123,8 +86,4 @@ exports.MultiKeyMap = class MultiKeyMap {
  set(keys, value) {
    this._root = set(this._root, 0, keys, value)
  }

  values() {
    return values(this._root)
  }
}
@@ -19,7 +19,7 @@ describe('MultiKeyMap', () => {
      // reverse composite key
      ['bar', 'foo'],
    ]
    const values = keys.map(() => Math.random())
    const values = keys.map(() => ({}))

    // set all values first to make sure they are all stored and not only the
    // last one
@@ -27,12 +27,6 @@ describe('MultiKeyMap', () => {
      map.set(key, values[i])
    })

    assert.deepEqual(
      Array.from(map.entries()),
      keys.map((key, i) => [key, values[i]])
    )
    assert.deepEqual(Array.from(map.values()), values)

    keys.forEach((key, i) => {
      // copy the key to make sure the array itself is not the key
      assert.strictEqual(map.get(key.slice()), values[i])
@@ -18,7 +18,7 @@
    "url": "https://vates.fr"
  },
  "license": "ISC",
  "version": "0.2.0",
  "version": "0.1.0",
  "engines": {
    "node": ">=8.10"
  },
42  @vates/nbd-client/constants.js  (new file)
@@ -0,0 +1,42 @@
'use strict'
exports.INIT_PASSWD = Buffer.from('NBDMAGIC') // "NBDMAGIC" ensures we're connected to an NBD server
exports.OPTS_MAGIC = Buffer.from('IHAVEOPT') // "IHAVEOPT" starts an option block
exports.NBD_OPT_REPLY_MAGIC = 1100100111001001n // magic received during negotiation
exports.NBD_OPT_EXPORT_NAME = 1
exports.NBD_OPT_ABORT = 2
exports.NBD_OPT_LIST = 3
exports.NBD_OPT_STARTTLS = 5
exports.NBD_OPT_INFO = 6
exports.NBD_OPT_GO = 7

exports.NBD_FLAG_HAS_FLAGS = 1 << 0
exports.NBD_FLAG_READ_ONLY = 1 << 1
exports.NBD_FLAG_SEND_FLUSH = 1 << 2
exports.NBD_FLAG_SEND_FUA = 1 << 3
exports.NBD_FLAG_ROTATIONAL = 1 << 4
exports.NBD_FLAG_SEND_TRIM = 1 << 5

exports.NBD_FLAG_FIXED_NEWSTYLE = 1 << 0

exports.NBD_CMD_FLAG_FUA = 1 << 0
exports.NBD_CMD_FLAG_NO_HOLE = 1 << 1
exports.NBD_CMD_FLAG_DF = 1 << 2
exports.NBD_CMD_FLAG_REQ_ONE = 1 << 3
exports.NBD_CMD_FLAG_FAST_ZERO = 1 << 4

exports.NBD_CMD_READ = 0
exports.NBD_CMD_WRITE = 1
exports.NBD_CMD_DISC = 2
exports.NBD_CMD_FLUSH = 3
exports.NBD_CMD_TRIM = 4
exports.NBD_CMD_CACHE = 5
exports.NBD_CMD_WRITE_ZEROES = 6
exports.NBD_CMD_BLOCK_STATUS = 7
exports.NBD_CMD_RESIZE = 8

exports.NBD_REQUEST_MAGIC = 0x25609513 // magic number to create a new NBD request to send to the server
exports.NBD_REPLY_MAGIC = 0x67446698 // magic number received from the server when reading the response to an NBD request
exports.NBD_REPLY_ACK = 1

exports.NBD_DEFAULT_PORT = 10809
exports.NBD_DEFAULT_BLOCK_SIZE = 64 * 1024
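These flag constants are single-bit masks, meant to be combined and tested with bitwise operators. A small sketch — the `transmissionFlags` value is hypothetical:

```js
const { NBD_FLAG_HAS_FLAGS, NBD_FLAG_READ_ONLY } = require('@vates/nbd-client/constants.js')

// 16-bit transmission flags as received from the server during negotiation
const transmissionFlags = NBD_FLAG_HAS_FLAGS | NBD_FLAG_READ_ONLY

if ((transmissionFlags & NBD_FLAG_READ_ONLY) !== 0) {
  console.log('the export is read-only')
}
```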
@@ -1,41 +0,0 @@
export const INIT_PASSWD = Buffer.from('NBDMAGIC') // "NBDMAGIC" ensures we're connected to an NBD server
export const OPTS_MAGIC = Buffer.from('IHAVEOPT') // "IHAVEOPT" starts an option block
export const NBD_OPT_REPLY_MAGIC = 1100100111001001n // magic received during negotiation
export const NBD_OPT_EXPORT_NAME = 1
export const NBD_OPT_ABORT = 2
export const NBD_OPT_LIST = 3
export const NBD_OPT_STARTTLS = 5
export const NBD_OPT_INFO = 6
export const NBD_OPT_GO = 7

export const NBD_FLAG_HAS_FLAGS = 1 << 0
export const NBD_FLAG_READ_ONLY = 1 << 1
export const NBD_FLAG_SEND_FLUSH = 1 << 2
export const NBD_FLAG_SEND_FUA = 1 << 3
export const NBD_FLAG_ROTATIONAL = 1 << 4
export const NBD_FLAG_SEND_TRIM = 1 << 5

export const NBD_FLAG_FIXED_NEWSTYLE = 1 << 0

export const NBD_CMD_FLAG_FUA = 1 << 0
export const NBD_CMD_FLAG_NO_HOLE = 1 << 1
export const NBD_CMD_FLAG_DF = 1 << 2
export const NBD_CMD_FLAG_REQ_ONE = 1 << 3
export const NBD_CMD_FLAG_FAST_ZERO = 1 << 4

export const NBD_CMD_READ = 0
export const NBD_CMD_WRITE = 1
export const NBD_CMD_DISC = 2
export const NBD_CMD_FLUSH = 3
export const NBD_CMD_TRIM = 4
export const NBD_CMD_CACHE = 5
export const NBD_CMD_WRITE_ZEROES = 6
export const NBD_CMD_BLOCK_STATUS = 7
export const NBD_CMD_RESIZE = 8

export const NBD_REQUEST_MAGIC = 0x25609513 // magic number to create a new NBD request to send to the server
export const NBD_REPLY_MAGIC = 0x67446698 // magic number received from the server when reading the response to an NBD request
export const NBD_REPLY_ACK = 1

export const NBD_DEFAULT_PORT = 10809
export const NBD_DEFAULT_BLOCK_SIZE = 64 * 1024
@@ -1,10 +1,8 @@
import assert from 'node:assert'
import { Socket } from 'node:net'
import { connect } from 'node:tls'
import { fromCallback, pRetry, pDelay, pTimeout, pFromCallback } from 'promise-toolbox'
import { readChunkStrict } from '@vates/read-chunk'
import { createLogger } from '@xen-orchestra/log'
import {
'use strict'
const assert = require('node:assert')
const { Socket } = require('node:net')
const { connect } = require('node:tls')
const {
  INIT_PASSWD,
  NBD_CMD_READ,
  NBD_DEFAULT_BLOCK_SIZE,
@@ -19,12 +17,16 @@ import {
  NBD_REQUEST_MAGIC,
  OPTS_MAGIC,
  NBD_CMD_DISC,
} from './constants.mjs'
} = require('./constants.js')
const { fromCallback, pRetry, pDelay, pTimeout } = require('promise-toolbox')
const { readChunkStrict } = require('@vates/read-chunk')
const { createLogger } = require('@xen-orchestra/log')

const { warn } = createLogger('vates:nbd-client')

// documentation is here: https://github.com/NetworkBlockDevice/nbd/blob/master/doc/proto.md

export default class NbdClient {
module.exports = class NbdClient {
  #serverAddress
  #serverCert
  #serverPort
@@ -38,7 +40,6 @@ export default class NbdClient {
  #readBlockRetries
  #reconnectRetry
  #connectTimeout
  #messageTimeout

  // AFAIK, there is no guarantee the server answers in the same order as the queries,
  // so we handle a backlog of commands waiting for a response and handle concurrency manually
@@ -51,14 +52,7 @@ export default class NbdClient {
  #reconnectingPromise
  constructor(
    { address, port = NBD_DEFAULT_PORT, exportname, cert },
    {
      connectTimeout = 6e4,
      messageTimeout = 6e4,
      waitBeforeReconnect = 1e3,
      readAhead = 10,
      readBlockRetries = 5,
      reconnectRetry = 5,
    } = {}
    { connectTimeout = 6e4, waitBeforeReconnect = 1e3, readAhead = 10, readBlockRetries = 5, reconnectRetry = 5 } = {}
  ) {
    this.#serverAddress = address
    this.#serverPort = port
@@ -69,7 +63,6 @@ export default class NbdClient {
    this.#readBlockRetries = readBlockRetries
    this.#reconnectRetry = reconnectRetry
    this.#connectTimeout = connectTimeout
    this.#messageTimeout = messageTimeout
  }

  get exportSize() {
@@ -122,27 +115,13 @@ export default class NbdClient {
    if (!this.#connected) {
      return
    }
    this.#connected = false
    const socket = this.#serverSocket

    const queryId = this.#nextCommandQueryId
    this.#nextCommandQueryId++

    const buffer = Buffer.alloc(28)
    buffer.writeInt32BE(NBD_REQUEST_MAGIC, 0) // it is a nbd request
    buffer.writeInt16BE(0, 4) // no command flags for a disconnect
    buffer.writeInt16BE(NBD_CMD_DISC, 6) // we want to disconnect from nbd server
    buffer.writeBigUInt64BE(queryId, 8)
    buffer.writeBigUInt64BE(0n, 16)
    buffer.writeInt32BE(0, 24)
    const promise = pFromCallback(cb => {
      socket.end(buffer, 'utf8', cb)
    })
    try {
      await pTimeout.call(promise, this.#messageTimeout)
    } catch (error) {
      socket.destroy()
    }
    await this.#write(buffer)
    await this.#serverSocket.destroy()
    this.#serverSocket = undefined
    this.#connected = false
  }
@@ -216,13 +195,11 @@ export default class NbdClient {
  }

  #read(length) {
    const promise = readChunkStrict(this.#serverSocket, length)
    return pTimeout.call(promise, this.#messageTimeout)
    return readChunkStrict(this.#serverSocket, length)
  }

  #write(buffer) {
    const promise = fromCallback.call(this.#serverSocket, 'write', buffer)
    return pTimeout.call(promise, this.#messageTimeout)
    return fromCallback.call(this.#serverSocket, 'write', buffer)
  }

  async #readInt32() {
@@ -255,20 +232,19 @@ export default class NbdClient {
    }
    try {
      this.#waitingForResponse = true
      const buffer = await this.#read(16)
      const magic = buffer.readInt32BE(0)
      const magic = await this.#readInt32()

      if (magic !== NBD_REPLY_MAGIC) {
        throw new Error(`magic number for block answer is wrong : ${magic} ${NBD_REPLY_MAGIC}`)
      }

      const error = buffer.readInt32BE(4)
      const error = await this.#readInt32()
      if (error !== 0) {
        // @todo use error code from constants.mjs
        throw new Error(`GOT ERROR CODE : ${error}`)
      }

      const blockQueryId = buffer.readBigUInt64BE(8)
      const blockQueryId = await this.#readInt64()
      const query = this.#commandQueryBacklog.get(blockQueryId)
      if (!query) {
        throw new Error(` no query associated with id ${blockQueryId}`)
@@ -289,7 +265,7 @@ export default class NbdClient {
    }
  }

  async #readBlock(index, size) {
  async readBlock(index, size = NBD_DEFAULT_BLOCK_SIZE) {
    // we don't want to add anything in backlog while reconnecting
    if (this.#reconnectingPromise) {
      await this.#reconnectingPromise
@@ -305,13 +281,7 @@ export default class NbdClient {
    buffer.writeInt16BE(NBD_CMD_READ, 6) // we want to read a data block
    buffer.writeBigUInt64BE(queryId, 8)
    // byte offset in the raw disk
    const offset = BigInt(index) * BigInt(size)
    const remaining = this.#exportSize - offset
    if (remaining < BigInt(size)) {
      size = Number(remaining)
    }

    buffer.writeBigUInt64BE(offset, 16)
    buffer.writeBigUInt64BE(BigInt(index) * BigInt(size), 16)
    buffer.writeInt32BE(size, 24)

    return new Promise((resolve, reject) => {
@@ -337,13 +307,45 @@ export default class NbdClient {
    })
  }

  async readBlock(index, size = NBD_DEFAULT_BLOCK_SIZE) {
    return pRetry(() => this.#readBlock(index, size), {
      tries: this.#readBlockRetries,
      onRetry: async err => {
        warn('will retry reading block ', index, err)
        await this.reconnect()
      },
    })
  async *readBlocks(indexGenerator) {
    // default: read all blocks
    if (indexGenerator === undefined) {
      const exportSize = this.#exportSize
      const chunkSize = 2 * 1024 * 1024
      indexGenerator = function* () {
        const nbBlocks = Math.ceil(exportSize / chunkSize)
        for (let index = 0; index < nbBlocks; index++) {
          yield { index, size: chunkSize }
        }
      }
    }
    const readAhead = []
    const readAheadMaxLength = this.#readAhead
    const makeReadBlockPromise = (index, size) => {
      const promise = pRetry(() => this.readBlock(index, size), {
        tries: this.#readBlockRetries,
        onRetry: async err => {
          warn('will retry reading block ', index, err)
          await this.reconnect()
        },
      })
      // error is handled during unshift
      promise.catch(() => {})
      return promise
    }

    // read all blocks, but try to keep readAheadMaxLength promises waiting ahead
    for (const { index, size } of indexGenerator()) {
      // stack readAheadMaxLength promises before starting to handle the results
      if (readAhead.length === readAheadMaxLength) {
        // any error will stop reading blocks
        yield readAhead.shift()
      }

      readAhead.push(makeReadBlockPromise(index, size))
    }
    while (readAhead.length > 0) {
      yield readAhead.shift()
    }
  }
}
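A short usage sketch of the `readBlocks` iterator above; the connection settings are placeholders, and the package entry point is assumed to resolve to the class (the integration test further down exercises the same API for real):

```js
const NbdClient = require('@vates/nbd-client')

async function dumpExport() {
  const client = new NbdClient({ address: 'localhost', exportname: 'MY_SECRET_EXPORT' })
  await client.connect()

  // with no generator argument, readBlocks() walks the whole export in
  // 2 MiB chunks, keeping up to `readAhead` read requests in flight
  for await (const block of client.readBlocks()) {
    console.log('read %d bytes', block.length)
  }

  await client.disconnect()
}
```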
@@ -1,85 +0,0 @@
import { asyncEach } from '@vates/async-each'
import { NBD_DEFAULT_BLOCK_SIZE } from './constants.mjs'
import NbdClient from './index.mjs'
import { createLogger } from '@xen-orchestra/log'

const { warn } = createLogger('vates:nbd-client:multi')
export default class MultiNbdClient {
  #clients = []
  #readAhead

  get exportSize() {
    return this.#clients[0].exportSize
  }

  constructor(settings, { nbdConcurrency = 8, readAhead = 16, ...options } = {}) {
    this.#readAhead = readAhead
    if (!Array.isArray(settings)) {
      settings = [settings]
    }
    for (let i = 0; i < nbdConcurrency; i++) {
      this.#clients.push(
        new NbdClient(settings[i % settings.length], { ...options, readAhead: Math.ceil(readAhead / nbdConcurrency) })
      )
    }
  }

  async connect() {
    const connectedClients = []
    for (const clientId in this.#clients) {
      const client = this.#clients[clientId]
      try {
        await client.connect()
        connectedClients.push(client)
      } catch (err) {
        client.disconnect().catch(() => {})
        warn(`can't connect to one nbd client`, { err })
      }
    }
    if (connectedClients.length === 0) {
      throw new Error(`Fail to connect to any Nbd client`)
    }
    if (connectedClients.length < this.#clients.length) {
      warn(
        `incomplete connection by multi Nbd, only ${connectedClients.length} over ${this.#clients.length} expected clients`
      )
      this.#clients = connectedClients
    }
  }

  async disconnect() {
    await asyncEach(this.#clients, client => client.disconnect(), {
      stopOnError: false,
    })
  }

  async readBlock(index, size = NBD_DEFAULT_BLOCK_SIZE) {
    const clientId = index % this.#clients.length
    return this.#clients[clientId].readBlock(index, size)
  }

  async *readBlocks(indexGenerator) {
    // default: read all blocks
    const readAhead = []
    const makeReadBlockPromise = (index, size) => {
      const promise = this.readBlock(index, size)
      // error is handled during unshift
      promise.catch(() => {})
      return promise
    }

    // read all blocks, but try to keep readAheadMaxLength promises waiting ahead
    for (const { index, size } of indexGenerator()) {
      // stack readAheadMaxLength promises before starting to handle the results
      if (readAhead.length === this.#readAhead) {
        // any error will stop reading blocks
        yield readAhead.shift()
      }

      readAhead.push(makeReadBlockPromise(index, size))
    }
    while (readAhead.length > 0) {
      yield readAhead.shift()
    }
  }
}
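The deleted `MultiNbdClient` spread one export over several NBD connections: block `index` is served by connection `index % nbdConcurrency`, so sequential reads fan out across sockets. A hedged sketch of how it was consumed (placeholder settings):

```js
import MultiNbdClient from '@vates/nbd-client/multi.mjs'

async function parallelDump() {
  const client = new MultiNbdClient(
    { address: '192.0.2.1', exportname: 'my-disk' }, // placeholder settings
    { nbdConcurrency: 4, readAhead: 16 }
  )
  await client.connect()

  // block 0 goes to client 0, block 1 to client 1, ..., block 4 to client 0 again
  const block = await client.readBlock(0)
  console.log(block.length)

  await client.disconnect()
}
```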
76  @vates/nbd-client/nbdclient.integ.js  (new file)
@@ -0,0 +1,76 @@
'use strict'
const NbdClient = require('./index.js')
const { spawn } = require('node:child_process')
const fs = require('node:fs/promises')
const { test } = require('tap')
const tmp = require('tmp')
const { pFromCallback } = require('promise-toolbox')
const { asyncEach } = require('@vates/async-each')

const FILE_SIZE = 2 * 1024 * 1024

async function createTempFile(size) {
  const tmpPath = await pFromCallback(cb => tmp.file(cb))
  const data = Buffer.alloc(size, 0)
  for (let i = 0; i < size; i += 4) {
    data.writeUInt32BE(i, i)
  }
  await fs.writeFile(tmpPath, data)

  return tmpPath
}

test('it works with unsecured network', async tap => {
  const path = await createTempFile(FILE_SIZE)

  const nbdServer = spawn(
    'nbdkit',
    [
      'file',
      path,
      '--newstyle', //
      '--exit-with-parent',
      '--read-only',
      '--export-name=MY_SECRET_EXPORT',
    ],
    {
      stdio: ['inherit', 'inherit', 'inherit'],
    }
  )

  const client = new NbdClient({
    address: 'localhost',
    exportname: 'MY_SECRET_EXPORT',
    secure: false,
  })

  await client.connect()
  tap.equal(client.exportSize, BigInt(FILE_SIZE))
  const CHUNK_SIZE = 128 * 1024 // non default size
  const indexes = []
  for (let i = 0; i < FILE_SIZE / CHUNK_SIZE; i++) {
    indexes.push(i)
  }
  // read multiple blocks in parallel
  await asyncEach(
    indexes,
    async i => {
      const block = await client.readBlock(i, CHUNK_SIZE)
      let blockOk = true
      let firstFail
      for (let j = 0; j < CHUNK_SIZE; j += 4) {
        const wanted = i * CHUNK_SIZE + j
        const found = block.readUInt32BE(j)
        blockOk = blockOk && found === wanted
        if (!blockOk && firstFail === undefined) {
          firstFail = j
        }
      }
      tap.ok(blockOk, `check block ${i} content`)
    },
    { concurrency: 8 }
  )
  await client.disconnect()
  nbdServer.kill()
  await fs.unlink(path)
})
@@ -13,18 +13,17 @@
    "url": "https://vates.fr"
  },
  "license": "ISC",
  "version": "3.0.0",
  "version": "1.2.0",
  "engines": {
    "node": ">=14.0"
  },
  "main": "./index.mjs",
  "dependencies": {
    "@vates/async-each": "^1.0.0",
    "@vates/read-chunk": "^1.2.0",
    "@vates/read-chunk": "^1.1.1",
    "@xen-orchestra/async-map": "^0.1.2",
    "@xen-orchestra/log": "^0.6.0",
    "promise-toolbox": "^0.21.0",
    "xen-api": "^2.0.0"
    "xen-api": "^1.3.0"
  },
  "devDependencies": {
    "tap": "^16.3.0",
@@ -32,6 +31,6 @@
  },
  "scripts": {
    "postversion": "npm publish --access public",
    "test-integration": "tap --lines 97 --functions 95 --branches 74 --statements 97 tests/*.integ.mjs"
    "test-integration": "tap *.integ.js"
  }
}
@@ -1,182 +0,0 @@
Public Key Info:
	Public Key Algorithm: RSA
	Key Security Level: High (3072 bits)

modulus:
	00:be:92:be:df:de:0a:ab:38:fc:1a:c0:1a:58:4d:86
	b8:1f:25:10:7d:19:05:17:bf:02:3d:e9:ef:f8:c0:04
	5d:6f:98:de:5c:dd:c3:0f:e2:61:61:e4:b5:9c:42:ac
	3e:af:fd:30:10:e1:54:32:66:75:f6:80:90:85:05:a0
	6a:14:a2:6f:a7:2e:f0:f3:52:94:2a:f2:34:fc:0d:b4
	fb:28:5d:1c:11:5c:59:6e:63:34:ba:b3:fd:73:b1:48
	35:00:84:53:da:6a:9b:84:ab:64:b1:a1:2b:3a:d1:5a
	d7:13:7c:12:2a:4e:72:e9:96:d6:30:74:c5:71:05:14
	4b:2d:01:94:23:67:4e:37:3c:1e:c1:a0:bc:34:04:25
	21:11:fb:4b:6b:53:74:8f:90:93:57:af:7f:3b:78:d6
	a4:87:fe:7d:ed:20:11:8b:70:54:67:b8:c9:f5:c0:6b
	de:4e:e7:a5:79:ff:f7:ad:cf:10:57:f5:51:70:7b:54
	68:28:9e:b9:c2:10:7b:ab:aa:11:47:9f:ec:e6:2f:09
	44:4a:88:5b:dd:8c:10:b4:c4:03:25:06:d9:e0:9f:a0
	0d:cf:94:4b:3b:fa:a5:17:2c:e4:67:c4:17:6a:ab:d8
	c8:7a:16:41:b9:91:b7:9c:ae:8c:94:be:26:61:51:71
	c1:a6:39:39:97:75:28:a9:0e:21:ea:f0:bd:71:4a:8c
	e1:f8:1d:a9:22:2f:10:a8:1b:e5:a4:9a:fd:0f:fa:c6
	20:bc:96:99:79:c6:ba:a4:1f:3e:d4:91:c5:af:bb:71
	0a:5a:ef:69:9c:64:69:ce:5a:fe:3f:c2:24:f4:26:d4
	3d:ab:ab:9a:f0:f6:f1:b1:64:a9:f4:e2:34:6a:ab:2e
	95:47:b9:07:5a:39:c6:95:9c:a9:e8:ed:71:dd:c1:21
	16:c8:2d:4c:2c:af:06:9d:c6:fa:fe:c5:2a:6c:b4:c3
	d5:96:fc:5e:fd:ec:1c:30:b4:9d:cb:29:ef:a8:50:1c
	21:

public exponent:
	01:00:01:

private exponent:
	25:37:c5:7d:35:01:02:65:73:9e:c9:cb:9b:59:30:a9
	3e:b3:df:5f:7f:06:66:97:d0:19:45:59:af:4b:d8:ce
	62:a0:09:35:3b:bd:ff:99:27:89:95:bf:fe:0f:6b:52
	26:ce:9c:97:7f:5a:11:29:bf:79:ef:ab:c9:be:ca:90
	4d:0d:58:1e:df:65:01:30:2c:6d:a2:b5:c4:4f:ec:fb
	6b:eb:9b:32:ac:c5:6e:70:83:78:be:f4:0d:a7:1e:c1
	f3:22:e4:b9:70:3e:85:0f:6f:ef:dc:d8:f3:78:b5:73
	f1:83:36:8c:fa:9b:28:91:63:ad:3c:f0:de:5c:ae:94
	eb:ea:36:03:20:06:bf:74:c7:50:eb:52:36:1a:65:21
	eb:40:17:7f:93:61:dd:33:d0:02:bc:ec:6d:31:f1:41
	5a:a9:d1:f0:00:66:4c:c4:18:47:d5:67:e3:cd:bb:83
	44:07:ab:62:83:21:dc:d8:e6:89:37:08:bb:9d:ea:62
	c2:5d:ce:85:c2:dc:48:27:0c:a4:23:61:b7:30:e7:26
	44:dc:1e:5c:2e:16:35:2b:2e:a6:e6:a4:ce:1f:9b:e9
	fe:96:fa:49:1d:fb:2a:df:bc:bf:46:da:52:f8:37:8a
	84:ab:e4:73:e6:46:56:b5:b4:3d:e1:63:eb:02:8e:d7
	67:96:c4:dc:28:6d:6b:b6:0c:a3:0b:db:87:29:ad:f9
	ec:73:b6:55:a3:40:32:13:84:c7:2f:33:74:04:dc:42
	00:11:9c:fb:fc:62:35:b3:82:c3:3c:28:80:e8:09:a8
	97:c7:c1:2e:3d:27:fa:4f:9b:fc:c2:34:58:41:5c:a1
	e2:70:2e:2f:82:ad:bd:bd:8e:dd:23:12:25:de:89:70
	60:75:48:90:80:ac:55:74:51:6f:49:9e:7f:63:41:8b
	3c:b1:f5:c3:6b:4b:5a:50:a6:4d:38:e8:82:c2:04:c8
	30:fd:06:9b:c1:04:27:b6:63:3a:5e:f5:4d:00:c3:d1

prime1:
	00:f6:00:2e:7d:89:61:24:16:5e:87:ca:18:6c:03:b8
	b4:33:df:4a:a7:7f:db:ed:39:15:41:12:61:4f:4e:b4
	de:ab:29:d9:0c:6c:01:7e:53:2e:ee:e7:5f:a2:e4:6d
	c6:4b:07:4e:d8:a3:ae:45:06:97:bd:18:a3:e9:dd:29
	54:64:6d:f0:af:08:95:ae:ae:3e:71:63:76:2a:a1:18
	c4:b1:fc:bc:3d:42:15:74:b3:c5:38:1f:5d:92:f1:b2
	c6:3f:10:fe:35:1a:c6:b1:ce:70:38:ff:08:5c:de:61
	79:c7:50:91:22:4d:e9:c8:18:49:e2:5c:91:84:86:e2
	4d:0f:6e:9b:0d:81:df:aa:f3:59:75:56:e9:33:18:dd
	ab:39:da:e2:25:01:05:a1:6e:23:59:15:2c:89:35:c7
	ae:9c:c7:ea:88:9a:1a:f3:48:07:11:82:59:79:8c:62
	53:06:37:30:14:b3:82:b1:50:fc:ae:b8:f7:1c:57:44
	7d:

prime2:
	00:c6:51:cc:dc:88:2e:cf:98:90:10:19:e0:d3:a4:d1
	3f:dc:b0:29:d3:bb:26:ee:eb:00:17:17:d1:d1:bb:9b
	34:b1:4e:af:b5:6c:1c:54:53:b4:bb:55:da:f7:78:cd
	38:b4:2e:3a:8c:63:80:3b:64:9c:b4:2b:cd:dd:50:0b
	05:d2:00:7a:df:8e:c3:e6:29:e0:9c:d8:40:b7:11:09
	f4:38:df:f6:ed:93:1e:18:d4:93:fa:8d:ee:82:9c:0f
	c1:88:26:84:9d:4f:ae:8a:17:d5:55:54:4c:c6:0a:ac
	4d:ec:33:51:68:0f:4b:92:2e:04:57:fe:15:f5:00:46
	5c:8e:ad:09:2c:e7:df:d5:36:7a:4e:bd:da:21:22:d7
	58:b4:72:93:94:af:34:cc:e2:b8:d0:4f:0b:5d:97:08
	12:19:17:34:c5:15:49:00:48:56:13:b8:45:4e:3b:f8
	bc:d5:ab:d9:6d:c2:4a:cc:01:1a:53:4d:46:50:49:3b
	75:

coefficient:
	63:67:50:29:10:6a:85:a3:dc:51:90:20:76:86:8c:83
	8e:d5:ff:aa:75:fd:b5:f8:31:b0:96:6c:18:1d:5b:ed
	a4:2e:47:8d:9c:c2:1e:2c:a8:6d:4b:10:a5:c2:53:46
	8a:9a:84:91:d7:fc:f5:cc:03:ce:b9:3d:5c:01:d2:27
	99:7b:79:89:4f:a1:12:e3:05:5d:ee:10:f6:8c:e6:ce
	5e:da:32:56:6d:6f:eb:32:b4:75:7b:94:49:d8:2d:9e
	4d:19:59:2e:e4:0b:bc:95:df:df:65:67:a1:dd:c6:2b
	99:f4:76:e8:9f:fa:57:1d:ca:f9:58:a9:ce:9b:30:5c
	42:8a:ba:05:e7:e2:15:45:25:bc:e9:68:c1:8b:1a:37
	cc:e1:aa:45:2e:94:f5:81:47:1e:64:7f:c0:c1:b7:a8
	21:58:18:a9:a0:ed:e0:27:75:bf:65:81:6b:e4:1d:5a
	b7:7e:df:d8:28:c6:36:21:19:c8:6e:da:ca:9e:da:84

exp1:
	00:ba:d7:fe:77:a9:0d:98:2c:49:56:57:c0:5e:e2:20
	ba:f6:1f:26:03:bc:d0:5d:08:9b:45:16:61:c4:ab:e2
	22:b1:dc:92:17:a6:3d:28:26:a4:22:1e:a8:7b:ff:86
	05:33:5d:74:9c:85:0d:cb:2d:ab:b8:9b:6b:7c:28:57
	c8:da:92:ca:59:17:6b:21:07:05:34:78:37:fb:3e:ea
	a2:13:12:04:23:7e:fa:ee:ed:cf:e0:c5:a9:fb:ff:0a
	2b:1b:21:9c:02:d7:b8:8c:ba:60:70:59:fc:8f:14:f4
	f2:5a:d9:ad:b2:61:7d:2c:56:8e:5f:98:b1:89:f8:2d
	10:1c:a5:84:ad:28:b4:aa:92:34:a3:34:04:e1:a3:84
	52:16:1a:52:e3:8a:38:2d:99:8a:cd:91:90:87:12:ca
	fc:ab:e6:08:14:03:00:6f:41:88:e4:da:9d:7c:fd:8c
	7c:c4:de:cb:ed:1d:3f:29:d0:7a:6b:76:df:71:ae:32
	bd:

exp2:
	4a:e9:d3:6c:ea:b4:64:0e:c9:3c:8b:c9:f5:a8:a8:b2
	6a:f6:d0:95:fe:78:32:7f:ea:c4:ce:66:9f:c7:32:55
	b1:34:7c:03:18:17:8b:73:23:2e:30:bc:4a:07:03:de
	8b:91:7a:e4:55:21:b7:4d:c6:33:f8:e8:06:d5:99:94
	55:43:81:26:b9:93:1e:7a:6b:32:54:2d:fd:f9:1d:bd
	77:4e:82:c4:33:72:87:06:a5:ef:5b:75:e1:38:7a:6b
	2c:b7:00:19:3c:64:3e:1d:ca:a4:34:f7:db:47:64:d6
	fa:86:58:15:ea:d1:2d:22:dc:d9:30:4d:b3:02:ab:91
	83:03:b2:17:98:6f:60:e6:f7:44:8f:4a:ba:81:a2:bf
	0b:4a:cc:9c:b9:a2:44:52:d0:65:3f:b6:97:5f:d9:d8
	9c:49:bb:d1:46:bd:10:b2:42:71:a8:85:e5:8b:99:e6
	1b:00:93:5d:76:ab:32:6c:a8:39:17:53:9c:38:4d:91

Public Key PIN:
	pin-sha256:ISh/UeFjUG5Gwrpx6hMUGQPvg9wOKjOkHmRbs4YjZqs=
Public Key ID:
	sha256:21287f51e163506e46c2ba71ea13141903ef83dc0e2a33a41e645bb3862366ab
	sha1:1a48455111ac45fb5807c5cdb7b20b896c52f0b6

-----BEGIN RSA PRIVATE KEY-----
MIIG4wIBAAKCAYEAvpK+394Kqzj8GsAaWE2GuB8lEH0ZBRe/Aj3p7/jABF1vmN5c
3cMP4mFh5LWcQqw+r/0wEOFUMmZ19oCQhQWgahSib6cu8PNSlCryNPwNtPsoXRwR
XFluYzS6s/1zsUg1AIRT2mqbhKtksaErOtFa1xN8EipOcumW1jB0xXEFFEstAZQj
Z043PB7BoLw0BCUhEftLa1N0j5CTV69/O3jWpIf+fe0gEYtwVGe4yfXAa95O56V5
//etzxBX9VFwe1RoKJ65whB7q6oRR5/s5i8JREqIW92MELTEAyUG2eCfoA3PlEs7
+qUXLORnxBdqq9jIehZBuZG3nK6MlL4mYVFxwaY5OZd1KKkOIerwvXFKjOH4Haki
LxCoG+Wkmv0P+sYgvJaZeca6pB8+1JHFr7txClrvaZxkac5a/j/CJPQm1D2rq5rw
9vGxZKn04jRqqy6VR7kHWjnGlZyp6O1x3cEhFsgtTCyvBp3G+v7FKmy0w9WW/F79
7BwwtJ3LKe+oUBwhAgMBAAECggGAJTfFfTUBAmVznsnLm1kwqT6z319/BmaX0BlF
Wa9L2M5ioAk1O73/mSeJlb/+D2tSJs6cl39aESm/ee+ryb7KkE0NWB7fZQEwLG2i
tcRP7Ptr65syrMVucIN4vvQNpx7B8yLkuXA+hQ9v79zY83i1c/GDNoz6myiRY608
8N5crpTr6jYDIAa/dMdQ61I2GmUh60AXf5Nh3TPQArzsbTHxQVqp0fAAZkzEGEfV
Z+PNu4NEB6tigyHc2OaJNwi7nepiwl3OhcLcSCcMpCNhtzDnJkTcHlwuFjUrLqbm
pM4fm+n+lvpJHfsq37y/RtpS+DeKhKvkc+ZGVrW0PeFj6wKO12eWxNwobWu2DKML
24cprfnsc7ZVo0AyE4THLzN0BNxCABGc+/xiNbOCwzwogOgJqJfHwS49J/pPm/zC
NFhBXKHicC4vgq29vY7dIxIl3olwYHVIkICsVXRRb0mef2NBizyx9cNrS1pQpk04
6ILCBMgw/QabwQQntmM6XvVNAMPRAoHBAPYALn2JYSQWXofKGGwDuLQz30qnf9vt
ORVBEmFPTrTeqynZDGwBflMu7udfouRtxksHTtijrkUGl70Yo+ndKVRkbfCvCJWu
rj5xY3YqoRjEsfy8PUIVdLPFOB9dkvGyxj8Q/jUaxrHOcDj/CFzeYXnHUJEiTenI
GEniXJGEhuJND26bDYHfqvNZdVbpMxjdqzna4iUBBaFuI1kVLIk1x66cx+qImhrz
SAcRgll5jGJTBjcwFLOCsVD8rrj3HFdEfQKBwQDGUczciC7PmJAQGeDTpNE/3LAp
07sm7usAFxfR0bubNLFOr7VsHFRTtLtV2vd4zTi0LjqMY4A7ZJy0K83dUAsF0gB6
347D5ingnNhAtxEJ9Djf9u2THhjUk/qN7oKcD8GIJoSdT66KF9VVVEzGCqxN7DNR
aA9Lki4EV/4V9QBGXI6tCSzn39U2ek692iEi11i0cpOUrzTM4rjQTwtdlwgSGRc0
xRVJAEhWE7hFTjv4vNWr2W3CSswBGlNNRlBJO3UCgcEAutf+d6kNmCxJVlfAXuIg
uvYfJgO80F0Im0UWYcSr4iKx3JIXpj0oJqQiHqh7/4YFM110nIUNyy2ruJtrfChX
yNqSylkXayEHBTR4N/s+6qITEgQjfvru7c/gxan7/worGyGcAte4jLpgcFn8jxT0
8lrZrbJhfSxWjl+YsYn4LRAcpYStKLSqkjSjNATho4RSFhpS44o4LZmKzZGQhxLK
/KvmCBQDAG9BiOTanXz9jHzE3svtHT8p0Hprdt9xrjK9AoHASunTbOq0ZA7JPIvJ
9aiosmr20JX+eDJ/6sTOZp/HMlWxNHwDGBeLcyMuMLxKBwPei5F65FUht03GM/jo
BtWZlFVDgSa5kx56azJULf35Hb13ToLEM3KHBqXvW3XhOHprLLcAGTxkPh3KpDT3
20dk1vqGWBXq0S0i3NkwTbMCq5GDA7IXmG9g5vdEj0q6gaK/C0rMnLmiRFLQZT+2
l1/Z2JxJu9FGvRCyQnGoheWLmeYbAJNddqsybKg5F1OcOE2RAoHAY2dQKRBqhaPc
UZAgdoaMg47V/6p1/bX4MbCWbBgdW+2kLkeNnMIeLKhtSxClwlNGipqEkdf89cwD
zrk9XAHSJ5l7eYlPoRLjBV3uEPaM5s5e2jJWbW/rMrR1e5RJ2C2eTRlZLuQLvJXf
32Vnod3GK5n0duif+lcdyvlYqc6bMFxCiroF5+IVRSW86WjBixo3zOGqRS6U9YFH
HmR/wMG3qCFYGKmg7eAndb9lgWvkHVq3ft/YKMY2IRnIbtrKntqE
-----END RSA PRIVATE KEY-----
@@ -1,158 +0,0 @@
import { spawn, exec } from 'node:child_process'
import fs from 'node:fs/promises'
import { test } from 'tap'
import tmp from 'tmp'
import { pFromCallback } from 'promise-toolbox'
import { Socket } from 'node:net'
import { NBD_DEFAULT_PORT } from '../constants.mjs'
import assert from 'node:assert'
import MultiNbdClient from '../multi.mjs'

const CHUNK_SIZE = 1024 * 1024 // non default size
const FILE_SIZE = 1024 * 1024 * 9.5 // non aligned file size

async function createTempFile(size) {
  const tmpPath = await pFromCallback(cb => tmp.file(cb))
  const data = Buffer.alloc(size, 0)
  for (let i = 0; i < size; i += 4) {
    data.writeUInt32BE(i, i)
  }
  await fs.writeFile(tmpPath, data)

  return tmpPath
}

async function spawnNbdKit(path) {
  let tries = 5
  // wait for server to be ready

  const nbdServer = spawn(
    'nbdkit',
    [
      'file',
      path,
      '--newstyle', //
      '--exit-with-parent',
      '--read-only',
      '--export-name=MY_SECRET_EXPORT',
      '--tls=on',
      '--tls-certificates=./tests/',
      // '--tls-verify-peer',
      // '--verbose',
      '--exit-with-parent',
    ],
    {
      stdio: ['inherit', 'inherit', 'inherit'],
    }
  )
  nbdServer.on('error', err => {
    console.error(err)
  })
  do {
    try {
      const socket = new Socket()
      await new Promise((resolve, reject) => {
        socket.connect(NBD_DEFAULT_PORT, 'localhost')
        socket.once('error', reject)
        socket.once('connect', resolve)
      })
      socket.destroy()
      break
    } catch (err) {
      tries--
      if (tries <= 0) {
        throw err
      } else {
        await new Promise(resolve => setTimeout(resolve, 1000))
      }
    }
  } while (true)
  return nbdServer
}

async function killNbdKit() {
  return new Promise((resolve, reject) =>
    exec('pkill -9 -f -o nbdkit', err => {
      err ? reject(err) : resolve()
    })
  )
}

test('it works with unsecured network', async tap => {
  const path = await createTempFile(FILE_SIZE)

  let nbdServer = await spawnNbdKit(path)
  const client = new MultiNbdClient(
    {
      address: '127.0.0.1',
      exportname: 'MY_SECRET_EXPORT',
      cert: `-----BEGIN CERTIFICATE-----
MIIDazCCAlOgAwIBAgIUeHpQ0IeD6BmP2zgsv3LV3J4BI/EwDQYJKoZIhvcNAQEL
BQAwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yMzA1MTcxMzU1MzBaFw0yNDA1
MTYxMzU1MzBaMEUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw
HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggEiMA0GCSqGSIb3DQEB
AQUAA4IBDwAwggEKAoIBAQC/8wLopj/iZY6ijmpvgCJsl+zY0hQZQcIoaCs0H75u
8PPSzHedtOLURAkJeMmIS40UY/eIvHh7yZolevaSJLNT2Iolscvc2W9NCF4N1V6y
zs4pDzP+YPF7Q8ldNaQIX0bAk4PfaMSM+pLh67u+uI40732AfQqD01BNCTD/uHRB
lKnQuqQpe9UM9UzRRVejpu1r19D4dJruAm6y2SJVTeT4a1sSJixl6I1YPmt80FJh
gq9O2KRGbXp1xIjemWgW99MHg63pTgxEiULwdJOGgmqGRDzgZKJS5UUpxe/ViEO4
59I18vIkgibaRYhENgmnP3lIzTOLlUe07tbSML5RGBbBAgMBAAGjUzBRMB0GA1Ud
DgQWBBR/8+zYoL0H0LdWfULHg1LynFdSbzAfBgNVHSMEGDAWgBR/8+zYoL0H0LdW
fULHg1LynFdSbzAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQBD
OF5bTmbDEGoZ6OuQaI0vyya/T4FeaoWmh22gLeL6dEEmUVGJ1NyMTOvG9GiGJ8OM
QhD1uHJei45/bXOYIDGey2+LwLWye7T4vtRFhf8amYh0ReyP/NV4/JoR/U3pTSH6
tns7GZ4YWdwUhvOOlm17EQKVO/hP3t9mp74gcjdL4bCe5MYSheKuNACAakC1OR0U
ZakJMP9ijvQuq8spfCzrK+NbHKNHR9tEgQw+ax/t1Au4dGVtFbcoxqCrx2kTl0RP
CYu1Xn/FVPx1HoRgWc7E8wFhDcA/P3SJtfIQWHB9FzSaBflKGR4t8WCE2eE8+cTB
57ABhfYpMlZ4aHjuN1bL
-----END CERTIFICATE-----
`,
    },
    {
      nbdConcurrency: 1,
      readAhead: 2,
    }
  )

  await client.connect()
  tap.equal(client.exportSize, BigInt(FILE_SIZE))
  const indexes = []
  for (let i = 0; i < FILE_SIZE / CHUNK_SIZE; i++) {
    indexes.push(i)
  }
  const nbdIterator = client.readBlocks(function* () {
    for (const index of indexes) {
      yield { index, size: CHUNK_SIZE }
    }
  })
  let i = 0
  for await (const block of nbdIterator) {
    let blockOk = block.length === Math.min(CHUNK_SIZE, FILE_SIZE - CHUNK_SIZE * i)
    let firstFail
    for (let j = 0; j < block.length; j += 4) {
      const wanted = i * CHUNK_SIZE + j
      const found = block.readUInt32BE(j)
      blockOk = blockOk && found === wanted
      if (!blockOk && firstFail === undefined) {
        firstFail = j
      }
    }
    tap.ok(blockOk, `check block ${i} content ${block.length}`)
    i++

    // flaky server is flaky
    if (i % 7 === 0) {
      // kill the older nbdkit process
      await killNbdKit()
      nbdServer = await spawnNbdKit(path)
    }
  }
  assert.rejects(() => client.readBlock(100, CHUNK_SIZE))

  await client.disconnect()
  // double disconnection shouldn't pose any problem
  await client.disconnect()
  nbdServer.kill()
  await fs.unlink(path)
})
@@ -1,21 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDazCCAlOgAwIBAgIUeHpQ0IeD6BmP2zgsv3LV3J4BI/EwDQYJKoZIhvcNAQEL
BQAwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yMzA1MTcxMzU1MzBaFw0yNDA1
MTYxMzU1MzBaMEUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw
HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggEiMA0GCSqGSIb3DQEB
AQUAA4IBDwAwggEKAoIBAQC/8wLopj/iZY6ijmpvgCJsl+zY0hQZQcIoaCs0H75u
8PPSzHedtOLURAkJeMmIS40UY/eIvHh7yZolevaSJLNT2Iolscvc2W9NCF4N1V6y
zs4pDzP+YPF7Q8ldNaQIX0bAk4PfaMSM+pLh67u+uI40732AfQqD01BNCTD/uHRB
lKnQuqQpe9UM9UzRRVejpu1r19D4dJruAm6y2SJVTeT4a1sSJixl6I1YPmt80FJh
gq9O2KRGbXp1xIjemWgW99MHg63pTgxEiULwdJOGgmqGRDzgZKJS5UUpxe/ViEO4
59I18vIkgibaRYhENgmnP3lIzTOLlUe07tbSML5RGBbBAgMBAAGjUzBRMB0GA1Ud
DgQWBBR/8+zYoL0H0LdWfULHg1LynFdSbzAfBgNVHSMEGDAWgBR/8+zYoL0H0LdW
fULHg1LynFdSbzAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQBD
OF5bTmbDEGoZ6OuQaI0vyya/T4FeaoWmh22gLeL6dEEmUVGJ1NyMTOvG9GiGJ8OM
QhD1uHJei45/bXOYIDGey2+LwLWye7T4vtRFhf8amYh0ReyP/NV4/JoR/U3pTSH6
tns7GZ4YWdwUhvOOlm17EQKVO/hP3t9mp74gcjdL4bCe5MYSheKuNACAakC1OR0U
ZakJMP9ijvQuq8spfCzrK+NbHKNHR9tEgQw+ax/t1Au4dGVtFbcoxqCrx2kTl0RP
CYu1Xn/FVPx1HoRgWc7E8wFhDcA/P3SJtfIQWHB9FzSaBflKGR4t8WCE2eE8+cTB
57ABhfYpMlZ4aHjuN1bL
-----END CERTIFICATE-----
@@ -1,28 +0,0 @@
-----BEGIN PRIVATE KEY-----
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC/8wLopj/iZY6i
jmpvgCJsl+zY0hQZQcIoaCs0H75u8PPSzHedtOLURAkJeMmIS40UY/eIvHh7yZol
evaSJLNT2Iolscvc2W9NCF4N1V6yzs4pDzP+YPF7Q8ldNaQIX0bAk4PfaMSM+pLh
67u+uI40732AfQqD01BNCTD/uHRBlKnQuqQpe9UM9UzRRVejpu1r19D4dJruAm6y
2SJVTeT4a1sSJixl6I1YPmt80FJhgq9O2KRGbXp1xIjemWgW99MHg63pTgxEiULw
dJOGgmqGRDzgZKJS5UUpxe/ViEO459I18vIkgibaRYhENgmnP3lIzTOLlUe07tbS
ML5RGBbBAgMBAAECggEATLYiafcTHfgnZmjTOad0WoDnC4n9tVBV948WARlUooLS
duL3RQRHCLz9/ZaTuFA1XDpNcYyc/B/IZoU7aJGZR3+JSmJBjowpUphu+klVNNG4
i6lDRrzYlUI0hfdLjHsDTDBIKi91KcB0lix/VkvsrVQvDHwsiR2ZAIiVWAWQFKrR
5O3DhSTHbqyq47uR58rWr4Zf3zvZaUl841AS1yELzCiZqz7AenvyWphim0c0XA5d
I63CEShntHnEAA9OMcP8+BNf/3AmqB4welY+m8elB3aJNH+j7DKq/AWqaM5nl2PC
cS6qgpxwOyTxEOyj1xhwK5ZMRR3heW3NfutIxSOPlwKBgQDB9ZkrBeeGVtCISO7C
eCANzSLpeVrahTvaCSQLdPHsLRLDUc+5mxdpi3CaRlzYs3S1OWdAtyWX9mBryltF
qDPhCNjFDyHok4D3wLEWdS9oUVwEKUM8fOPW3tXLLiMM7p4862Qo7LqnqHzPqsnz
22iZo5yjcc7aLJ+VmFrbAowwOwKBgQD9WNCvczTd7Ymn7zEvdiAyNoS0OZ0orwEJ
zGaxtjqVguGklNfrb/UB+eKNGE80+YnMiSaFc9IQPetLntZdV0L7kWYdCI8kGDNA
DbVRCOp+z8DwAojlrb/zsYu23anQozT3WeHxVU66lNuyEQvSW2tJa8gN1htrD7uY
5KLibYrBMwKBgEM0iiHyJcrSgeb2/mO7o7+keJhVSDm3OInP6QFfQAQJihrLWiKB
rpcPjbCm+LzNUX8JqNEvpIMHB1nR/9Ye9frfSdzd5W3kzicKSVHywL5wkmWOtpFa
5Mcq5wFDtzlf5MxO86GKhRJauwRptRgdyhySKFApuva1x4XaCIEiXNjJAoGBAN82
t3c+HCBEv3o05rMYcrmLC1T3Rh6oQlPtwbVmByvfywsFEVCgrc/16MPD3VWhXuXV
GRmPuE8THxLbead30M5xhvShq+xzXgRbj5s8Lc9ZIHbW5OLoOS1vCtgtaQcoJOyi
Rs4pCVqe+QpktnO6lEZ2Libys+maTQEiwNibBxu9AoGAUG1V5aKMoXa7pmGeuFR6
ES+1NDiCt6yDq9BsLZ+e2uqvWTkvTGLLwvH6xf9a0pnnILd0AUTKAAaoUdZS6++E
cGob7fxMwEE+UETp0QBgLtfjtExMOFwr2avw8PV4CYEUkPUAm2OFB2Twh+d/PNfr
FAxF1rN47SBPNbFI8N4TFsg=
-----END PRIVATE KEY-----
@@ -1 +0,0 @@
../../scripts/npmignore
@@ -1,22 +0,0 @@
The MIT License (MIT)

Copyright (c) 2015 reedog117

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
@@ -1,127 +0,0 @@
forked from https://github.com/reedog117/node-vsphere-soap

# node-vsphere-soap

[![Join the chat at https://gitter.im/reedog117/node-vsphere-soap](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/reedog117/node-vsphere-soap?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)

This is a Node.js module to connect to VMware vCenter servers and/or ESXi hosts and perform operations using the [vSphere Web Services API]. If you're feeling really adventurous, you can use this module to port vSphere operations from other languages (such as the Perl, Python, and Go libraries that exist) and have fully native Node.js code controlling your VMware virtual infrastructure!

This is very much in alpha.

## Authors

- Patrick C - [@reedog117]

## Version

0.0.2-5

## Installation

```sh
$ npm install node-vsphere-soap --save
```

## Sample Code

### To connect to a vCenter server:

    var nvs = require('node-vsphere-soap');
    var vc = new nvs.Client(host, user, password, sslVerify);
    vc.once('ready', function() {
      // perform work here
    });
    vc.once('error', function(err) {
      // handle error here
    });

#### Arguments

- host = hostname or IP of vCenter/ESX/ESXi server
- user = username
- password = password
- sslVerify = true|false - set to false if you have self-signed/unverified certificates

#### Events

- ready = emits when session authenticated with server
- error = emits when there's an error
  - _err_ contains the error

#### Client instance variables

- serviceContent - ServiceContent object retrieved by RetrieveServiceContent API call
- userName - username of authenticated user
- fullName - full name of authenticated user

### To run a command:

    var vcCmd = vc.runCommand( commandToRun, arguments );
    vcCmd.once('result', function( result, raw, soapHeader) {
      // handle results
    });
    vcCmd.once('error', function( err) {
      // handle errors
    });

#### Arguments

- commandToRun = Method from the vSphere API
- arguments = JSON document containing arguments to send

#### Events

- result = emits when the command returns a result
  - _result_ contains the JSON-formatted result from the server
  - _raw_ contains the raw SOAP XML response from the server
  - _soapHeader_ contains any soapHeaders from the server
- error = emits when there's an error
  - _err_ contains the error

Make sure you check out tests/vsphere-soap.test.js for examples on how to create commands to run.

## Development

node-vsphere-soap uses a number of open source projects to work properly:

- [node.js] - evented I/O for the backend
- [node-soap] - SOAP client for Node.js
- [soap-cookie] - cookie authentication for the node-soap module
- [lodash] - for quickly manipulating JSON
- [lab] - testing engine
- [code] - assertion engine used with lab

Want to contribute? Great!

### Todo's

- Write More Tests
- Create Travis CI test harness with a fake vCenter Instance
- Add Code Comments

### Testing

I have been testing on a Mac with node v0.10.36 and both ESXi and vCenter 5.5.

To edit tests, edit the file **test/vsphere-soap.test.js**

To point the module at your own vCenter/ESXi host, edit **config-test.stub.js** and save it as **config-test.js**

To run test scripts:

```sh
$ npm test
```

## License

MIT

[vSphere Web Services API]: http://pubs.vmware.com/vsphere-55/topic/com.vmware.wssdk.apiref.doc/right-pane.html
[node-soap]: https://github.com/vpulim/node-soap
[node.js]: http://nodejs.org/
[soap-cookie]: https://github.com/shanestillwell/soap-cookie
[code]: https://github.com/hapijs/code
[lab]: https://github.com/hapijs/lab
[lodash]: https://lodash.com/
[@reedog117]: http://www.twitter.com/reedog117
@@ -1,230 +0,0 @@
/*

  node-vsphere-soap

  client.js

  This file creates the Client class

  - when the class is instantiated, a connection will be made to the ESXi/vCenter server to verify that the creds are good
  - upon a bad login, the connection will be terminated

*/

import { EventEmitter } from 'events'
import axios from 'axios'
import https from 'node:https'
import util from 'util'
import soap from 'soap'
import Cookie from 'soap-cookie' // required for session persistence

// Client class
// inherits from EventEmitter
// possible events: connect, error, ready

export function Client(vCenterHostname, username, password, sslVerify) {
  this.status = 'disconnected'
  this.reconnectCount = 0

  sslVerify = typeof sslVerify !== 'undefined' ? sslVerify : false

  EventEmitter.call(this)

  // sslVerify argument handling
  if (sslVerify) {
    this.clientopts = {}
  } else {
    this.clientopts = {
      request: axios.create({
        httpsAgent: new https.Agent({
          rejectUnauthorized: false,
        }),
      }),
    }
  }

  this.connectionInfo = {
    host: vCenterHostname,
    user: username,
    password,
    sslVerify,
  }

  this._loginArgs = {
    userName: this.connectionInfo.user,
    password: this.connectionInfo.password,
  }

  this._vcUrl = 'https://' + this.connectionInfo.host + '/sdk/vimService.wsdl'

  // connect to the vCenter / ESXi host
  this.on('connect', this._connect)
  this.emit('connect')

  // close session
  this.on('close', this._close)

  return this
}

util.inherits(Client, EventEmitter)

Client.prototype.runCommand = function (command, args) {
  const self = this
  let cmdargs
  if (!args || args === null) {
    cmdargs = {}
  } else {
    cmdargs = args
  }

  const emitter = new EventEmitter()

  // check if client has successfully connected
  if (self.status === 'ready' || self.status === 'connecting') {
    self.client.VimService.VimPort[command](cmdargs, function (err, result, raw, soapHeader) {
      if (err) {
        _soapErrorHandler(self, emitter, command, cmdargs, err)
      }
      if (command === 'Logout') {
        self.status = 'disconnected'
        process.removeAllListeners('beforeExit')
      }
      emitter.emit('result', result, raw, soapHeader)
    })
  } else {
    // if connection not ready or connecting, reconnect to instance
    if (self.status === 'disconnected') {
      self.emit('connect')
    }
    self.once('ready', function () {
      self.client.VimService.VimPort[command](cmdargs, function (err, result, raw, soapHeader) {
        if (err) {
          _soapErrorHandler(self, emitter, command, cmdargs, err)
        }
        if (command === 'Logout') {
          self.status = 'disconnected'
          process.removeAllListeners('beforeExit')
        }
        emitter.emit('result', result, raw, soapHeader)
      })
    })
  }

  return emitter
}

Client.prototype.close = function () {
  const self = this

  self.emit('close')
}

Client.prototype._connect = function () {
  const self = this

  if (self.status !== 'disconnected') {
    return
  }

  self.status = 'connecting'

  soap.createClient(
    self._vcUrl,
    self.clientopts,
    function (err, client) {
      if (err) {
        self.emit('error', err)
        throw err
      }

      self.client = client // save client for later use

      self
        .runCommand('RetrieveServiceContent', { _this: 'ServiceInstance' })
        .once('result', function (result, raw, soapHeader) {
          if (!result.returnval) {
            self.status = 'disconnected'
            self.emit('error', raw)
            return
          }

          self.serviceContent = result.returnval
          self.sessionManager = result.returnval.sessionManager
          const loginArgs = { _this: self.sessionManager, ...self._loginArgs }

          self
            .runCommand('Login', loginArgs)
            .once('result', function (result, raw, soapHeader) {
              self.authCookie = new Cookie(client.lastResponseHeaders)
              self.client.setSecurity(self.authCookie) // needed since vSphere SOAP WS uses cookies

              self.userName = result.returnval.userName
              self.fullName = result.returnval.fullName
              self.reconnectCount = 0

              self.status = 'ready'
              self.emit('ready')
              process.once('beforeExit', self._close)
            })
            .once('error', function (err) {
              self.status = 'disconnected'
              self.emit('error', err)
            })
        })
        .once('error', function (err) {
          self.status = 'disconnected'
          self.emit('error', err)
        })
    },
    self._vcUrl
  )
}

Client.prototype._close = function () {
  const self = this

  if (self.status === 'ready') {
    self
      .runCommand('Logout', { _this: self.sessionManager })
      .once('result', function () {
        self.status = 'disconnected'
      })
      .once('error', function () {
        /* ignore errors during disconnection */
        self.status = 'disconnected'
      })
  } else {
    self.status = 'disconnected'
  }
}

function _soapErrorHandler(self, emitter, command, args, err) {
  err = err || { body: 'general error' }

  if (err.body.match(/session is not authenticated/)) {
    self.status = 'disconnected'
    process.removeAllListeners('beforeExit')

    if (self.reconnectCount < 10) {
      self.reconnectCount += 1
      self
        .runCommand(command, args)
        .once('result', function (result, raw, soapHeader) {
          emitter.emit('result', result, raw, soapHeader)
        })
        .once('error', function (err) {
          emitter.emit('error', err.body)
          throw err
        })
    } else {
      emitter.emit('error', err.body)
      throw err
    }
  } else {
    emitter.emit('error', err.body)
    throw err
  }
}

// end
@@ -1,38 +0,0 @@
{
  "name": "@vates/node-vsphere-soap",
  "version": "2.0.0",
  "description": "interface to vSphere SOAP/WSDL from node for interfacing with vCenter or ESXi, forked from node-vsphere-soap",
  "main": "lib/client.mjs",
  "author": "reedog117",
  "repository": {
    "directory": "@vates/node-vsphere-soap",
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "dependencies": {
    "axios": "^1.4.0",
    "soap": "^1.0.0",
    "soap-cookie": "^0.10.1"
  },
  "devDependencies": {
    "test": "^3.3.0"
  },
  "keywords": [
    "vsphere",
    "vcenter",
    "api",
    "soap",
    "wsdl"
  ],
  "preferGlobal": false,
  "license": "MIT",
  "private": false,
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/node-vsphere-soap",
  "engines": {
    "node": ">=14"
  },
  "scripts": {
    "postversion": "npm publish --access public"
  }
}
@@ -1,11 +0,0 @@
// place your own credentials here for a vCenter or ESXi server
// this information will be used for connecting to a vCenter instance
// for module testing
// name the file config-test.js

export const vCenterTestCreds = {
  vCenterIP: 'vcsa',
  vCenterUser: 'vcuser',
  vCenterPassword: 'vcpw',
  vCenter: true,
}
@@ -1,138 +0,0 @@
/*
  vsphere-soap.test.js

  tests for the vCenterConnectionInstance class
*/

import assert from 'assert'
import { describe, it } from 'test'

import * as vc from '../lib/client.mjs'

// eslint-disable-next-line n/no-missing-import
import { vCenterTestCreds as TestCreds } from '../config-test.mjs'

const VItest = new vc.Client(TestCreds.vCenterIP, TestCreds.vCenterUser, TestCreds.vCenterPassword, false)

describe('Client object initialization:', function () {
  it('provides a successful login', { timeout: 5000 }, function (t, done) {
    VItest.once('ready', function () {
      assert.notEqual(VItest.userName, null)
      assert.notEqual(VItest.fullName, null)
      assert.notEqual(VItest.serviceContent, null)
      done()
    }).once('error', function (err) {
      console.error(err)
      // this should fail if there's a problem
      assert.notEqual(VItest.userName, null)
      assert.notEqual(VItest.fullName, null)
      assert.notEqual(VItest.serviceContent, null)
      done()
    })
  })
})

describe('Client reconnection test:', function () {
  it('can successfully reconnect', { timeout: 5000 }, function (t, done) {
    VItest.runCommand('Logout', { _this: VItest.serviceContent.sessionManager })
      .once('result', function (result) {
        // now we're logged out, so let's try running a command to test automatic re-login
        VItest.runCommand('CurrentTime', { _this: 'ServiceInstance' })
          .once('result', function (result) {
            assert(result.returnval instanceof Date)
            done()
          })
          .once('error', function (err) {
            console.error(err)
          })
      })
      .once('error', function (err) {
        console.error(err)
      })
  })
})

// these tests don't work yet
describe('Client tests - query commands:', function () {
  it('retrieves current time', { timeout: 5000 }, function (t, done) {
    VItest.runCommand('CurrentTime', { _this: 'ServiceInstance' }).once('result', function (result) {
      assert(result.returnval instanceof Date)
      done()
    })
  })

  it('retrieves current time 2 (check for event clobbering)', { timeout: 5000 }, function (t, done) {
    VItest.runCommand('CurrentTime', { _this: 'ServiceInstance' }).once('result', function (result) {
      assert(result.returnval instanceof Date)
      done()
    })
  })

  it('can obtain the names of all Virtual Machines in the inventory', { timeout: 20000 }, function (t, done) {
    // get property collector
    const propertyCollector = VItest.serviceContent.propertyCollector
    // get view manager
    const viewManager = VItest.serviceContent.viewManager
    // get root folder
    const rootFolder = VItest.serviceContent.rootFolder

    let containerView, objectSpec, traversalSpec, propertySpec, propertyFilterSpec
    // this is the equivalent to
    VItest.runCommand('CreateContainerView', {
      _this: viewManager,
      container: rootFolder,
      type: ['VirtualMachine'],
      recursive: true,
    }).once('result', function (result) {
      // build all the data structures needed to query all the vm names
      containerView = result.returnval

      objectSpec = {
        attributes: { 'xsi:type': 'ObjectSpec' }, // setting attributes xsi:type is important or else the server may mis-recognize types!
        obj: containerView,
        skip: true,
      }

      traversalSpec = {
        attributes: { 'xsi:type': 'TraversalSpec' },
        name: 'traverseEntities',
        type: 'ContainerView',
        path: 'view',
        skip: false,
      }

      objectSpec = { ...objectSpec, selectSet: [traversalSpec] }

      propertySpec = {
        attributes: { 'xsi:type': 'PropertySpec' },
        type: 'VirtualMachine',
        pathSet: ['name'],
      }

      propertyFilterSpec = {
        attributes: { 'xsi:type': 'PropertyFilterSpec' },
        propSet: [propertySpec],
        objectSet: [objectSpec],
      }
      // TODO: research why it fails if propSet is declared after objectSet

      VItest.runCommand('RetrievePropertiesEx', {
        _this: propertyCollector,
        specSet: [propertyFilterSpec],
        options: { attributes: { type: 'RetrieveOptions' } },
      })
        .once('result', function (result, raw) {
          assert.notEqual(result.returnval.objects, null)
          if (Array.isArray(result.returnval.objects)) {
            assert.strictEqual(result.returnval.objects[0].obj.attributes.type, 'VirtualMachine')
          } else {
            assert.strictEqual(result.returnval.objects.obj.attributes.type, 'VirtualMachine')
          }
          done()
        })
        .once('error', function (err) {
          console.error('\n\nlast request : ' + VItest.client.lastRequest, err)
        })
    })
  })
})
@@ -1,7 +1,6 @@
'use strict'

const assert = require('assert')
const isUtf8 = require('isutf8')

/**
 * Read a chunk of data from a stream.
@@ -22,41 +21,41 @@ const readChunk = (stream, size) =>
stream.errored != null
? Promise.reject(stream.errored)
: stream.closed || stream.readableEnded
? Promise.resolve(null)
: new Promise((resolve, reject) => {
if (size !== undefined) {
assert(size > 0)
? Promise.resolve(null)
: new Promise((resolve, reject) => {
if (size !== undefined) {
assert(size > 0)

// per Node documentation:
// > The size argument must be less than or equal to 1 GiB.
assert(size < 1073741824)
}
// per Node documentation:
// > The size argument must be less than or equal to 1 GiB.
assert(size < 1073741824)
}

function onEnd() {
resolve(null)
function onEnd() {
resolve(null)
removeListeners()
}
function onError(error) {
reject(error)
removeListeners()
}
function onReadable() {
const data = stream.read(size)
if (data !== null) {
resolve(data)
removeListeners()
}
function onError(error) {
reject(error)
removeListeners()
}
function onReadable() {
const data = stream.read(size)
if (data !== null) {
resolve(data)
removeListeners()
}
}
function removeListeners() {
stream.removeListener('end', onEnd)
stream.removeListener('error', onError)
stream.removeListener('readable', onReadable)
}
stream.on('end', onEnd)
stream.on('error', onError)
stream.on('readable', onReadable)
onReadable()
})
}
function removeListeners() {
stream.removeListener('end', onEnd)
stream.removeListener('error', onError)
stream.removeListener('readable', onReadable)
}
stream.on('end', onEnd)
stream.on('error', onError)
stream.on('readable', onReadable)
onReadable()
})
exports.readChunk = readChunk

/**
@@ -82,13 +81,6 @@ exports.readChunkStrict = async function readChunkStrict(stream, size) {

if (size !== undefined && chunk.length !== size) {
const error = new Error(`stream has ended with not enough data (actual: ${chunk.length}, expected: ${size})`)

// Buffer.isUtf8 is too recent for now
// @todo: replace the external package by Buffer.isUtf8 when the supported version of Node reaches 18

if (chunk.length < 1024 && isUtf8(chunk)) {
error.text = chunk.toString('utf8')
}
Object.defineProperties(error, {
chunk: {
value: chunk,
@@ -111,42 +103,42 @@ async function skip(stream, size) {
return stream.errored != null
? Promise.reject(stream.errored)
: size === 0 || stream.closed || stream.readableEnded
? Promise.resolve(0)
: new Promise((resolve, reject) => {
let left = size
function onEnd() {
resolve(size - left)
removeListeners()
}
function onError(error) {
reject(error)
removeListeners()
}
function onReadable() {
const data = stream.read()
left -= data === null ? 0 : data.length
if (left > 0) {
// continue to read
} else {
// if more than wanted has been read, push back the rest
if (left < 0) {
stream.unshift(data.slice(left))
}

resolve(size)
removeListeners()
? Promise.resolve(0)
: new Promise((resolve, reject) => {
let left = size
function onEnd() {
resolve(size - left)
removeListeners()
}
function onError(error) {
reject(error)
removeListeners()
}
function onReadable() {
const data = stream.read()
left -= data === null ? 0 : data.length
if (left > 0) {
// continue to read
} else {
// if more than wanted has been read, push back the rest
if (left < 0) {
stream.unshift(data.slice(left))
}

resolve(size)
removeListeners()
}
function removeListeners() {
stream.removeListener('end', onEnd)
stream.removeListener('error', onError)
stream.removeListener('readable', onReadable)
}
stream.on('end', onEnd)
stream.on('error', onError)
stream.on('readable', onReadable)
onReadable()
})
}
function removeListeners() {
stream.removeListener('end', onEnd)
stream.removeListener('error', onError)
stream.removeListener('readable', onReadable)
}
stream.on('end', onEnd)
stream.on('error', onError)
stream.on('readable', onReadable)
onReadable()
})
}
exports.skip = skip

@@ -102,37 +102,12 @@ describe('readChunkStrict', function () {
    assert.strictEqual(error.chunk, undefined)
  })

  it('throws if stream ends with not enough data, utf8', async () => {
  it('throws if stream ends with not enough data', async () => {
    const error = await rejectionOf(readChunkStrict(makeStream(['foo', 'bar']), 10))
    assert(error instanceof Error)
    assert.strictEqual(error.message, 'stream has ended with not enough data (actual: 6, expected: 10)')
    assert.strictEqual(error.text, 'foobar')
    assert.deepEqual(error.chunk, Buffer.from('foobar'))
  })

  it('throws if stream ends with not enough data, non utf8 ', async () => {
    const source = [Buffer.alloc(10, 128), Buffer.alloc(10, 128)]
    const error = await rejectionOf(readChunkStrict(makeStream(source), 30))
    assert(error instanceof Error)
    assert.strictEqual(error.message, 'stream has ended with not enough data (actual: 20, expected: 30)')
    assert.strictEqual(error.text, undefined)
    assert.deepEqual(error.chunk, Buffer.concat(source))
  })

  it('throws if stream ends with not enough data, utf8 , long data', async () => {
    const source = Buffer.from('a'.repeat(1500))
    const error = await rejectionOf(readChunkStrict(makeStream([source]), 2000))
    assert(error instanceof Error)
    assert.strictEqual(error.message, `stream has ended with not enough data (actual: 1500, expected: 2000)`)
    assert.strictEqual(error.text, undefined)
    assert.deepEqual(error.chunk, source)
  })

  it('succeed', async () => {
    const source = Buffer.from('a'.repeat(20))
    const chunk = await readChunkStrict(makeStream([source]), 10)
    assert.deepEqual(source.subarray(10), chunk)
  })
})

describe('skip', function () {
@@ -159,16 +134,6 @@ describe('skip', function () {
  it('returns less size if stream ends', async () => {
    assert.deepEqual(await skip(makeStream('foo bar'), 10), 7)
  })

  it('put back if it read too much', async () => {
    let source = makeStream(['foo', 'bar'])
    await skip(source, 1) // read part of data chunk
    const chunk = (await readChunkStrict(source, 2)).toString('utf-8')
    assert.strictEqual(chunk, 'oo')

    source = makeStream(['foo', 'bar'])
    assert.strictEqual(await skip(source, 3), 3) // read aligned with data chunk
  })
})

describe('skipStrict', function () {
@@ -179,9 +144,4 @@ describe('skipStrict', function () {
    assert.strictEqual(error.message, 'stream has ended with not enough data (actual: 7, expected: 10)')
    assert.deepEqual(error.bytesSkipped, 7)
  })
  it('succeed', async () => {
    const source = makeStream(['foo', 'bar', 'baz'])
    const res = await skipStrict(source, 4)
    assert.strictEqual(res, undefined)
  })
})

@@ -19,7 +19,7 @@
  "type": "git",
  "url": "https://github.com/vatesfr/xen-orchestra.git"
},
"version": "1.2.0",
"version": "1.1.1",
"engines": {
  "node": ">=8.10"
},
@@ -33,8 +33,5 @@
},
"devDependencies": {
  "test": "^3.2.1"
},
"dependencies": {
  "isutf8": "^4.0.0"
}
}

@@ -27,7 +27,7 @@
"license": "ISC",
"version": "0.1.0",
"engines": {
  "node": ">=12.3"
  "node": ">=10"
},
"scripts": {
  "postversion": "npm publish --access public",

@@ -2,8 +2,10 @@
import { Task } from '@vates/task'

const task = new Task({
  // this object will be sent in the *start* event
  properties: {
  // data in this object will be sent along the *start* event
  //
  // property names should be chosen as not to clash with properties used by `Task` or `combineEvents`
  data: {
    name: 'my task',
  },

@@ -14,15 +16,13 @@ const task = new Task({
    // this function is called each time this task or one of its subtasks changes state
    const { id, timestamp, type } = event
    if (type === 'start') {
      const { name, parentId, properties } = event
      const { name, parentId } = event
    } else if (type === 'end') {
      const { result, status } = event
    } else if (type === 'info' || type === 'warning') {
      const { data, message } = event
    } else if (type === 'property') {
      const { name, value } = event
    } else if (type === 'abortionRequested') {
      const { reason } = event
    }
  },
})
@@ -36,6 +36,7 @@ task.id
// - pending
// - success
// - failure
// - aborted
task.status

// Triggers the abort signal associated to the task.
@@ -88,30 +89,6 @@ const onProgress = makeOnProgress({
  onRootTaskStart(taskLog) {
    // `taskLog` is an object reflecting the state of this task and all its subtasks,
    // and will be mutated in real-time to reflect the changes of the task.

    // timestamp at which the task started
    taskLog.start

    // current status of the task as described in the previous section
    taskLog.status

    // undefined or a dictionary of properties attached to the task
    taskLog.properties

    // timestamp at which the abortion was requested, undefined otherwise
    taskLog.abortionRequestedAt

    // undefined or an array of infos emitted on the task
    taskLog.infos

    // undefined or an array of warnings emitted on the task
    taskLog.warnings

    // timestamp at which the task ended, undefined otherwise
    taskLog.end

    // undefined or the result value of the task
    taskLog.result
  },

  // This function is called each time a root task ends.
@@ -123,7 +100,7 @@ const onProgress = makeOnProgress({
  onTaskUpdate(taskLog) {},
})

Task.run({ properties: { name: 'my task' }, onProgress }, asyncFn)
Task.run({ data: { name: 'my task' }, onProgress }, asyncFn)
```

It can also be fed event logs directly:

@@ -18,8 +18,10 @@ npm install --save @vates/task
import { Task } from '@vates/task'

const task = new Task({
  // this object will be sent in the *start* event
  properties: {
  // data in this object will be sent along the *start* event
  //
  // property names should be chosen as not to clash with properties used by `Task` or `combineEvents`
  data: {
    name: 'my task',
  },

@@ -30,15 +32,13 @@ const task = new Task({
    // this function is called each time this task or one of its subtasks changes state
    const { id, timestamp, type } = event
    if (type === 'start') {
      const { name, parentId, properties } = event
      const { name, parentId } = event
    } else if (type === 'end') {
      const { result, status } = event
    } else if (type === 'info' || type === 'warning') {
      const { data, message } = event
    } else if (type === 'property') {
      const { name, value } = event
    } else if (type === 'abortionRequested') {
      const { reason } = event
    }
  },
})
@@ -52,6 +52,7 @@ task.id
// - pending
// - success
// - failure
// - aborted
task.status

// Triggers the abort signal associated to the task.
@@ -104,30 +105,6 @@ const onProgress = makeOnProgress({
  onRootTaskStart(taskLog) {
    // `taskLog` is an object reflecting the state of this task and all its subtasks,
    // and will be mutated in real-time to reflect the changes of the task.

    // timestamp at which the task started
    taskLog.start

    // current status of the task as described in the previous section
    taskLog.status

    // undefined or a dictionary of properties attached to the task
    taskLog.properties

    // timestamp at which the abortion was requested, undefined otherwise
    taskLog.abortionRequestedAt

    // undefined or an array of infos emitted on the task
    taskLog.infos

    // undefined or an array of warnings emitted on the task
    taskLog.warnings

    // timestamp at which the task ended, undefined otherwise
    taskLog.end

    // undefined or the result value of the task
    taskLog.result
  },

  // This function is called each time a root task ends.
@@ -139,7 +116,7 @@ const onProgress = makeOnProgress({
  onTaskUpdate(taskLog) {},
})

Task.run({ properties: { name: 'my task' }, onProgress }, asyncFn)
Task.run({ data: { name: 'my task' }, onProgress }, asyncFn)
```

It can also be fed event logs directly:

@@ -4,18 +4,36 @@ const assert = require('node:assert').strict

const noop = Function.prototype

function omit(source, keys, target = { __proto__: null }) {
  for (const key of Object.keys(source)) {
    if (!keys.has(key)) {
      target[key] = source[key]
    }
  }
  return target
}

const IGNORED_START_PROPS = new Set([
  'end',
  'infos',
  'properties',
  'result',
  'status',
  'tasks',
  'timestamp',
  'type',
  'warnings',
])

exports.makeOnProgress = function ({ onRootTaskEnd = noop, onRootTaskStart = noop, onTaskUpdate = noop }) {
  const taskLogs = new Map()
  return function onProgress(event) {
    const { id, type } = event
    let taskLog
    if (type === 'start') {
      taskLog = {
        id,
        properties: { __proto__: null, ...event.properties },
        start: event.timestamp,
        status: 'pending',
      }
      taskLog = omit(event, IGNORED_START_PROPS)
      taskLog.start = event.timestamp
      taskLog.status = 'pending'
      taskLogs.set(id, taskLog)

      const { parentId } = event
@@ -47,8 +65,6 @@ exports.makeOnProgress = function ({ onRootTaskEnd = noop, onRootTaskStart = noo
      taskLog.end = event.timestamp
      taskLog.result = event.result
      taskLog.status = event.status
    } else if (type === 'abortionRequested') {
      taskLog.abortionRequestedAt = event.timestamp
    }

    if (type === 'end' && taskLog.$root === taskLog) {

@@ -11,7 +11,7 @@ describe('makeOnProgress()', function () {
    const events = []
    let log
    const task = new Task({
      properties: { name: 'task' },
      data: { name: 'task' },
      onProgress: makeOnProgress({
        onRootTaskStart(log_) {
          assert.equal(log, undefined)
@@ -32,50 +32,36 @@ describe('makeOnProgress()', function () {

    assert.equal(events.length, 0)

    let i = 0

    await task.run(async () => {
      assert.equal(events[i++], 'onRootTaskStart')
      assert.equal(events[i++], 'onTaskUpdate')
      assert.equal(log.id, task.id)
      assert.equal(log.properties.name, 'task')
      assert(Math.abs(log.start - Date.now()) < 10)

      Task.set('name', 'new name')
      assert.equal(events[i++], 'onTaskUpdate')
      assert.equal(log.properties.name, 'new name')
      assert.equal(events[0], 'onRootTaskStart')
      assert.equal(events[1], 'onTaskUpdate')
      assert.equal(log.name, 'task')

      Task.set('progress', 0)
      assert.equal(events[i++], 'onTaskUpdate')
      assert.equal(events[2], 'onTaskUpdate')
      assert.equal(log.properties.progress, 0)

      Task.info('foo', {})
      assert.equal(events[i++], 'onTaskUpdate')
      assert.equal(events[3], 'onTaskUpdate')
      assert.deepEqual(log.infos, [{ data: {}, message: 'foo' }])

      const subtask = new Task({ properties: { name: 'subtask' } })
      await subtask.run(() => {
        assert.equal(events[i++], 'onTaskUpdate')
        assert.equal(log.tasks[0].properties.name, 'subtask')
      await Task.run({ data: { name: 'subtask' } }, () => {
        assert.equal(events[4], 'onTaskUpdate')
        assert.equal(log.tasks[0].name, 'subtask')

        Task.warning('bar', {})
        assert.equal(events[i++], 'onTaskUpdate')
        assert.equal(events[5], 'onTaskUpdate')
        assert.deepEqual(log.tasks[0].warnings, [{ data: {}, message: 'bar' }])

        subtask.abort()
        assert.equal(events[i++], 'onTaskUpdate')
        assert(Math.abs(log.tasks[0].abortionRequestedAt - Date.now()) < 10)
      })
      assert.equal(events[i++], 'onTaskUpdate')
      assert.equal(events[6], 'onTaskUpdate')
      assert.equal(log.tasks[0].status, 'success')

      Task.set('progress', 100)
      assert.equal(events[i++], 'onTaskUpdate')
      assert.equal(events[7], 'onTaskUpdate')
      assert.equal(log.properties.progress, 100)
    })
    assert.equal(events[i++], 'onRootTaskEnd')
    assert.equal(events[i++], 'onTaskUpdate')
    assert(Math.abs(log.end - Date.now()) < 10)
    assert.equal(events[8], 'onRootTaskEnd')
    assert.equal(events[9], 'onTaskUpdate')
    assert.equal(log.status, 'success')
  })
})

@@ -10,10 +10,11 @@ function define(object, property, value) {

const noop = Function.prototype

const ABORTED = 'aborted'
const FAILURE = 'failure'
const PENDING = 'pending'
const SUCCESS = 'success'
exports.STATUS = { FAILURE, PENDING, SUCCESS }
exports.STATUS = { ABORTED, FAILURE, PENDING, SUCCESS }

// stored in the global context so that various versions of the library can interact.
const asyncStorageKey = '@vates/task@0'
@@ -82,8 +83,8 @@ exports.Task = class Task {
    return this.#status
  }

  constructor({ properties, onProgress } = {}) {
    this.#startData = { properties }
  constructor({ data = {}, onProgress } = {}) {
    this.#startData = data

    if (onProgress !== undefined) {
      this.#onProgress = onProgress
@@ -104,16 +105,12 @@ exports.Task = class Task {

    const { signal } = this.#abortController
    signal.addEventListener('abort', () => {
      if (this.status === PENDING) {
      if (this.status === PENDING && !this.#running) {
        this.#maybeStart()

        this.#emit('abortionRequested', { reason: signal.reason })

        if (!this.#running) {
          const status = FAILURE
          this.#status = status
          this.#emit('end', { result: signal.reason, status })
        }
        const status = ABORTED
        this.#status = status
        this.#emit('end', { result: signal.reason, status })
      }
    })
  }
@@ -159,7 +156,9 @@ exports.Task = class Task {
      this.#running = false
      return result
    } catch (result) {
      const status = FAILURE
      const { signal } = this.#abortController
      const aborted = signal.aborted && result === signal.reason
      const status = aborted ? ABORTED : FAILURE

      this.#status = status
      this.#emit('end', { status, result })

@@ -15,7 +15,7 @@ function assertEvent(task, expected, eventIndex = -1) {
  assert.equal(typeof actual.id, 'string')
  assert.equal(typeof actual.timestamp, 'number')
  for (const keys of Object.keys(expected)) {
    assert.deepEqual(actual[keys], expected[keys])
    assert.equal(actual[keys], expected[keys])
  }
}

@@ -30,10 +30,10 @@ function createTask(opts) {
describe('Task', function () {
  describe('contructor', function () {
    it('data properties are passed to the start event', async function () {
      const properties = { foo: 0, bar: 1 }
      const task = createTask({ properties })
      const data = { foo: 0, bar: 1 }
      const task = createTask({ data })
      await task.run(noop)
      assertEvent(task, { type: 'start', properties }, 0)
      assertEvent(task, { ...data, type: 'start' }, 0)
    })
  })

@@ -79,22 +79,20 @@ describe('Task', function () {
        })
        .catch(noop)

      assert.equal(task.status, 'failure')
      assert.equal(task.status, 'aborted')

      assert.equal(task.$events.length, 3)
      assert.equal(task.$events.length, 2)
      assertEvent(task, { type: 'start' }, 0)
      assertEvent(task, { type: 'abortionRequested', reason }, 1)
      assertEvent(task, { type: 'end', status: 'failure', result: reason }, 2)
      assertEvent(task, { type: 'end', status: 'aborted', result: reason }, 1)
    })

    it('does not abort if the task fails without the abort reason', async function () {
      const task = createTask()
      const reason = {}
      const result = new Error()

      await task
        .run(() => {
          task.abort(reason)
          task.abort({})

          throw result
        })
@@ -102,20 +100,18 @@ describe('Task', function () {

      assert.equal(task.status, 'failure')

      assert.equal(task.$events.length, 3)
      assert.equal(task.$events.length, 2)
      assertEvent(task, { type: 'start' }, 0)
      assertEvent(task, { type: 'abortionRequested', reason }, 1)
      assertEvent(task, { type: 'end', status: 'failure', result }, 2)
      assertEvent(task, { type: 'end', status: 'failure', result }, 1)
    })

    it('does not abort if the task succeed', async function () {
      const task = createTask()
      const reason = {}
      const result = {}

      await task
        .run(() => {
          task.abort(reason)
          task.abort({})

          return result
        })
@@ -123,10 +119,9 @@ describe('Task', function () {

      assert.equal(task.status, 'success')

      assert.equal(task.$events.length, 3)
      assert.equal(task.$events.length, 2)
      assertEvent(task, { type: 'start' }, 0)
      assertEvent(task, { type: 'abortionRequested', reason }, 1)
      assertEvent(task, { type: 'end', status: 'success', result }, 2)
      assertEvent(task, { type: 'end', status: 'success', result }, 1)
    })

    it('aborts before task is running', function () {
@@ -135,12 +130,11 @@ describe('Task', function () {

      task.abort(reason)

      assert.equal(task.status, 'failure')
      assert.equal(task.status, 'aborted')

      assert.equal(task.$events.length, 3)
      assert.equal(task.$events.length, 2)
      assertEvent(task, { type: 'start' }, 0)
      assertEvent(task, { type: 'abortionRequested', reason }, 1)
      assertEvent(task, { type: 'end', status: 'failure', result: reason }, 2)
      assertEvent(task, { type: 'end', status: 'aborted', result: reason }, 1)
    })
  })

@@ -249,7 +243,7 @@ describe('Task', function () {
      assert.equal(task.status, 'failure')
    })

    it('changes to failure if aborted after run is complete', async function () {
    it('changes to aborted after run is complete', async function () {
      const task = createTask()
      await task
        .run(() => {
@@ -258,13 +252,13 @@ describe('Task', function () {
          Task.abortSignal.throwIfAborted()
        })
        .catch(noop)
      assert.equal(task.status, 'failure')
      assert.equal(task.status, 'aborted')
    })

    it('changes to failure if aborted when not running', function () {
    it('changes to aborted if aborted when not running', async function () {
      const task = createTask()
      task.abort()
      assert.equal(task.status, 'failure')
      assert.equal(task.status, 'aborted')
    })
  })

@@ -13,7 +13,7 @@
  "url": "https://vates.fr"
},
"license": "ISC",
"version": "0.2.0",
"version": "0.1.2",
"engines": {
  "node": ">=14"
},
@@ -35,7 +35,7 @@
  "test": "node--test"
},
"devDependencies": {
  "sinon": "^17.0.1",
  "sinon": "^15.0.1",
  "test": "^3.2.1"
}
}

@@ -1,5 +1,5 @@
import { asyncMap } from '@xen-orchestra/async-map'
import { RemoteAdapter } from '@xen-orchestra/backups/RemoteAdapter.mjs'
import { RemoteAdapter } from '@xen-orchestra/backups/RemoteAdapter.js'
import { getSyncedHandler } from '@xen-orchestra/fs'
import getopts from 'getopts'
import { basename, dirname } from 'path'

@@ -7,9 +7,9 @@
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"dependencies": {
  "@xen-orchestra/async-map": "^0.1.2",
  "@xen-orchestra/backups": "^0.44.3",
  "@xen-orchestra/fs": "^4.1.3",
  "filenamify": "^6.0.0",
  "@xen-orchestra/backups": "^0.36.0",
  "@xen-orchestra/fs": "^3.3.4",
  "filenamify": "^4.1.0",
  "getopts": "^2.2.5",
  "lodash": "^4.17.15",
  "promise-toolbox": "^0.21.0"
@@ -27,7 +27,7 @@
"scripts": {
  "postversion": "npm publish --access public"
},
"version": "1.0.14",
"version": "1.0.6",
"license": "AGPL-3.0-or-later",
"author": {
  "name": "Vates SAS",

@@ -1,17 +0,0 @@
import { Metadata } from './_runners/Metadata.mjs'
import { VmsRemote } from './_runners/VmsRemote.mjs'
import { VmsXapi } from './_runners/VmsXapi.mjs'

export function createRunner(opts) {
  const { type } = opts.job
  switch (type) {
    case 'backup':
      return new VmsXapi(opts)
    case 'mirrorBackup':
      return new VmsRemote(opts)
    case 'metadataBackup':
      return new Metadata(opts)
    default:
      throw new Error(`No runner for the backup type ${type}`)
  }
}
@@ -1,6 +1,8 @@
import { asyncMap } from '@xen-orchestra/async-map'
'use strict'

export class DurablePartition {
const { asyncMap } = require('@xen-orchestra/async-map')

exports.DurablePartition = class DurablePartition {
  // private resource API is used exceptionally to be able to separate resource creation and release
  #partitionDisposers = {}
@@ -1,6 +1,8 @@
import { Task } from './Task.mjs'
'use strict'

export class HealthCheckVmBackup {
const { Task } = require('./Task')

exports.HealthCheckVmBackup = class HealthCheckVmBackup {
  #restoredVm
  #timeout
  #xapi
73
@xen-orchestra/backups/ImportVmBackup.js
Normal file
@@ -0,0 +1,73 @@
'use strict'

const assert = require('assert')

const { formatFilenameDate } = require('./_filenameDate.js')
const { importIncrementalVm } = require('./_incrementalVm.js')
const { Task } = require('./Task.js')
const { watchStreamSize } = require('./_watchStreamSize.js')

exports.ImportVmBackup = class ImportVmBackup {
  constructor({ adapter, metadata, srUuid, xapi, settings: { newMacAddresses, mapVdisSrs = {} } = {} }) {
    this._adapter = adapter
    this._importIncrementalVmSettings = { newMacAddresses, mapVdisSrs }
    this._metadata = metadata
    this._srUuid = srUuid
    this._xapi = xapi
  }

  async run() {
    const adapter = this._adapter
    const metadata = this._metadata
    const isFull = metadata.mode === 'full'

    const sizeContainer = { size: 0 }

    let backup
    if (isFull) {
      backup = await adapter.readFullVmBackup(metadata)
      watchStreamSize(backup, sizeContainer)
    } else {
      assert.strictEqual(metadata.mode, 'delta')

      const ignoredVdis = new Set(
        Object.entries(this._importIncrementalVmSettings.mapVdisSrs)
          .filter(([_, srUuid]) => srUuid === null)
          .map(([vdiUuid]) => vdiUuid)
      )
      backup = await adapter.readIncrementalVmBackup(metadata, ignoredVdis)
      Object.values(backup.streams).forEach(stream => watchStreamSize(stream, sizeContainer))
    }

    return Task.run(
      {
        name: 'transfer',
      },
      async () => {
        const xapi = this._xapi
        const srRef = await xapi.call('SR.get_by_uuid', this._srUuid)

        const vmRef = isFull
          ? await xapi.VM_import(backup, srRef)
          : await importIncrementalVm(backup, await xapi.getRecord('SR', srRef), {
              ...this._importIncrementalVmSettings,
              detectBase: false,
            })

        await Promise.all([
          xapi.call('VM.add_tags', vmRef, 'restored from backup'),
          xapi.call(
            'VM.set_name_label',
            vmRef,
            `${metadata.vm.name_label} (${formatFilenameDate(metadata.timestamp)})`
          ),
        ])

        return {
          size: sizeContainer.size,
          id: await xapi.getField('VM', vmRef, 'uuid'),
        }
      }
    )
  }
}
@@ -1,280 +0,0 @@
import assert from 'node:assert'

import { formatFilenameDate } from './_filenameDate.mjs'
import { importIncrementalVm } from './_incrementalVm.mjs'
import { Task } from './Task.mjs'
import { watchStreamSize } from './_watchStreamSize.mjs'
import { VhdNegative, VhdSynthetic } from 'vhd-lib'
import { decorateClass } from '@vates/decorate-with'
import { createLogger } from '@xen-orchestra/log'
import { dirname, join } from 'node:path'
import pickBy from 'lodash/pickBy.js'
import { defer } from 'golike-defer'

const { debug, info, warn } = createLogger('xo:backups:importVmBackup')
async function resolveUuid(xapi, cache, uuid, type) {
  if (uuid == null) {
    return uuid
  }
  const ref = cache.get(uuid)
  if (ref === undefined) {
    cache.set(uuid, xapi.call(`${type}.get_by_uuid`, uuid))
  }
  return cache.get(uuid)
}
export class ImportVmBackup {
  constructor({
    adapter,
    metadata,
    srUuid,
    xapi,
    settings: { additionnalVmTag, newMacAddresses, mapVdisSrs = {}, useDifferentialRestore = false } = {},
  }) {
    this._adapter = adapter
    this._importIncrementalVmSettings = { additionnalVmTag, newMacAddresses, mapVdisSrs, useDifferentialRestore }
    this._metadata = metadata
    this._srUuid = srUuid
    this._xapi = xapi
  }

  async #getPathOfVdiSnapshot(snapshotUuid) {
    const metadata = this._metadata
    if (this._pathToVdis === undefined) {
      const backups = await this._adapter.listVmBackups(
        this._metadata.vm.uuid,
        ({ mode, timestamp }) => mode === 'delta' && timestamp >= metadata.timestamp
      )
      const map = new Map()
      for (const backup of backups) {
        for (const [vdiRef, vdi] of Object.entries(backup.vdis)) {
          map.set(vdi.uuid, backup.vhds[vdiRef])
        }
      }
      this._pathToVdis = map
    }
    return this._pathToVdis.get(snapshotUuid)
  }

  async _reuseNearestSnapshot($defer, ignoredVdis) {
    const metadata = this._metadata
    const { mapVdisSrs } = this._importIncrementalVmSettings
    const { vbds, vhds, vifs, vm, vmSnapshot } = metadata
    const streams = {}
    const metdataDir = dirname(metadata._filename)
    const vdis = ignoredVdis === undefined ? metadata.vdis : pickBy(metadata.vdis, vdi => !ignoredVdis.has(vdi.uuid))

    for (const [vdiRef, vdi] of Object.entries(vdis)) {
      const vhdPath = join(metdataDir, vhds[vdiRef])

      let xapiDisk
      try {
        xapiDisk = await this._xapi.getRecordByUuid('VDI', vdi.$snapshot_of$uuid)
      } catch (err) {
        // if this disk is not present anymore, fall back to default restore
        warn(err)
      }

      let snapshotCandidate, backupCandidate
      if (xapiDisk !== undefined) {
        debug('found disk, will search its snapshots', { snapshots: xapiDisk.snapshots })
        for (const snapshotRef of xapiDisk.snapshots) {
          const snapshot = await this._xapi.getRecord('VDI', snapshotRef)
          debug('handling snapshot', { snapshot })

          // take only the first snapshot
          if (snapshotCandidate && snapshotCandidate.snapshot_time < snapshot.snapshot_time) {
            debug('already got a better candidate')
            continue
          }

          // does it have a corresponding backup more recent than the metadata?
          const pathToSnapshotData = await this.#getPathOfVdiSnapshot(snapshot.uuid)
          if (pathToSnapshotData === undefined) {
            debug('no backup linked to this snapshot')
            continue
          }
          if (snapshot.$SR.uuid !== (mapVdisSrs[vdi.$snapshot_of$uuid] ?? this._srUuid)) {
            debug('not restored on the same SR', { snapshotSr: snapshot.$SR.uuid, mapVdisSrs, srUuid: this._srUuid })
            continue
          }

          debug('got a candidate', pathToSnapshotData)

          snapshotCandidate = snapshot
          backupCandidate = pathToSnapshotData
        }
      }

      let stream
      const backupWithSnapshotPath = join(metdataDir, backupCandidate ?? '')
      if (vhdPath === backupWithSnapshotPath) {
        // all the data are already on the host
        debug('direct reuse of a snapshot')
        stream = null
        vdis[vdiRef].baseVdi = snapshotCandidate
        // go to the next disk, we won't use this stream
        continue
      }

      let disposableDescendants

      const disposableSynthetic = await VhdSynthetic.fromVhdChain(this._adapter._handler, vhdPath)

      // this will also clean if another disk of this VM backup fails
      // if the user really only needs to restore the non-failing disks, they can retry with ignoredVdis
      let disposed = false
      const disposeOnce = async () => {
        if (!disposed) {
          disposed = true
          try {
            await disposableDescendants?.dispose()
            await disposableSynthetic?.dispose()
          } catch (error) {
            warn('openVhd: failed to dispose VHDs', { error })
          }
        }
      }
      $defer.onFailure(() => disposeOnce())

      const parentVhd = disposableSynthetic.value
      await parentVhd.readBlockAllocationTable()
      debug('got vhd synthetic of parents', parentVhd.length)

      if (snapshotCandidate !== undefined) {
        try {
          debug('will try to use differential restore', {
            backupWithSnapshotPath,
            vhdPath,
            vdiRef,
          })

          disposableDescendants = await VhdSynthetic.fromVhdChain(this._adapter._handler, backupWithSnapshotPath, {
            until: vhdPath,
          })
          const descendantsVhd = disposableDescendants.value
          await descendantsVhd.readBlockAllocationTable()
          debug('got vhd synthetic of descendants')
          const negativeVhd = new VhdNegative(parentVhd, descendantsVhd)
          debug('got vhd negative')

          // update the stream with the negative vhd stream
          stream = await negativeVhd.stream()
          vdis[vdiRef].baseVdi = snapshotCandidate
        } catch (err) {
          // can be a broken VHD chain, a vhd chain with a key backup, ....
          // not an irrecoverable error, don't dispose parentVhd, and fall back to full restore
          warn(`can't use differential restore`, err)
          disposableDescendants?.dispose()
        }
      }
      // didn't make a negative stream: fall back to the classic stream
      if (stream === undefined) {
        debug('use legacy restore')
        stream = await parentVhd.stream()
      }

      stream.on('end', disposeOnce)
      stream.on('close', disposeOnce)
      stream.on('error', disposeOnce)
      info('everything is ready, will transfer', stream.length)
      streams[`${vdiRef}.vhd`] = stream
    }
    return {
      streams,
      vbds,
      vdis,
      version: '1.0.0',
      vifs,
      vm: { ...vm, suspend_VDI: vmSnapshot.suspend_VDI },
    }
  }

  async #decorateIncrementalVmMetadata() {
    const { additionnalVmTag, mapVdisSrs, useDifferentialRestore } = this._importIncrementalVmSettings

    const ignoredVdis = new Set(
      Object.entries(mapVdisSrs)
        .filter(([_, srUuid]) => srUuid === null)
        .map(([vdiUuid]) => vdiUuid)
    )
    let backup
    if (useDifferentialRestore) {
      backup = await this._reuseNearestSnapshot(ignoredVdis)
    } else {
      backup = await this._adapter.readIncrementalVmBackup(this._metadata, ignoredVdis)
    }
    const xapi = this._xapi

    const cache = new Map()
    const mapVdisSrRefs = {}
    if (additionnalVmTag !== undefined) {
      backup.vm.tags.push(additionnalVmTag)
    }
    for (const [vdiUuid, srUuid] of Object.entries(mapVdisSrs)) {
      mapVdisSrRefs[vdiUuid] = await resolveUuid(xapi, cache, srUuid, 'SR')
    }
    const srRef = await resolveUuid(xapi, cache, this._srUuid, 'SR')
    Object.values(backup.vdis).forEach(vdi => {
      vdi.SR = mapVdisSrRefs[vdi.uuid] ?? srRef
    })
    return backup
  }

  async run() {
    const adapter = this._adapter
    const metadata = this._metadata
    const isFull = metadata.mode === 'full'

    const sizeContainer = { size: 0 }
    const { newMacAddresses } = this._importIncrementalVmSettings
    let backup
    if (isFull) {
      backup = await adapter.readFullVmBackup(metadata)
      watchStreamSize(backup, sizeContainer)
    } else {
      assert.strictEqual(metadata.mode, 'delta')

      backup = await this.#decorateIncrementalVmMetadata()
      Object.values(backup.streams).forEach(stream => watchStreamSize(stream, sizeContainer))
    }

    return Task.run(
      {
        name: 'transfer',
      },
      async () => {
        const xapi = this._xapi
        const srRef = await xapi.call('SR.get_by_uuid', this._srUuid)

        const vmRef = isFull
          ? await xapi.VM_import(backup, srRef)
          : await importIncrementalVm(backup, await xapi.getRecord('SR', srRef), {
              newMacAddresses,
            })

        await Promise.all([
          xapi.call('VM.add_tags', vmRef, 'restored from backup'),
          xapi.call(
            'VM.set_name_label',
            vmRef,
            `${metadata.vm.name_label} (${formatFilenameDate(metadata.timestamp)})`
          ),
          xapi.call(
            'VM.set_name_description',
            vmRef,
            `Restored on ${formatFilenameDate(+new Date())} from ${adapter._handler._remote.name} -
            ${metadata.vm.name_description}
            `
          ),
        ])

        return {
          size: sizeContainer.size,
          id: await xapi.getField('VM', vmRef, 'uuid'),
        }
      }
    )
  }
}

decorateClass(ImportVmBackup, { _reuseNearestSnapshot: defer })
@@ -1,13 +1,16 @@
import { asyncMap } from '@xen-orchestra/async-map'
'use strict'

import { DIR_XO_POOL_METADATA_BACKUPS } from '../RemoteAdapter.mjs'
import { forkStreamUnpipe } from './_forkStreamUnpipe.mjs'
import { formatFilenameDate } from '../_filenameDate.mjs'
import { Task } from '../Task.mjs'
const { asyncMap } = require('@xen-orchestra/async-map')

export const PATH_DB_DUMP = '/pool/xmldbdump'
const { DIR_XO_POOL_METADATA_BACKUPS } = require('./RemoteAdapter.js')
const { formatFilenameDate } = require('./_filenameDate.js')
const { Task } = require('./Task.js')
const { forkStreamUnpipe } = require('./_backupJob/forkStreamUnpipe.js')

export class PoolMetadataBackup {
const PATH_DB_DUMP = '/pool/xmldbdump'
exports.PATH_DB_DUMP = PATH_DB_DUMP

exports.PoolMetadataBackup = class PoolMetadataBackup {
  constructor({ config, job, pool, remoteAdapters, schedule, settings }) {
    this._config = config
    this._job = job
@@ -1,39 +1,43 @@
import { asyncEach } from '@vates/async-each'
import { asyncMap, asyncMapSettled } from '@xen-orchestra/async-map'
import { compose } from '@vates/compose'
import { createLogger } from '@xen-orchestra/log'
import { createVhdDirectoryFromStream, openVhd, VhdAbstract, VhdDirectory, VhdSynthetic } from 'vhd-lib'
import { decorateMethodsWith } from '@vates/decorate-with'
import { deduped } from '@vates/disposable/deduped.js'
import { dirname, join, resolve } from 'node:path'
import { execFile } from 'child_process'
import { mount } from '@vates/fuse-vhd'
import { readdir, lstat } from 'node:fs/promises'
import { synchronized } from 'decorator-synchronized'
import { v4 as uuidv4 } from 'uuid'
import { ZipFile } from 'yazl'
import Disposable from 'promise-toolbox/Disposable'
import fromCallback from 'promise-toolbox/fromCallback'
import fromEvent from 'promise-toolbox/fromEvent'
import groupBy from 'lodash/groupBy.js'
import pDefer from 'promise-toolbox/defer'
import pickBy from 'lodash/pickBy.js'
import tar from 'tar'
import zlib from 'zlib'
'use strict'

import { BACKUP_DIR } from './_getVmBackupDir.mjs'
import { cleanVm } from './_cleanVm.mjs'
import { formatFilenameDate } from './_filenameDate.mjs'
import { getTmpDir } from './_getTmpDir.mjs'
import { isMetadataFile } from './_backupType.mjs'
import { isValidXva } from './_isValidXva.mjs'
import { listPartitions, LVM_PARTITION_TYPE } from './_listPartitions.mjs'
import { lvs, pvs } from './_lvm.mjs'
import { watchStreamSize } from './_watchStreamSize.mjs'
const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
const { synchronized } = require('decorator-synchronized')
const Disposable = require('promise-toolbox/Disposable')
const fromCallback = require('promise-toolbox/fromCallback')
const fromEvent = require('promise-toolbox/fromEvent')
const pDefer = require('promise-toolbox/defer')
const groupBy = require('lodash/groupBy.js')
const pickBy = require('lodash/pickBy.js')
const { dirname, join, normalize, resolve } = require('path')
const { createLogger } = require('@xen-orchestra/log')
const { createVhdDirectoryFromStream, openVhd, VhdAbstract, VhdDirectory, VhdSynthetic } = require('vhd-lib')
const { deduped } = require('@vates/disposable/deduped.js')
const { decorateMethodsWith } = require('@vates/decorate-with')
const { compose } = require('@vates/compose')
const { execFile } = require('child_process')
const { readdir, lstat } = require('fs-extra')
const { v4: uuidv4 } = require('uuid')
const { ZipFile } = require('yazl')
const zlib = require('zlib')

export const DIR_XO_CONFIG_BACKUPS = 'xo-config-backups'
const { BACKUP_DIR } = require('./_getVmBackupDir.js')
const { cleanVm } = require('./_cleanVm.js')
const { formatFilenameDate } = require('./_filenameDate.js')
const { getTmpDir } = require('./_getTmpDir.js')
const { isMetadataFile } = require('./_backupType.js')
const { isValidXva } = require('./_isValidXva.js')
const { listPartitions, LVM_PARTITION_TYPE } = require('./_listPartitions.js')
const { lvs, pvs } = require('./_lvm.js')
const { watchStreamSize } = require('./_watchStreamSize')
// @todo: this import is marked extraneous, should be fixed when lib is published
const { mount } = require('@vates/fuse-vhd')
const { asyncEach } = require('@vates/async-each')

export const DIR_XO_POOL_METADATA_BACKUPS = 'xo-pool-metadata-backups'
const DIR_XO_CONFIG_BACKUPS = 'xo-config-backups'
exports.DIR_XO_CONFIG_BACKUPS = DIR_XO_CONFIG_BACKUPS

const DIR_XO_POOL_METADATA_BACKUPS = 'xo-pool-metadata-backups'
exports.DIR_XO_POOL_METADATA_BACKUPS = DIR_XO_POOL_METADATA_BACKUPS

const { debug, warn } = createLogger('xo:backups:RemoteAdapter')

@@ -42,23 +46,20 @@ const compareTimestamp = (a, b) => a.timestamp - b.timestamp
const noop = Function.prototype

const resolveRelativeFromFile = (file, path) => resolve('/', dirname(file), path).slice(1)
const makeRelative = path => resolve('/', path).slice(1)
const resolveSubpath = (root, path) => resolve(root, makeRelative(path))

async function addZipEntries(zip, realBasePath, virtualBasePath, relativePaths) {
for (const relativePath of relativePaths) {
const realPath = join(realBasePath, relativePath)
const virtualPath = join(virtualBasePath, relativePath)
const resolveSubpath = (root, path) => resolve(root, `.${resolve('/', path)}`)

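// editor's note (added comment, not part of the original diff): both resolveSubpath
// variants normalize the user-supplied path against '/' before joining it to root, so a
// crafted path cannot escape the mount point, e.g.
//   resolveSubpath('/mnt/disk', '../../etc/passwd') === '/mnt/disk/etc/passwd'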
const stats = await lstat(realPath)
const { mode, mtime } = stats
const opts = { mode, mtime }
if (stats.isDirectory()) {
zip.addEmptyDirectory(virtualPath, opts)
await addZipEntries(zip, realPath, virtualPath, await readdir(realPath))
} else if (stats.isFile()) {
zip.addFile(realPath, virtualPath, opts)
}
async function addDirectory(files, realPath, metadataPath) {
const stats = await lstat(realPath)
if (stats.isDirectory()) {
await asyncMap(await readdir(realPath), file =>
addDirectory(files, realPath + '/' + file, metadataPath + '/' + file)
)
} else if (stats.isFile()) {
files.push({
realPath,
metadataPath,
})
}
}

@@ -75,7 +76,7 @@ const debounceResourceFactory = factory =>
return this._debounceResource(factory.apply(this, arguments))
}

export class RemoteAdapter {
class RemoteAdapter {
constructor(
handler,
{ debounceResource = res => res, dirMode, vhdDirectoryCompression, useGetDiskLegacy = false } = {}
@@ -186,6 +187,17 @@ export class RemoteAdapter {
})
}

async *_usePartitionFiles(diskId, partitionId, paths) {
const path = yield this.getPartition(diskId, partitionId)

const files = []
await asyncMap(paths, file =>
addDirectory(files, resolveSubpath(path, file), normalize('./' + file).replace(/\/+$/, ''))
)

return files
}

// check if we will be allowed to merge a VHD created in this adapter
// with the vhd at path `path`
async isMergeableParent(packedParentUid, path) {
@@ -202,24 +214,15 @@ export class RemoteAdapter {
})
}

fetchPartitionFiles(diskId, partitionId, paths, format) {
fetchPartitionFiles(diskId, partitionId, paths) {
const { promise, reject, resolve } = pDefer()
Disposable.use(
async function* () {
const path = yield this.getPartition(diskId, partitionId)
let outputStream

if (format === 'tgz') {
outputStream = tar.c({ cwd: path, gzip: true }, paths.map(makeRelative))
} else if (format === 'zip') {
const zip = new ZipFile()
await addZipEntries(zip, path, '', paths.map(makeRelative))
zip.end()
;({ outputStream } = zip)
} else {
throw new Error('unsupported format ' + format)
}

const files = yield this._usePartitionFiles(diskId, partitionId, paths)
const zip = new ZipFile()
files.forEach(({ realPath, metadataPath }) => zip.addFile(realPath, metadataPath))
zip.end()
const { outputStream } = zip
resolve(outputStream)
await fromEvent(outputStream, 'end')
}.bind(this)
@@ -249,7 +252,7 @@ export class RemoteAdapter {
)
}

async deleteDeltaVmBackups(backups) {
async deleteIncrementalVmBackups(backups) {
const handler = this._handler

// this will delete the json, unused VHDs will be detected by `cleanVm`
@@ -301,7 +304,7 @@ export class RemoteAdapter {
}

await Promise.all([
delta !== undefined && this.deleteDeltaVmBackups(delta),
delta !== undefined && this.deleteIncrementalVmBackups(delta),
full !== undefined && this.deleteFullVmBackups(full),
])

@@ -330,7 +333,7 @@ export class RemoteAdapter {
const RE_VHDI = /^vhdi(\d+)$/
const handler = this._handler

const diskPath = handler.getFilePath('/' + diskId)
const diskPath = handler._getFilePath('/' + diskId)
const mountDir = yield getTmpDir()
await fromCallback(execFile, 'vhdimount', [diskPath, mountDir])
try {
@@ -401,27 +404,20 @@ export class RemoteAdapter {
return `${baseName}.vhd`
}

async listAllVms() {
async listAllVmBackups() {
const handler = this._handler
const vmsUuids = []
await asyncEach(await handler.list(BACKUP_DIR), async entry => {

const backups = { __proto__: null }
await asyncMap(await handler.list(BACKUP_DIR), async entry => {
// ignore hidden and lock files
if (entry[0] !== '.' && !entry.endsWith('.lock')) {
vmsUuids.push(entry)
const vmBackups = await this.listVmBackups(entry)
if (vmBackups.length !== 0) {
backups[entry] = vmBackups
}
}
})
return vmsUuids
}

async listAllVmBackups() {
const vmsUuids = await this.listAllVms()
const backups = { __proto__: null }
await asyncEach(vmsUuids, async vmUuid => {
const vmBackups = await this.listVmBackups(vmUuid)
if (vmBackups.length !== 0) {
backups[vmUuid] = vmBackups
}
})
return backups
}

@@ -681,13 +677,11 @@ export class RemoteAdapter {
}
}

async outputStream(path, input, { checksum = true, maxStreamLength, streamLength, validator = noop } = {}) {
async outputStream(path, input, { checksum = true, validator = noop } = {}) {
const container = watchStreamSize(input)
await this._handler.outputStream(path, input, {
checksum,
dirMode: this._dirMode,
maxStreamLength,
streamLength,
async validator() {
await input.task
return validator.apply(this, arguments)
@@ -697,8 +691,8 @@ export class RemoteAdapter {
}

// open the hierarchy of ancestors until we find a full one
async _createVhdStream(handler, path, { useChain }) {
const disposableSynthetic = useChain ? await VhdSynthetic.fromVhdChain(handler, path) : await openVhd(handler, path)
async _createSyntheticStream(handler, path) {
const disposableSynthetic = await VhdSynthetic.fromVhdChain(handler, path)
// I don't want the vhds to be disposed on return
// but only when the stream is done (or failed)

@@ -723,7 +717,7 @@ export class RemoteAdapter {
return stream
}

async readIncrementalVmBackup(metadata, ignoredVdis, { useChain = true } = {}) {
async readIncrementalVmBackup(metadata, ignoredVdis, { useSynthetic = true } = {}) {
const handler = this._handler
const { vbds, vhds, vifs, vm, vmSnapshot } = metadata
const dir = dirname(metadata._filename)
@@ -731,7 +725,9 @@ export class RemoteAdapter {

const streams = {}
await asyncMapSettled(Object.keys(vdis), async ref => {
streams[`${ref}.vhd`] = await this._createVhdStream(handler, join(dir, vhds[ref]), { useChain })
streams[`${ref}.vhd`] = useSynthetic
? await this._createSyntheticStream(handler, join(dir, vhds[ref]))
: await this._handler.createReadStream(join(dir, vhds[ref]))
})

return {
@@ -828,7 +824,11 @@ decorateMethodsWith(RemoteAdapter, {
debounceResourceFactory,
]),

_usePartitionFiles: Disposable.factory,

getDisk: compose([Disposable.factory, [deduped, diskId => [diskId]], debounceResourceFactory]),

getPartition: Disposable.factory,
})

exports.RemoteAdapter = RemoteAdapter
26
@xen-orchestra/backups/RestoreMetadataBackup.js
Normal file
@@ -0,0 +1,26 @@
'use strict'

const { DIR_XO_POOL_METADATA_BACKUPS } = require('./RemoteAdapter.js')
const { PATH_DB_DUMP } = require('./PoolMetadataBackup.js')

exports.RestoreMetadataBackup = class RestoreMetadataBackup {
constructor({ backupId, handler, xapi }) {
this._backupId = backupId
this._handler = handler
this._xapi = xapi
}

async run() {
const backupId = this._backupId
const handler = this._handler
const xapi = this._xapi

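// editor's note (added comment, not part of the original diff): backup IDs are
// prefixed by the directory they live in, so the first path segment tells pool
// metadata (an XAPI database dump streamed back through PATH_DB_DUMP) apart from
// XO config backups, which are returned as a string for the caller to re-import.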
if (backupId.split('/')[0] === DIR_XO_POOL_METADATA_BACKUPS) {
return xapi.putResource(await handler.createReadStream(`${backupId}/data`), PATH_DB_DUMP, {
task: xapi.task_create('Import pool metadata'),
})
} else {
return String(await handler.readFile(`${backupId}/data.json`))
}
}
}
@@ -1,32 +0,0 @@
import { join, resolve } from 'node:path/posix'

import { DIR_XO_POOL_METADATA_BACKUPS } from './RemoteAdapter.mjs'
import { PATH_DB_DUMP } from './_runners/_PoolMetadataBackup.mjs'

export class RestoreMetadataBackup {
constructor({ backupId, handler, xapi }) {
this._backupId = backupId
this._handler = handler
this._xapi = xapi
}

async run() {
const backupId = this._backupId
const handler = this._handler
const xapi = this._xapi

if (backupId.split('/')[0] === DIR_XO_POOL_METADATA_BACKUPS) {
return xapi.putResource(await handler.createReadStream(`${backupId}/data`), PATH_DB_DUMP, {
task: xapi.task_create('Import pool metadata'),
})
} else {
const metadata = JSON.parse(await handler.readFile(join(backupId, 'metadata.json')))
const dataFileName = resolve('/', backupId, metadata.data ?? 'data.json').slice(1)
const data = await handler.readFile(dataFileName)

// if data is JSON, send it as a plain string; otherwise, treat the data as binary and encode it
const isJson = dataFileName.endsWith('.json')
return isJson ? data.toString() : { encoding: 'base64', data: data.toString('base64') }
}
}
}
@@ -1,5 +1,7 @@
import CancelToken from 'promise-toolbox/CancelToken'
import Zone from 'node-zone'
'use strict'

const CancelToken = require('promise-toolbox/CancelToken')
const Zone = require('node-zone')

const logAfterEnd = log => {
const error = new Error('task has already ended')
@@ -28,7 +30,7 @@ const serializeError = error =>

const $$task = Symbol('@xen-orchestra/backups/Task')

export class Task {
class Task {
static get cancelToken() {
const task = Zone.current.data[$$task]
return task !== undefined ? task.#cancelToken : CancelToken.none
@@ -149,6 +151,7 @@ export class Task {
})
}
}
exports.Task = Task

for (const method of ['info', 'warning']) {
Task[method] = (...args) => Zone.current.data[$$task]?.[method](...args)
@@ -1,15 +1,17 @@
import Disposable from 'promise-toolbox/Disposable'
import pTimeout from 'promise-toolbox/timeout'
import { compileTemplate } from '@xen-orchestra/template'
import { runTask } from './_runTask.mjs'
import { RemoteTimeoutError } from './_RemoteTimeoutError.mjs'
'use strict'

export const DEFAULT_SETTINGS = {
const Disposable = require('promise-toolbox/Disposable')
const pTimeout = require('promise-toolbox/timeout')
const { compileTemplate } = require('@xen-orchestra/template')
const { RemoteTimeoutError } = require('./RemoteTimeoutError.js')
const { runTask } = require('./runTask.js')

exports.DEFAULT_SETTINGS = {
getRemoteTimeout: 300e3,
reportWhen: 'failure',
}

export const Abstract = class AbstractRunner {
exports.AbstractBackupJob = class AbstractBackupJob {
constructor({ config, getAdapter, getConnectedRecord, job, schedule }) {
this._config = config
this._getRecord = getConnectedRecord
@@ -1,20 +1,22 @@
import { asyncMap } from '@xen-orchestra/async-map'
import Disposable from 'promise-toolbox/Disposable'
import ignoreErrors from 'promise-toolbox/ignoreErrors'
'use strict'

import { extractIdsFromSimplePattern } from '../extractIdsFromSimplePattern.mjs'
import { PoolMetadataBackup } from './_PoolMetadataBackup.mjs'
import { XoMetadataBackup } from './_XoMetadataBackup.mjs'
import { DEFAULT_SETTINGS, Abstract } from './_Abstract.mjs'
import { runTask } from './_runTask.mjs'
import { getAdaptersByRemote } from './_getAdaptersByRemote.mjs'
const { asyncMap } = require('@xen-orchestra/async-map')
const Disposable = require('promise-toolbox/Disposable')
const ignoreErrors = require('promise-toolbox/ignoreErrors')

const { extractIdsFromSimplePattern } = require('../extractIdsFromSimplePattern.js')
const { PoolMetadataBackup } = require('../PoolMetadataBackup.js')
const { XoMetadataBackup } = require('./XoMetadataBackup.js')
const { DEFAULT_SETTINGS, AbstractBackupJob } = require('./AbstractBackupJob.js')
const { runTask } = require('./runTask.js')
const { getAdaptersByRemote } = require('./getAdapterByRemote.js')

const DEFAULT_METADATA_SETTINGS = {
retentionPoolMetadata: 0,
retentionXoMetadata: 0,
}

export const Metadata = class MetadataBackupRunner extends Abstract {
exports.MetadatasBackupJob = class MetadatasBackupJob extends AbstractBackupJob {
_computeBaseSettings(config, job) {
const baseSettings = { ...DEFAULT_SETTINGS }
Object.assign(baseSettings, DEFAULT_METADATA_SETTINGS, config.defaultSettings, config.metadata?.defaultSettings)
@@ -1,4 +1,5 @@
export class RemoteTimeoutError extends Error {
'use strict'
exports.RemoteTimeoutError = class RemoteTimeoutError extends Error {
constructor(remoteId) {
super('timeout while getting the remote ' + remoteId)
this.remoteId = remoteId
@@ -1,21 +1,24 @@
import { asyncMapSettled } from '@xen-orchestra/async-map'
import Disposable from 'promise-toolbox/Disposable'
import { limitConcurrency } from 'limit-concurrency-decorator'
'use strict'

import { extractIdsFromSimplePattern } from '../extractIdsFromSimplePattern.mjs'
import { Task } from '../Task.mjs'
import createStreamThrottle from './_createStreamThrottle.mjs'
import { DEFAULT_SETTINGS, Abstract } from './_Abstract.mjs'
import { runTask } from './_runTask.mjs'
import { getAdaptersByRemote } from './_getAdaptersByRemote.mjs'
import { FullRemote } from './_vmRunners/FullRemote.mjs'
import { IncrementalRemote } from './_vmRunners/IncrementalRemote.mjs'
const { asyncMapSettled, asyncMap } = require('@xen-orchestra/async-map')
const Disposable = require('promise-toolbox/Disposable')
const { limitConcurrency } = require('limit-concurrency-decorator')

const { extractIdsFromSimplePattern } = require('../extractIdsFromSimplePattern.js')
const { Task } = require('../Task.js')
const createStreamThrottle = require('./createStreamThrottle.js')
const { DEFAULT_SETTINGS, AbstractBackupJob } = require('./AbstractBackupJob.js')
const { runTask } = require('./runTask.js')
const { getAdaptersByRemote } = require('./getAdapterByRemote.js')
const { IncrementalRemoteVmBackup } = require('./VmBackup/IncrementalRemoteVmBackup.js')
const { FullRemoteVmBackup } = require('./VmBackup/FullRemoteVmBackup.js')

const DEFAULT_REMOTE_VM_SETTINGS = {
concurrency: 2,
copyRetention: 0,
deleteFirst: false,
exportRetention: 0,
fullInterval: 0,
healthCheckSr: undefined,
healthCheckVmsWithTags: [],
maxExportRate: 0,
@@ -25,7 +28,7 @@ const DEFAULT_REMOTE_VM_SETTINGS = {
vmTimeout: 0,
}

export const VmsRemote = class RemoteVmsBackupRunner extends Abstract {
exports.RemoteVmBackupJob = class RemoteVmBackupJob extends AbstractBackupJob {
_computeBaseSettings(config, job) {
const baseSettings = { ...DEFAULT_SETTINGS }
Object.assign(baseSettings, DEFAULT_REMOTE_VM_SETTINGS, config.defaultSettings, config.vm?.defaultSettings)
@@ -54,7 +57,13 @@ export const VmsRemote = class RemoteVmsBackupRunner extends Abstract {
return
}

const vmsUuids = await sourceRemoteAdapter.listAllVms()
const vmsUuids = []
await asyncMap(await sourceRemoteAdapter._handler.list('xo-vm-backups'), async entry => {
// ignore hidden and lock files
if (entry[0] !== '.' && !entry.endsWith('.lock')) {
vmsUuids.push(entry)
}
})

Task.info('vms', { vms: vmsUuids })

@@ -79,11 +88,13 @@ export const VmsRemote = class RemoteVmsBackupRunner extends Abstract {
}
let vmBackup
if (job.mode === 'delta') {
vmBackup = new IncrementalRemote(opts)
} else if (job.mode === 'full') {
vmBackup = new FullRemote(opts)
vmBackup = new IncrementalRemoteVmBackup(opts)
} else {
throw new Error(`Job mode ${job.mode} not implemented for mirror backup`)
if (job.mode === 'full') {
vmBackup = new FullRemoteVmBackup(opts)
} else {
throw new Error(`Job mode ${job.mode} not implemented`)
}
}

return runTask(taskStart, () => vmBackup.run())
@@ -0,0 +1,100 @@
'use strict'
const { AbstractVmBackup } = require('./AbstractVmBackup')
const { getVmBackupDir } = require('../../_getVmBackupDir')

const { decorateMethodsWith } = require('@vates/decorate-with')
const { defer } = require('golike-defer')
class AbstractRemoteVmBackup extends AbstractVmBackup {
constructor({
config,
job,
healthCheckSr,
remoteAdapters,
schedule,
settings,
sourceRemoteAdapter,
throttleStream,
vmUuid,
}) {
super()
this.config = config
this.job = job
this.remoteAdapters = remoteAdapters
this.scheduleId = schedule.id
this.timestamp = undefined

// the vm object is used in writers
// remoteWriter only needs vm.uuid
// @todo: how to do better?
// missing tags for healthcheck
this.vm = { uuid: vmUuid }

this._healthCheckSr = healthCheckSr
this._sourceRemoteAdapter = sourceRemoteAdapter
this._throttleStream = throttleStream

const allSettings = job.settings
const writers = new Set()
this._writers = writers

const RemoteWriter = this._getRemoteWriter()
Object.keys(remoteAdapters).forEach(remoteId => {
const targetSettings = {
...settings,
...allSettings[remoteId],
}
if (targetSettings.exportRetention !== 0) {
writers.add(new RemoteWriter({ backup: this, remoteId, settings: targetSettings }))
}
})
}

async _computeTransferList(predicate) {
const vmBackups = await this._sourceRemoteAdapter.listVmBackups(this.vm.uuid, predicate)

const localMetada = {}
Object.values(vmBackups).forEach(metadata => {
const timestamp = metadata.timestamp
localMetada[timestamp] = metadata
})
const nbRemotes = Object.keys(this.remoteAdapters).length
const remoteMetadatas = {}
await Promise.all(
Object.values(this.remoteAdapters).map(async remoteAdapter => {
const remoteMetadata = await remoteAdapter.listVmBackups(this.vm.uuid, predicate)
remoteMetadata.forEach(metadata => {
const timestamp = metadata.timestamp
remoteMetadatas[timestamp] = (remoteMetadatas[timestamp] ?? 0) + 1
})
})
)

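// editor's note (added comment, not part of the original diff): the loop below walks
// the local backups in listing order (which appears to be chronological for backups
// named by formatFilenameDate); a backup missing from at least one remote extends the
// chain to transfer, while one already present on every remote resets the chain, since
// everything up to it has already been mirrored.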
let chain = []
for (const timestamp in localMetada) {
if (remoteMetadatas[timestamp] !== nbRemotes) {
// this backup is not present on all the remotes
// it should be retransferred if not found later
chain.push(localMetada[timestamp])
} else {
// backup is present locally and on every remote: the chain has already been transferred
chain = []
}
}

return chain
}

async run($defer) {
const handler = this._sourceRemoteAdapter._handler
const sourceLock = await handler.lock(getVmBackupDir(this.vm.uuid))
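// editor's note (added comment, not part of the original diff): the lock on the source
// VM backup directory is held for the whole transfer and released by the $defer
// callback below, even if _run() throws.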
$defer(async () => {
sourceLock.dispose()
})
await this._run()
}
}

exports.AbstractRemoteVmBackup = AbstractRemoteVmBackup
decorateMethodsWith(AbstractRemoteVmBackup, {
run: defer,
})
@@ -1,8 +1,9 @@
import { asyncMap } from '@xen-orchestra/async-map'
import { createLogger } from '@xen-orchestra/log'
import { Task } from '../../Task.mjs'
'use strict'

const { debug, warn } = createLogger('xo:backups:AbstractVmRunner')
const { createLogger } = require('@xen-orchestra/log')
const { Task } = require('../../Task')
const { asyncMap } = require('@xen-orchestra/async-map')
const { debug, warn } = createLogger('xo:backups:AbstractVmBackup')

class AggregateError extends Error {
constructor(errors, message) {
@@ -17,7 +18,7 @@ const asyncEach = async (iterable, fn, thisArg = iterable) => {
}
}

export const Abstract = class AbstractVmBackupRunner {
class AbstractVmBackup {
// calls fn for each writer, warns of any errors, and throws only if there are no writers left
async _callWriters(fn, step, parallel = true) {
const writers = this._writers
@@ -73,21 +74,17 @@ export const Abstract = class AbstractVmBackupRunner {
}

// check if current VM has tags
const tags = this._tags
const { tags } = this.vm
const intersect = settings.healthCheckVmsWithTags.some(t => tags.includes(t))

if (settings.healthCheckVmsWithTags.length !== 0 && !intersect) {
// create a task to have an info in the logs and reports
return Task.run(
{
name: 'health check',
},
() => {
Task.info(`This VM doesn't match the health check's tags for this schedule`)
}
)
return
}

await this._callWriters(writer => writer.healthCheck(), 'writer.healthCheck()')
await this._callWriters(writer => writer.healthCheck(this._healthCheckSr), 'writer.healthCheck()')
}
async run() {
throw new Error('not implemented')
}
}
exports.AbstractVmBackup = AbstractVmBackup
@@ -1,23 +1,24 @@
import assert from 'node:assert'
import groupBy from 'lodash/groupBy.js'
import ignoreErrors from 'promise-toolbox/ignoreErrors'
import { asyncMap } from '@xen-orchestra/async-map'
import { decorateMethodsWith } from '@vates/decorate-with'
import { defer } from 'golike-defer'
import { formatDateTime } from '@xen-orchestra/xapi'
'use strict'

import { getOldEntries } from '../../_getOldEntries.mjs'
import { Task } from '../../Task.mjs'
import { Abstract } from './_Abstract.mjs'
const assert = require('assert')
const ignoreErrors = require('promise-toolbox/ignoreErrors')
const groupBy = require('lodash/groupBy.js')
const { asyncMap } = require('@xen-orchestra/async-map')
const { decorateMethodsWith } = require('@vates/decorate-with')
const { defer } = require('golike-defer')
const { formatDateTime } = require('@xen-orchestra/xapi')
const { getOldEntries } = require('./writers/_getOldEntries.js')

export const AbstractXapi = class AbstractXapiVmBackupRunner extends Abstract {
const { Task } = require('../../Task.js')
const { AbstractVmBackup } = require('./AbstractVmBackup.js')

class AbstractXapiVmBackup extends AbstractVmBackup {
constructor({
config,
getSnapshotNameLabel,
healthCheckSr,
job,
remoteAdapters,
remotes,
schedule,
settings,
srs,
@@ -31,11 +32,6 @@ export const AbstractXapi = class AbstractXapiVmBackupRunner extends Abstract {
throw new Error('cannot backup a VM created by this very job')
}

const currentOperations = Object.values(vm.current_operations)
if (currentOperations.some(_ => _ === 'migrate_send' || _ === 'pool_migrate')) {
throw new Error('cannot backup a VM currently being migrated')
}

this.config = config
this.job = job
this.remoteAdapters = remoteAdapters
@@ -43,23 +39,23 @@ export const AbstractXapi = class AbstractXapiVmBackupRunner extends Abstract {
this.timestamp = undefined

// VM currently backed up
const tags = (this._tags = vm.tags)
this.vm = vm
const { tags } = this.vm

// VM (snapshot) that is really exported
this._exportedVm = undefined
this._vm = vm
this.exportedVm = undefined

this._fullVdisRequired = undefined
this._getSnapshotNameLabel = getSnapshotNameLabel
this._isIncremental = job.mode === 'delta'
this._healthCheckSr = healthCheckSr
this._jobId = job.id
this._jobSnapshots = undefined
this._throttleStream = throttleStream
this._xapi = vm.$xapi

// Base VM for the export
this._baseVm = undefined
// Reference VM for the incremental export
// if possible, we will only export the difference between this VM and now
this._vmComparisonBasis = undefined

// Settings for this specific run (job, schedule, VM)
if (tags.includes('xo-memory-backup')) {
@@ -69,32 +65,22 @@ export const AbstractXapi = class AbstractXapiVmBackupRunner extends Abstract {
settings.offlineSnapshot = true
}
this._settings = settings

// Create writers
{
const writers = new Set()
this._writers = writers

const [BackupWriter, ReplicationWriter] = this._getWriters()
const [RemoteWriter, XapiWriter] = this._getWriters()

const allSettings = job.settings
Object.entries(remoteAdapters).forEach(([remoteId, adapter]) => {
Object.keys(remoteAdapters).forEach(remoteId => {
const targetSettings = {
...settings,
...allSettings[remoteId],
}
if (targetSettings.exportRetention !== 0) {
writers.add(
new BackupWriter({
adapter,
config,
healthCheckSr,
job,
scheduleId: schedule.id,
vmUuid: vm.uuid,
remoteId,
settings: targetSettings,
})
)
writers.add(new RemoteWriter({ backup: this, remoteId, settings: targetSettings }))
}
})
srs.forEach(sr => {
@@ -103,17 +89,7 @@ export const AbstractXapi = class AbstractXapiVmBackupRunner extends Abstract {
...allSettings[sr.uuid],
}
if (targetSettings.copyRetention !== 0) {
writers.add(
new ReplicationWriter({
config,
healthCheckSr,
job,
scheduleId: schedule.id,
vmUuid: vm.uuid,
sr,
settings: targetSettings,
})
)
writers.add(new XapiWriter({ backup: this, sr, settings: targetSettings }))
}
})
}
@@ -122,7 +98,7 @@ export const AbstractXapi = class AbstractXapiVmBackupRunner extends Abstract {
// ensure the VM itself does not have any backup metadata which would be
// copied on manual snapshots and interfere with the backup jobs
async _cleanMetadata() {
const vm = this._vm
const { vm } = this
if ('xo:backup:job' in vm.other_config) {
await vm.update_other_config({
'xo:backup:datetime': null,
@@ -136,7 +112,7 @@ export const AbstractXapi = class AbstractXapiVmBackupRunner extends Abstract {
}

async _snapshot() {
const vm = this._vm
const { vm } = this
const xapi = this._xapi

const settings = this._settings
@@ -161,19 +137,19 @@ export const AbstractXapi = class AbstractXapiVmBackupRunner extends Abstract {
'xo:backup:vm': vm.uuid,
})

this._exportedVm = await xapi.getRecord('VM', snapshotRef)
this.exportedVm = await xapi.getRecord('VM', snapshotRef)

return this._exportedVm.uuid
return this.exportedVm.uuid
})
} else {
this._exportedVm = vm
this.exportedVm = vm
this.timestamp = Date.now()
}
}

async _fetchJobSnapshots() {
const jobId = this._jobId
const vmRef = this._vm.$ref
const vmRef = this.vm.$ref
const xapi = this._xapi

const snapshotsRef = await xapi.getField('VM', vmRef, 'snapshots')
@@ -192,7 +168,7 @@ export const AbstractXapi = class AbstractXapiVmBackupRunner extends Abstract {
async _removeUnusedSnapshots() {
const allSettings = this.job.settings
const baseSettings = this._baseSettings
const baseVmRef = this._baseVm?.$ref
const vmComparisonBasisRef = this._vmComparisonBasis?.$ref

const snapshotsPerSchedule = groupBy(this._jobSnapshots, _ => _.other_config['xo:backup:schedule'])
const xapi = this._xapi
@@ -200,10 +176,10 @@ export const AbstractXapi = class AbstractXapiVmBackupRunner extends Abstract {
const settings = {
...baseSettings,
...allSettings[scheduleId],
...allSettings[this._vm.uuid],
...allSettings[this.vm.uuid],
}
return asyncMap(getOldEntries(settings.snapshotRetention, snapshots), ({ $ref }) => {
if ($ref !== baseVmRef) {
if ($ref !== vmComparisonBasisRef) {
return xapi.VM_destroy($ref)
}
})
@@ -242,12 +218,13 @@

await this._fetchJobSnapshots()

// will only do something during incremental backups
await this._selectBaseVm()

await this._cleanMetadata()
await this._removeUnusedSnapshots()

const vm = this._vm
const { vm } = this
const isRunning = vm.power_state === 'Running'
const startAfter = isRunning && (settings.offlineBackup ? 'backup' : settings.offlineSnapshot && 'snapshot')
if (startAfter) {
@@ -261,15 +238,7 @@
}

if (this._writers.size !== 0) {
const { pool_migrate = null, migrate_send = null } = this._exportedVm.blocked_operations

const reason = 'VM migration is blocked during backup'
await this._exportedVm.update_blocked_operations({ pool_migrate: reason, migrate_send: reason })
try {
await this._copy()
} finally {
await this._exportedVm.update_blocked_operations({ pool_migrate, migrate_send })
}
await this._copy()
}
} finally {
if (startAfter) {
@@ -282,7 +251,8 @@
await this._healthCheck()
}
}
exports.AbstractXapiVmBackup = AbstractXapiVmBackup

decorateMethodsWith(AbstractXapi, {
decorateMethodsWith(AbstractXapiVmBackup, {
run: defer,
})
@@ -0,0 +1,43 @@
'use strict'

const { decorateMethodsWith } = require('@vates/decorate-with')
const { defer } = require('golike-defer')
const { AbstractRemoteVmBackup } = require('./AbstractRemoteVmBackup')
const { FullRemoteWriter } = require('./writers/FullRemoteWriter')
const { forkStreamUnpipe } = require('../forkStreamUnpipe')

const FullRemoteVmBackup = class FullRemoteVmBackup extends AbstractRemoteVmBackup {
_getRemoteWriter() {
return FullRemoteWriter
}
async _run($defer) {
const transferList = await this._computeTransferList(({ mode }) => mode === 'full')

await this._callWriters(async writer => {
await writer.beforeBackup()
$defer(async () => {
await writer.afterBackup()
})
}, 'writer.beforeBackup()')

for (const metadata of transferList) {
const stream = await this._sourceRemoteAdapter.readFullVmBackup(metadata)
// @todo should skip if backup is already there (success on only one remote)
await this._callWriters(
writer =>
writer.run({
stream: forkStreamUnpipe(stream),
timestamp: metadata.timestamp,
vm: metadata.vm,
vmSnapshot: metadata.vmSnapshot,
}),
'writer.run()'
)
}
}
}

exports.FullRemoteVmBackup = FullRemoteVmBackup
decorateMethodsWith(FullRemoteVmBackup, {
_run: defer,
})
@@ -0,0 +1,63 @@
'use strict'

const { createLogger } = require('@xen-orchestra/log')

const { forkStreamUnpipe } = require('../forkStreamUnpipe.js')
const { watchStreamSize } = require('../../_watchStreamSize.js')
const { FullRemoteWriter } = require('./writers/FullRemoteWriter.js')
const { FullXapiWriter } = require('./writers/FullXapiWriter.js')
const { AbstractXapiVmBackup } = require('./AbstractXapiVMBackup.js')

const { debug } = createLogger('xo:backups:FullVmBackup')

class FullXapiVmBackup extends AbstractXapiVmBackup {
_getWriters() {
return [FullRemoteWriter, FullXapiWriter]
}

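// editor's note (added comment, not part of the original diff): a snapshot is taken
// when explicitly requested, when the VM is running and the backup is not an offline
// one (an offline backup exports the halted VM directly), or when a snapshot
// retention is configured on the schedule.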
_mustDoSnapshot() {
const { vm } = this

const settings = this._settings
return (
settings.unconditionalSnapshot ||
(!settings.offlineBackup && vm.power_state === 'Running') ||
settings.snapshotRetention !== 0
)
}
_selectBaseVm() {}

async _copy() {
const { compression } = this.job
const stream = this._throttleStream(
await this._xapi.VM_export(this.exportedVm.$ref, {
compress: Boolean(compression) && (compression === 'native' ? 'gzip' : 'zstd'),
useSnapshot: false,
})
)
const sizeContainer = watchStreamSize(stream)

const timestamp = Date.now()

await this._callWriters(
writer =>
writer.run({
sizeContainer,
stream: forkStreamUnpipe(stream),
timestamp,
}),
'writer.run()'
)

const { size } = sizeContainer
const end = Date.now()
const duration = end - timestamp
debug('transfer complete', {
duration,
speed: duration !== 0 ? (size * 1e3) / 1024 / 1024 / duration : 0,
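// editor's note (added comment, not part of the original diff): duration is in
// milliseconds, so (size * 1e3) / duration is bytes per second and the two divisions
// by 1024 convert it to MiB/s.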
size,
})
this._healthCheck()
}
}
exports.FullXapiVmBackup = FullXapiVmBackup
@@ -1,16 +1,22 @@
import { asyncEach } from '@vates/async-each'
import { decorateMethodsWith } from '@vates/decorate-with'
import { defer } from 'golike-defer'
import assert from 'node:assert'
import isVhdDifferencingDisk from 'vhd-lib/isVhdDifferencingDisk.js'
import mapValues from 'lodash/mapValues.js'
'use strict'
const assert = require('node:assert')

import { AbstractRemote } from './_AbstractRemote.mjs'
import { forkDeltaExport } from './_forkDeltaExport.mjs'
import { IncrementalRemoteWriter } from '../_writers/IncrementalRemoteWriter.mjs'
import { Task } from '../../Task.mjs'
const { decorateMethodsWith } = require('@vates/decorate-with')
const { defer } = require('golike-defer')
const { AbstractRemoteVmBackup } = require('./AbstractRemoteVmBackup')
const { mapValues } = require('lodash')
const { IncrementalRemoteWriter } = require('./writers/IncrementalRemoteWriter')
const { forkStreamUnpipe } = require('../forkStreamUnpipe')
const { Task } = require('../../Task')

class IncrementalRemoteVmBackupRunner extends AbstractRemote {
const forkDeltaExport = deltaExport =>
Object.create(deltaExport, {
streams: {
value: mapValues(deltaExport.streams, forkStreamUnpipe),
},
})

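// editor's note (added comment, not part of the original diff): forkDeltaExport
// shallow-wraps the export and forks every VHD stream so that each writer gets its own
// readable copy without the writers unpiping each other; only `streams` is overridden,
// the other metadata is shared via the prototype.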
class IncrementalRemoteVmBackup extends AbstractRemoteVmBackup {
_getRemoteWriter() {
return IncrementalRemoteWriter
}
@@ -29,13 +35,7 @@ class IncrementalRemoteVmBackupRunner extends AbstractRemote {

await this._callWriters(writer => writer.prepare({ isBase: metadata.isBase }), 'writer.prepare()')
const incrementalExport = await this._sourceRemoteAdapter.readIncrementalVmBackup(metadata, undefined, {
useChain: false,
})

const isVhdDifferencing = {}

await asyncEach(Object.entries(incrementalExport.streams), async ([key, stream]) => {
isVhdDifferencing[key] = await isVhdDifferencingDisk(stream)
useSynthetic: false,
})

incrementalExport.streams = mapValues(incrementalExport.streams, this._throttleStream)
@@ -43,7 +43,6 @@ class IncrementalRemoteVmBackupRunner extends AbstractRemote {
writer =>
writer.transfer({
deltaExport: forkDeltaExport(incrementalExport),
isVhdDifferencing,
timestamp: metadata.timestamp,
vm: metadata.vm,
vmSnapshot: metadata.vmSnapshot,
@@ -51,16 +50,15 @@ class IncrementalRemoteVmBackupRunner extends AbstractRemote {
'writer.transfer()'
)
await this._callWriters(writer => writer.cleanup(), 'writer.cleanup()')
// for healthcheck
this._tags = metadata.vm.tags
}
this._healthCheck()
} else {
Task.info('No new data to upload for this VM')
}
}
}

export const IncrementalRemote = IncrementalRemoteVmBackupRunner
decorateMethodsWith(IncrementalRemoteVmBackupRunner, {
exports.IncrementalRemoteVmBackup = IncrementalRemoteVmBackup
decorateMethodsWith(IncrementalRemoteVmBackup, {
_run: defer,
})
@@ -1,26 +1,33 @@
import { asyncEach } from '@vates/async-each'
import { asyncMap } from '@xen-orchestra/async-map'
import { createLogger } from '@xen-orchestra/log'
import { pipeline } from 'node:stream'
import findLast from 'lodash/findLast.js'
import isVhdDifferencingDisk from 'vhd-lib/isVhdDifferencingDisk.js'
import keyBy from 'lodash/keyBy.js'
import mapValues from 'lodash/mapValues.js'
import vhdStreamValidator from 'vhd-lib/vhdStreamValidator.js'
'use strict'

import { AbstractXapi } from './_AbstractXapi.mjs'
import { exportIncrementalVm } from '../../_incrementalVm.mjs'
import { forkDeltaExport } from './_forkDeltaExport.mjs'
import { IncrementalRemoteWriter } from '../_writers/IncrementalRemoteWriter.mjs'
import { IncrementalXapiWriter } from '../_writers/IncrementalXapiWriter.mjs'
import { Task } from '../../Task.mjs'
import { watchStreamSize } from '../../_watchStreamSize.mjs'
const findLast = require('lodash/findLast.js')
const keyBy = require('lodash/keyBy.js')
const mapValues = require('lodash/mapValues.js')
const vhdStreamValidator = require('vhd-lib/vhdStreamValidator.js')
const { asyncMap } = require('@xen-orchestra/async-map')
const { createLogger } = require('@xen-orchestra/log')
const { pipeline } = require('node:stream')

const { debug } = createLogger('xo:backups:IncrementalXapiVmBackup')
const { exportIncrementalVm } = require('../../_incrementalVm.js')
const { forkStreamUnpipe } = require('../forkStreamUnpipe.js')
const { Task } = require('../../Task.js')
const { watchStreamSize } = require('../../_watchStreamSize.js')

const { IncrementalRemoteWriter } = require('./writers/IncrementalRemoteWriter.js')
const { IncrementalXapiWriter } = require('./writers/IncrementalXapiWriter.js')
const { AbstractXapiVmBackup } = require('./AbstractXapiVMBackup.js')

const { debug } = createLogger('xo:backups:VmBackup')

const forkDeltaExport = deltaExport =>
Object.create(deltaExport, {
streams: {
value: mapValues(deltaExport.streams, forkStreamUnpipe),
},
})
const noop = Function.prototype

export const IncrementalXapi = class IncrementalXapiVmBackupRunner extends AbstractXapi {
class IncrementalXapiVmBackup extends AbstractXapiVmBackup {
_getWriters() {
return [IncrementalRemoteWriter, IncrementalXapiWriter]
}
@@ -30,60 +37,50 @@ export const IncrementalXapi = class IncrementalXapiVmBackupRunner extends Abstr
}

async _copy() {
const baseVm = this._baseVm
const vm = this._vm
const exportedVm = this._exportedVm
const { exportedVm } = this
const vmComparisonBasis = this._vmComparisonBasis
const fullVdisRequired = this._fullVdisRequired

const isFull = fullVdisRequired === undefined || fullVdisRequired.size !== 0
const isBase = fullVdisRequired === undefined || fullVdisRequired.size !== 0

await this._callWriters(writer => writer.prepare({ isFull }), 'writer.prepare()')
await this._callWriters(writer => writer.prepare({ isBase }), 'writer.prepare()')

const deltaExport = await exportIncrementalVm(exportedVm, baseVm, {
const incrementalExport = await exportIncrementalVm(exportedVm, vmComparisonBasis, {
fullVdisRequired,
nbdConcurrency: this._settings.nbdConcurrency,
preferNbd: this._settings.preferNbd,
})
// since NBD is network based, if one disk uses NBD, all the disks use it
// except the suspended VDI
if (Object.values(deltaExport.streams).some(({ _nbd }) => _nbd)) {
if (Object.values(incrementalExport.streams).some(({ _nbd }) => _nbd)) {
Task.info('Transfer data using NBD')
}

const isVhdDifferencing = {}
// since isVhdDifferencingDisk reads and unshifts data in the stream,
// it should be done BEFORE any other stream transform
await asyncEach(Object.entries(deltaExport.streams), async ([key, stream]) => {
isVhdDifferencing[key] = await isVhdDifferencingDisk(stream)
})
const sizeContainers = mapValues(deltaExport.streams, stream => watchStreamSize(stream))
const sizeContainers = mapValues(incrementalExport.streams, stream => watchStreamSize(stream))

if (this._settings.validateVhdStreams) {
deltaExport.streams = mapValues(deltaExport.streams, stream => pipeline(stream, vhdStreamValidator, noop))
incrementalExport.streams = mapValues(incrementalExport.streams, stream =>
pipeline(stream, vhdStreamValidator, noop)
)
}
deltaExport.streams = mapValues(deltaExport.streams, this._throttleStream)

incrementalExport.streams = mapValues(incrementalExport.streams, this._throttleStream)

const timestamp = Date.now()

await this._callWriters(
writer =>
writer.transfer({
deltaExport: forkDeltaExport(deltaExport),
isVhdDifferencing,
deltaExport: forkDeltaExport(incrementalExport),
sizeContainers,
timestamp,
vm,
vmSnapshot: exportedVm,
}),
'writer.transfer()'
)

this._baseVm = exportedVm
this._vmComparisonBasis = exportedVm

if (baseVm !== undefined) {
if (vmComparisonBasis !== undefined) {
await exportedVm.update_other_config(
'xo:backup:deltaChainLength',
String(+(baseVm.other_config['xo:backup:deltaChainLength'] ?? 0) + 1)
String(+(vmComparisonBasis.other_config['xo:backup:deltaChainLength'] ?? 0) + 1)
)
}

@@ -116,11 +113,11 @@ export const IncrementalXapi = class IncrementalXapiVmBackupRunner extends Abstr
const fullInterval = this._settings.fullInterval
const deltaChainLength = +(baseVm.other_config['xo:backup:deltaChainLength'] ?? 0) + 1
if (!(fullInterval === 0 || fullInterval > deltaChainLength)) {
debug('not using base VM becaust fullInterval reached')
debug('not using base VM because fullInterval reached')
return
}

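// editor's note (added comment, not part of the original diff): deltaChainLength
// counts the deltas since the last full export; once it reaches fullInterval the base
// VM is discarded so the next run produces a full backup, which bounds both restore
// time and the impact of a corrupt chain.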
const srcVdis = keyBy(await xapi.getRecords('VDI', await this._vm.$getDisks()), '$ref')
const srcVdis = keyBy(await xapi.getRecords('VDI', await this.vm.$getDisks()), '$ref')

// resolve full record
baseVm = await xapi.getRecord('VM', baseVm.$ref)
@@ -169,7 +166,8 @@ export const IncrementalXapi = class IncrementalXapiVmBackupRunner extends Abstr
}
})

this._baseVm = baseVm
this._vmComparisonBasis = baseVm
this._fullVdisRequired = fullVdisRequired
}
}
exports.IncrementalXapiVmBackup = IncrementalXapiVmBackup
@@ -1,11 +1,13 @@
import { formatFilenameDate } from '../../_filenameDate.mjs'
import { getOldEntries } from '../../_getOldEntries.mjs'
import { Task } from '../../Task.mjs'
'use strict'

import { MixinRemoteWriter } from './_MixinRemoteWriter.mjs'
import { AbstractFullWriter } from './_AbstractFullWriter.mjs'
const { formatFilenameDate } = require('../../../_filenameDate.js')
const { getOldEntries } = require('./_getOldEntries.js')
const { Task } = require('../../../Task.js')

export class FullRemoteWriter extends MixinRemoteWriter(AbstractFullWriter) {
const { MixinRemoteWriter } = require('./_MixinRemoteWriter.js')
const { AbstractFullWriter } = require('./_AbstractFullWriter.js')

exports.FullRemoteWriter = class FullRemoteWriter extends MixinRemoteWriter(AbstractFullWriter) {
constructor(props) {
super(props)

@@ -24,17 +26,15 @@ export class FullRemoteWriter extends MixinRemoteWriter(AbstractFullWriter) {
)
}

async _run({ maxStreamLength, timestamp, sizeContainer, stream, streamLength, vm, vmSnapshot }) {
async _run({ timestamp, sizeContainer, stream, vm = this._backup.vm, vmSnapshot = this._backup.exportedVm }) {
const backup = this._backup
const settings = this._settings
const job = this._job
const scheduleId = this._scheduleId

const { job, scheduleId } = backup

const adapter = this._adapter
let metadata = await this._isAlreadyTransferred(timestamp)
if (metadata !== undefined) {
// @todo: should skip the backup while being careful not to stall the forked stream
Task.info('This backup has already been transferred')
}

// TODO: clean VM backup directory

const oldBackups = getOldEntries(
settings.exportRetention - 1,
@@ -47,7 +47,7 @@ export class FullRemoteWriter extends MixinRemoteWriter(AbstractFullWriter) {
const dataBasename = basename + '.xva'
const dataFilename = this._vmBackupDir + '/' + dataBasename

metadata = {
const metadata = {
jobId: job.id,
mode: job.mode,
scheduleId,
@@ -65,13 +65,11 @@ export class FullRemoteWriter extends MixinRemoteWriter(AbstractFullWriter) {

await Task.run({ name: 'transfer' }, async () => {
await adapter.outputStream(dataFilename, stream, {
maxStreamLength,
streamLength,
validator: tmpPath => adapter.isValidXva(tmpPath),
})
return { size: sizeContainer.size }
return { size: sizeContainer?.size }
})
metadata.size = sizeContainer.size
metadata.size = sizeContainer?.size ?? 0
this._metadataFileName = await adapter.writeVmBackupMetadata(vm.uuid, metadata)

if (!deleteFirst) {
|
||||
import ignoreErrors from 'promise-toolbox/ignoreErrors'
|
||||
import { asyncMap, asyncMapSettled } from '@xen-orchestra/async-map'
|
||||
import { formatDateTime } from '@xen-orchestra/xapi'
|
||||
'use strict'
|
||||
|
||||
import { formatFilenameDate } from '../../_filenameDate.mjs'
|
||||
import { getOldEntries } from '../../_getOldEntries.mjs'
|
||||
import { Task } from '../../Task.mjs'
|
||||
const ignoreErrors = require('promise-toolbox/ignoreErrors')
|
||||
const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
|
||||
const { formatDateTime } = require('@xen-orchestra/xapi')
|
||||
|
||||
import { AbstractFullWriter } from './_AbstractFullWriter.mjs'
|
||||
import { MixinXapiWriter } from './_MixinXapiWriter.mjs'
|
||||
import { listReplicatedVms } from './_listReplicatedVms.mjs'
|
||||
const { formatFilenameDate } = require('../../../_filenameDate.js')
|
||||
const { getOldEntries } = require('./_getOldEntries.js')
|
||||
const { Task } = require('../../../Task.js')
|
||||
|
||||
export class FullXapiWriter extends MixinXapiWriter(AbstractFullWriter) {
|
||||
const { AbstractFullWriter } = require('./_AbstractFullWriter.js')
|
||||
const { MixinXapiWriter } = require('./_MixinXapiWriter.js')
|
||||
const { listReplicatedVms } = require('./_listReplicatedVms.js')
|
||||
|
||||
exports.FullXapiWriter = class FullXapiWriter extends MixinXapiWriter(AbstractFullWriter) {
|
||||
constructor(props) {
|
||||
super(props)
|
||||
|
||||
@@ -30,11 +32,10 @@ export class FullXapiWriter extends MixinXapiWriter(AbstractFullWriter) {
|
||||
)
|
||||
}
|
||||
|
||||
async _run({ timestamp, sizeContainer, stream, vm }) {
|
||||
async _run({ timestamp, sizeContainer, stream }) {
|
||||
const sr = this._sr
|
||||
const settings = this._settings
|
||||
const job = this._job
|
||||
const scheduleId = this._scheduleId
|
||||
const { job, scheduleId, vm } = this._backup
|
||||
|
||||
const { uuid: srUuid, $xapi: xapi } = sr
|
||||
|
||||
@@ -1,33 +1,35 @@
|
||||
import assert from 'node:assert'
|
||||
import mapValues from 'lodash/mapValues.js'
|
||||
import ignoreErrors from 'promise-toolbox/ignoreErrors'
|
||||
import { asyncEach } from '@vates/async-each'
|
||||
import { asyncMap } from '@xen-orchestra/async-map'
|
||||
import { chainVhd, checkVhdChain, openVhd, VhdAbstract } from 'vhd-lib'
|
||||
import { createLogger } from '@xen-orchestra/log'
|
||||
import { decorateClass } from '@vates/decorate-with'
|
||||
import { defer } from 'golike-defer'
|
||||
import { dirname } from 'node:path'
|
||||
'use strict'
|
||||
|
||||
import { formatFilenameDate } from '../../_filenameDate.mjs'
|
||||
import { getOldEntries } from '../../_getOldEntries.mjs'
|
||||
import { TAG_BASE_DELTA } from '../../_incrementalVm.mjs'
|
||||
import { Task } from '../../Task.mjs'
|
||||
const assert = require('assert')
|
||||
const map = require('lodash/map.js')
|
||||
const mapValues = require('lodash/mapValues.js')
|
||||
const ignoreErrors = require('promise-toolbox/ignoreErrors')
|
||||
const { asyncMap } = require('@xen-orchestra/async-map')
|
||||
const { chainVhd, checkVhdChain, openVhd, VhdAbstract } = require('vhd-lib')
|
||||
const { createLogger } = require('@xen-orchestra/log')
|
||||
const { decorateClass } = require('@vates/decorate-with')
|
||||
const { defer } = require('golike-defer')
|
||||
const { dirname } = require('path')
|
||||
|
||||
import { MixinRemoteWriter } from './_MixinRemoteWriter.mjs'
|
||||
import { AbstractIncrementalWriter } from './_AbstractIncrementalWriter.mjs'
|
||||
import { checkVhd } from './_checkVhd.mjs'
|
||||
import { packUuid } from './_packUuid.mjs'
|
||||
import { Disposable } from 'promise-toolbox'
|
||||
const { formatFilenameDate } = require('../../../_filenameDate.js')
|
||||
const { getOldEntries } = require('./_getOldEntries.js')
|
||||
const { Task } = require('../../../Task.js')
|
||||
|
||||
const { warn } = createLogger('xo:backups:DeltaBackupWriter')
|
||||
const { MixinRemoteWriter } = require('./_MixinRemoteWriter.js')
|
||||
const { AbstractIncrementalWriter } = require('./_AbstractIncrementalWriter.js')
|
||||
const { checkVhd } = require('./_checkVhd.js')
|
||||
const { packUuid } = require('./_packUuid.js')
|
||||
const { Disposable } = require('promise-toolbox')
|
||||
|
||||
export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrementalWriter) {
|
||||
const { warn } = createLogger('xo:backups:IncrementalRemoteWriter')
|
||||
|
||||
class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrementalWriter) {
|
||||
async checkBaseVdis(baseUuidToSrcVdi) {
|
||||
const { handler } = this._adapter
|
||||
const backup = this._backup
|
||||
const adapter = this._adapter
|
||||
|
||||
const vdisDir = `${this._vmBackupDir}/vdis/${this._job.id}`
|
||||
const vdisDir = `${this._vmBackupDir}/vdis/${backup.job.id}`
|
||||
|
||||
await asyncMap(baseUuidToSrcVdi, async ([baseUuid, srcVdi]) => {
|
||||
let found = false
|
||||
@@ -68,13 +70,13 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
|
||||
return this._cleanVm({ merge: true })
|
||||
}
|
||||
|
||||
prepare({ isFull }) {
|
||||
prepare({ isBase }) {
|
||||
// create the task related to this export and ensure all methods are called in this context
|
||||
const task = new Task({
|
||||
name: 'export',
|
||||
data: {
|
||||
id: this._remoteId,
|
||||
isFull,
|
||||
isBase,
|
||||
type: 'remote',
|
||||
},
|
||||
})
|
||||
@@ -89,12 +91,11 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
|
||||
async _prepare() {
|
||||
const adapter = this._adapter
|
||||
const settings = this._settings
|
||||
const scheduleId = this._scheduleId
|
||||
const vmUuid = this._vmUuid
|
||||
const { scheduleId, vm } = this._backup
|
||||
|
||||
const oldEntries = getOldEntries(
|
||||
settings.exportRetention - 1,
|
||||
await adapter.listVmBackups(vmUuid, _ => _.mode === 'delta' && _.scheduleId === scheduleId)
|
||||
await adapter.listVmBackups(vm.uuid, _ => _.mode === 'delta' && _.scheduleId === scheduleId)
|
||||
)
|
||||
this._oldEntries = oldEntries
|
||||
|
||||
@@ -129,23 +130,20 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
|
||||
|
||||
// delete sequentially from newest to oldest to avoid unnecessary merges
|
||||
for (let i = oldEntries.length; i-- > 0; ) {
|
||||
await adapter.deleteDeltaVmBackups([oldEntries[i]])
|
||||
await adapter.deleteIncrementalVmBackups([oldEntries[i]])
|
||||
}
|
||||
}
|
||||
|
||||
async _transfer($defer, { isVhdDifferencing, timestamp, deltaExport, vm, vmSnapshot }) {
|
||||
async _transfer($defer, { timestamp, deltaExport, vm = this._backup.vm, vmSnapshot = this._backup.exportedVm }) {
|
||||
const adapter = this._adapter
|
||||
const job = this._job
|
||||
const scheduleId = this._scheduleId
|
||||
const settings = this._settings
|
||||
const backup = this._backup
|
||||
|
||||
const { job, scheduleId } = backup
|
||||
|
||||
const jobId = job.id
|
||||
const handler = adapter.handler
|
||||
|
||||
let metadataContent = await this._isAlreadyTransferred(timestamp)
|
||||
if (metadataContent !== undefined) {
|
||||
// @todo : should skip backup while being vigilant to not stuck the forked stream
|
||||
Task.info('This backup has already been transfered')
|
||||
}
|
||||
// TODO: clean VM backup directory
|
||||
|
||||
const basename = formatFilenameDate(timestamp)
|
||||
const vhds = mapValues(
|
||||
@@ -160,8 +158,7 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
|
||||
}/${adapter.getVhdFileName(basename)}`
|
||||
)
|
||||
|
||||
metadataContent = {
|
||||
isVhdDifferencing,
|
||||
const metadataContent = {
|
||||
jobId,
|
||||
mode: job.mode,
|
||||
scheduleId,
|
||||
@@ -174,16 +171,16 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
|
||||
vm,
|
||||
vmSnapshot,
|
||||
}
|
||||
|
||||
const { size } = await Task.run({ name: 'transfer' }, async () => {
|
||||
let transferSize = 0
|
||||
await asyncEach(
|
||||
Object.entries(deltaExport.vdis),
|
||||
async ([id, vdi]) => {
|
||||
await Promise.all(
|
||||
map(deltaExport.vdis, async (vdi, id) => {
|
||||
const path = `${this._vmBackupDir}/${vhds[id]}`
|
||||
|
||||
const isDifferencing = isVhdDifferencing[`${id}.vhd`]
|
||||
const isDelta = vdi.other_config['xo:base_delta'] !== undefined
|
||||
let parentPath
|
||||
if (isDifferencing) {
|
||||
if (isDelta) {
|
||||
const vdiDir = dirname(path)
|
||||
parentPath = (
|
||||
await handler.list(vdiDir, {
|
||||
@@ -194,11 +191,7 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
|
||||
.sort()
|
||||
.pop()
|
||||
|
||||
assert.notStrictEqual(
|
||||
parentPath,
|
||||
undefined,
|
||||
`missing parent of ${id} in ${dirname(path)}, looking for ${vdi.other_config[TAG_BASE_DELTA]}`
|
||||
)
|
||||
assert.notStrictEqual(parentPath, undefined, `missing parent of ${id}`)
|
||||
|
||||
parentPath = parentPath.slice(1) // remove leading slash
|
||||
|
||||
@@ -206,20 +199,23 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
|
||||
await checkVhd(handler, parentPath)
|
||||
}
|
||||
|
||||
// don't write it as transferSize += await async function
|
||||
// since i += await asyncFun lead to race condition
|
||||
// as explained : https://eslint.org/docs/latest/rules/require-atomic-updates
|
||||
const transferSizeOneDisk = await adapter.writeVhd(path, deltaExport.streams[`${id}.vhd`], {
|
||||
transferSize += await adapter.writeVhd(path, deltaExport.streams[`${id}.vhd`], {
|
||||
// no checksum for VHDs, because they will be invalidated by
|
||||
// merges and chainings
|
||||
checksum: false,
|
||||
validator: tmpPath => checkVhd(handler, tmpPath),
|
||||
writeBlockConcurrency: this._config.writeBlockConcurrency,
|
||||
writeBlockConcurrency: this._backup.config.writeBlockConcurrency,
|
||||
})
|
||||
transferSize += transferSizeOneDisk
|
||||
|
||||
if (isDifferencing) {
|
||||
await chainVhd(handler, parentPath, handler, path)
|
||||
if (isDelta) {
|
||||
try {
|
||||
await chainVhd(handler, parentPath, handler, path)
|
||||
} catch (err) {
|
||||
// @todo : check why if chains with full disk
|
||||
if (err.message !== 'cannot chain disk of type 3') {
|
||||
throw err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// set the correct UUID in the VHD
|
||||
@@ -228,20 +224,15 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
|
||||
await vhd.readBlockAllocationTable() // required by writeFooter()
|
||||
await vhd.writeFooter()
|
||||
})
|
||||
},
|
||||
{
|
||||
concurrency: settings.diskPerVmConcurrency,
|
||||
}
|
||||
})
|
||||
)
|
||||
|
||||
return { size: transferSize }
|
||||
})
|
||||
metadataContent.size = size
|
||||
this._metadataFileName = await adapter.writeVmBackupMetadata(vm.uuid, metadataContent)
|
||||
|
||||
// TODO: run cleanup?
|
||||
}
|
||||
}
|
||||
decorateClass(IncrementalRemoteWriter, {
|
||||
exports.IncrementalRemoteWriter = decorateClass(IncrementalRemoteWriter, {
|
||||
_transfer: defer,
|
||||
})
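The comments above refer to ESLint's `require-atomic-updates` rationale. A minimal sketch (illustrative names, not part of this diff) of why `total += await fn()` can lose updates under concurrency, and the safe pattern used here:

// BAD: `total` is read before the await and written back after it, so two
// concurrent iterations can read the same stale value and one update is lost.
async function racySum(tasks) {
  let total = 0
  await Promise.all(tasks.map(async task => {
    total += await task() // read → await → write: not atomic
  }))
  return total
}

// SAFE: await first, then update `total` in a single synchronous step.
async function safeSum(tasks) {
  let total = 0
  await Promise.all(tasks.map(async task => {
    const value = await task()
    total += value
  }))
  return total
}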
@@ -0,0 +1,133 @@
'use strict'

const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
const ignoreErrors = require('promise-toolbox/ignoreErrors')
const { formatDateTime } = require('@xen-orchestra/xapi')

const { formatFilenameDate } = require('../../../_filenameDate.js')
const { getOldEntries } = require('./_getOldEntries.js')
const { importIncrementalVm, TAG_COPY_SRC } = require('../../../_incrementalVm.js')
const { Task } = require('../../../Task.js')

const { AbstractIncrementalWriter } = require('./_AbstractIncrementalWriter.js')
const { MixinXapiWriter } = require('./_MixinXapiWriter.js')
const { listReplicatedVms } = require('./_listReplicatedVms.js')

exports.IncrementalXapiWriter = class IncrementalXapiWriter extends MixinXapiWriter(AbstractIncrementalWriter) {
async checkBaseVdis(baseUuidToSrcVdi, baseVm) {
const sr = this._sr
const replicatedVm = listReplicatedVms(sr.$xapi, this._backup.job.id, sr.uuid, this._backup.vm.uuid).find(
vm => vm.other_config[TAG_COPY_SRC] === baseVm.uuid
)
if (replicatedVm === undefined) {
return baseUuidToSrcVdi.clear()
}

const xapi = replicatedVm.$xapi
const replicatedVdis = new Set(
await asyncMap(await replicatedVm.$getDisks(), async vdiRef => {
const otherConfig = await xapi.getField('VDI', vdiRef, 'other_config')
return otherConfig[TAG_COPY_SRC]
})
)

for (const uuid of baseUuidToSrcVdi.keys()) {
if (!replicatedVdis.has(uuid)) {
baseUuidToSrcVdi.delete(uuid)
}
}
}

prepare({ isBase }) {
// create the task related to this export and ensure all methods are called in this context
const task = new Task({
name: 'export',
data: {
id: this._sr.uuid,
isBase,
name_label: this._sr.name_label,
type: 'SR',
},
})
this.transfer = task.wrapFn(this.transfer)
this.cleanup = task.wrapFn(this.cleanup)
this.healthCheck = task.wrapFn(this.healthCheck, true)

return task.run(() => this._prepare())
}

async _prepare() {
const settings = this._settings
const { uuid: srUuid, $xapi: xapi } = this._sr
const { scheduleId, vm } = this._backup

// delete previous interrupted copies
ignoreErrors.call(asyncMapSettled(listReplicatedVms(xapi, scheduleId, undefined, vm.uuid), vm => vm.$destroy))

this._oldEntries = getOldEntries(settings.copyRetention - 1, listReplicatedVms(xapi, scheduleId, srUuid, vm.uuid))

if (settings.deleteFirst) {
await this._deleteOldEntries()
}
}

async cleanup() {
if (!this._settings.deleteFirst) {
await this._deleteOldEntries()
}
}

async _deleteOldEntries() {
return asyncMapSettled(this._oldEntries, vm => vm.$destroy())
}

async _transfer({ timestamp, deltaExport, sizeContainers }) {
const { _warmMigration } = this._settings
const sr = this._sr
const { job, scheduleId, vm } = this._backup

const { uuid: srUuid, $xapi: xapi } = sr

let targetVmRef
await Task.run({ name: 'transfer' }, async () => {
targetVmRef = await importIncrementalVm(
{
__proto__: deltaExport,
vm: {
...deltaExport.vm,
tags: _warmMigration ? deltaExport.vm.tags : [...deltaExport.vm.tags, 'Continuous Replication'],
},
},
sr
)
return {
size: Object.values(sizeContainers).reduce((sum, { size }) => sum + size, 0),
}
})
this._targetVmRef = targetVmRef
const targetVm = await xapi.getRecord('VM', targetVmRef)

await Promise.all([
// warm migration does not disable HA, since the goal is to start the new VM in production
!_warmMigration &&
targetVm.ha_restart_priority !== '' &&
Promise.all([targetVm.set_ha_restart_priority(''), targetVm.add_tags('HA disabled')]),
targetVm.set_name_label(`${vm.name_label} - ${job.name} - (${formatFilenameDate(timestamp)})`),
asyncMap(['start', 'start_on'], op =>
targetVm.update_blocked_operations(
op,
'Start operation for this vm is blocked, clone it if you want to use it.'
)
),
targetVm.update_other_config({
'xo:backup:sr': srUuid,

// these entries need to be added in case of offline backup
'xo:backup:datetime': formatDateTime(timestamp),
'xo:backup:job': job.id,
'xo:backup:schedule': scheduleId,
'xo:backup:vm': vm.uuid,
}),
])
}
}
@@ -0,0 +1,14 @@
'use strict'

const { AbstractWriter } = require('./_AbstractWriter.js')

exports.AbstractFullWriter = class AbstractFullWriter extends AbstractWriter {
async run({ timestamp, sizeContainer, stream }) {
try {
return await this._run({ timestamp, sizeContainer, stream })
} finally {
// ensure stream is properly closed
stream.destroy()
}
}
}
@@ -1,6 +1,8 @@
import { AbstractWriter } from './_AbstractWriter.mjs'
'use strict'

export class AbstractIncrementalWriter extends AbstractWriter {
const { AbstractWriter } = require('./_AbstractWriter.js')

exports.AbstractIncrementalWriter = class AbstractIncrementalWriter extends AbstractWriter {
checkBaseVdis(baseUuidToSrcVdi, baseVm) {
throw new Error('Not implemented')
}
@@ -9,13 +11,13 @@ export class AbstractIncrementalWriter extends AbstractWriter {
throw new Error('Not implemented')
}

prepare({ isFull }) {
prepare({ isBase }) {
throw new Error('Not implemented')
}

async transfer({ deltaExport, ...other }) {
async transfer({ timestamp, deltaExport, sizeContainers }) {
try {
return await this._transfer({ deltaExport, ...other })
return await this._transfer({ timestamp, deltaExport, sizeContainers })
} finally {
// ensure all streams are properly closed
for (const stream of Object.values(deltaExport.streams)) {
@@ -0,0 +1,14 @@
'use strict'

exports.AbstractWriter = class AbstractWriter {
constructor({ backup, settings }) {
this._backup = backup
this._settings = settings
}

beforeBackup() {}

afterBackup() {}

healthCheck(sr) {}
}
@@ -1,27 +1,29 @@
import { createLogger } from '@xen-orchestra/log'
import { join } from 'node:path'
import assert from 'node:assert'
'use strict'

import { formatFilenameDate } from '../../_filenameDate.mjs'
import { getVmBackupDir } from '../../_getVmBackupDir.mjs'
import { HealthCheckVmBackup } from '../../HealthCheckVmBackup.mjs'
import { ImportVmBackup } from '../../ImportVmBackup.mjs'
import { Task } from '../../Task.mjs'
import * as MergeWorker from '../../merge-worker/index.mjs'
const { createLogger } = require('@xen-orchestra/log')
const { join } = require('path')

const { info, warn } = createLogger('xo:backups:MixinBackupWriter')
const assert = require('assert')
const { formatFilenameDate } = require('../../../_filenameDate.js')
const { getVmBackupDir } = require('../../../_getVmBackupDir.js')
const { HealthCheckVmBackup } = require('../../../HealthCheckVmBackup.js')
const { ImportVmBackup } = require('../../../ImportVmBackup.js')
const { Task } = require('../../../Task.js')
const MergeWorker = require('../../../merge-worker/index.js')

export const MixinRemoteWriter = (BaseClass = Object) =>
const { info, warn } = createLogger('xo:backups:MixinRemoteWriter')

exports.MixinRemoteWriter = (BaseClass = Object) =>
class MixinRemoteWriter extends BaseClass {
#lock

constructor({ remoteId, adapter, ...rest }) {
constructor({ remoteId, ...rest }) {
super(rest)

this._adapter = adapter
this._adapter = rest.backup.remoteAdapters[remoteId]
this._remoteId = remoteId

this._vmBackupDir = getVmBackupDir(rest.vmUuid)
this._vmBackupDir = getVmBackupDir(this._backup.vm.uuid)
}

async _cleanVm(options) {
@@ -36,7 +38,7 @@ export const MixinRemoteWriter = (BaseClass = Object) =>
Task.warning(message, data)
},
lock: false,
mergeBlockConcurrency: this._config.mergeBlockConcurrency,
mergeBlockConcurrency: this._backup.config.mergeBlockConcurrency,
})
})
} catch (error) {
@@ -53,10 +55,10 @@ export const MixinRemoteWriter = (BaseClass = Object) =>
}

async afterBackup() {
const { disableMergeWorker } = this._config
const { disableMergeWorker } = this._backup.config
// merge worker is only compatible with local remotes
const { handler } = this._adapter
const willMergeInWorker = !disableMergeWorker && typeof handler.getRealPath === 'function'
const willMergeInWorker = !disableMergeWorker && typeof handler._getRealPath === 'function'

const { merge } = await this._cleanVm({ remove: true, merge: !willMergeInWorker })
await this.#lock.dispose()
@@ -68,15 +70,13 @@ export const MixinRemoteWriter = (BaseClass = Object) =>
// add a random suffix to avoid collision in case multiple tasks are created at the same second
Math.random().toString(36).slice(2)

await handler.outputFile(taskFile, this._vmUuid)
const remotePath = handler.getRealPath()
await handler.outputFile(taskFile, this._backup.vm.uuid)
const remotePath = handler._getRealPath()
await MergeWorker.run(remotePath)
}
}

healthCheck() {
const sr = this._healthCheckSr
assert.notStrictEqual(sr, undefined, 'SR should be defined before making a health check')
healthCheck(sr) {
assert.notStrictEqual(
this._metadataFileName,
undefined,
@@ -96,9 +96,6 @@ export const MixinRemoteWriter = (BaseClass = Object) =>
metadata,
srUuid,
xapi,
settings: {
additionnalVmTag: 'xo:no-bak=Health Check',
},
}).run()
const restoredVm = xapi.getObject(restoredId)
try {
@@ -112,16 +109,4 @@ export const MixinRemoteWriter = (BaseClass = Object) =>
}
)
}

_isAlreadyTransferred(timestamp) {
const vmUuid = this._vmUuid
const adapter = this._adapter
const backupDir = getVmBackupDir(vmUuid)
try {
const actualMetadata = JSON.parse(
adapter._handler.readFile(`${backupDir}/${formatFilenameDate(timestamp)}.json`)
)
return actualMetadata
} catch (error) {}
}
}
@@ -0,0 +1,50 @@
'use strict'

const { Task } = require('../../../Task')
const assert = require('node:assert/strict')
const { HealthCheckVmBackup } = require('../../../HealthCheckVmBackup')

function extractOpaqueRef(str) {
const OPAQUE_REF_RE = /OpaqueRef:[0-9a-z-]+/
const matches = OPAQUE_REF_RE.exec(str)
if (!matches) {
throw new Error('no opaque ref found')
}
return matches[0]
}
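For illustration (hypothetical input strings), `extractOpaqueRef` pulls the first XAPI reference out of a string such as an XML-RPC response:

// extractOpaqueRef('<value>OpaqueRef:7a1f3c42-...</value>') → 'OpaqueRef:7a1f3c42-...'
// extractOpaqueRef('no ref here') → throws Error('no opaque ref found')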
exports.MixinXapiWriter = (BaseClass = Object) =>
class MixinXapiWriter extends BaseClass {
constructor({ sr, ...rest }) {
super(rest)

this._sr = sr
}

healthCheck(sr) {
assert.notEqual(this._targetVmRef, undefined, 'A VM should have been transferred to be health checked')
// copy VM
return Task.run(
{
name: 'health check',
},
async () => {
const { $xapi: xapi } = sr
let clonedVm
try {
const baseVm = xapi.getObject(this._targetVmRef) ?? (await xapi.waitObject(this._targetVmRef))
const clonedRef = await xapi
.callAsync('VM.clone', this._targetVmRef, `Health Check - ${baseVm.name_label}`)
.then(extractOpaqueRef)
clonedVm = xapi.getObject(clonedRef) ?? (await xapi.waitObject(clonedRef))

await new HealthCheckVmBackup({
restoredVm: clonedVm,
xapi,
}).run()
} finally {
clonedVm && (await xapi.VM_destroy(clonedVm.$ref))
}
}
)
}
}
@@ -0,0 +1,8 @@
'use strict'

const openVhd = require('vhd-lib').openVhd
const Disposable = require('promise-toolbox/Disposable')

exports.checkVhd = async function checkVhd(handler, path) {
await Disposable.use(openVhd(handler, path), () => {})
}
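A note on why the empty body is enough: opening the VHD parses and validates its header and footer, so a successful `openVhd` is the check itself, and `Disposable.use` guarantees the handle is released afterwards.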
@@ -1,4 +1,6 @@
'use strict'

// returns all entries except the last `retention` ones
export function getOldEntries(retention, entries) {
exports.getOldEntries = function getOldEntries(retention, entries) {
return entries === undefined ? [] : retention > 0 ? entries.slice(0, -retention) : entries
}
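Illustrative calls (assuming entries are sorted oldest first, as the backup listings are):

// getOldEntries(2, ['a', 'b', 'c', 'd']) → ['a', 'b']   (keep the 2 newest)
// getOldEntries(0, ['a', 'b'])           → ['a', 'b']   (retention 0 keeps nothing)
// getOldEntries(2, undefined)            → []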
@@ -1,3 +1,5 @@
'use strict'

const getReplicatedVmDatetime = vm => {
const { 'xo:backup:datetime': datetime = vm.name_label.slice(-17, -1) } = vm.other_config
return datetime
@@ -5,7 +7,7 @@ const getReplicatedVmDatetime = vm => {

const compareReplicatedVmDatetime = (a, b) => (getReplicatedVmDatetime(a) < getReplicatedVmDatetime(b) ? -1 : 1)
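The `slice(-17, -1)` fallback works because replicated VMs are renamed by the XAPI writer above with a trailing `(<formatFilenameDate>)` suffix, e.g.:

// 'web01 - nightly - (20230115T103000Z)'.slice(-17, -1) === '20230115T103000Z'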

export function listReplicatedVms(xapi, scheduleOrJobId, srUuid, vmUuid) {
exports.listReplicatedVms = function listReplicatedVms(xapi, scheduleOrJobId, srUuid, vmUuid) {
const { all } = xapi.objects
const vms = {}
for (const key in all) {
@@ -1,5 +1,7 @@
'use strict'

const PARSE_UUID_RE = /-/g

export function packUuid(uuid) {
exports.packUuid = function packUuid(uuid) {
return Buffer.from(uuid.replace(PARSE_UUID_RE, ''), 'hex')
}
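For illustration, the dashes are stripped and the remaining 32 hex digits become a 16-byte buffer (the form expected by the VHD footer UUID field updated above):

// packUuid('550e8400-e29b-41d4-a716-446655440000')
// → <Buffer 55 0e 84 00 e2 9b 41 d4 a7 16 44 66 55 44 00 00>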
@@ -1,15 +1,17 @@
import { asyncMapSettled } from '@xen-orchestra/async-map'
import Disposable from 'promise-toolbox/Disposable'
import { limitConcurrency } from 'limit-concurrency-decorator'
'use strict'

import { extractIdsFromSimplePattern } from '../extractIdsFromSimplePattern.mjs'
import { Task } from '../Task.mjs'
import createStreamThrottle from './_createStreamThrottle.mjs'
import { DEFAULT_SETTINGS, Abstract } from './_Abstract.mjs'
import { runTask } from './_runTask.mjs'
import { getAdaptersByRemote } from './_getAdaptersByRemote.mjs'
import { IncrementalXapi } from './_vmRunners/IncrementalXapi.mjs'
import { FullXapi } from './_vmRunners/FullXapi.mjs'
const { asyncMapSettled } = require('@xen-orchestra/async-map')
const Disposable = require('promise-toolbox/Disposable')
const { limitConcurrency } = require('limit-concurrency-decorator')

const { extractIdsFromSimplePattern } = require('../extractIdsFromSimplePattern.js')
const { Task } = require('../Task.js')
const createStreamThrottle = require('./createStreamThrottle.js')
const { IncrementalXapiVmBackup } = require('./VmBackup/IncrementalXapiVmBackup.js')
const { FullXapiVmBackup } = require('./VmBackup/FullXapiVmBackup.js')
const { DEFAULT_SETTINGS, AbstractBackupJob } = require('./AbstractBackupJob.js')
const { runTask } = require('./runTask.js')
const { getAdaptersByRemote } = require('./getAdapterByRemote.js')

const DEFAULT_XAPI_VM_SETTINGS = {
bypassVdiChainsCheck: false,
@@ -17,7 +19,6 @@ const DEFAULT_XAPI_VM_SETTINGS = {
concurrency: 2,
copyRetention: 0,
deleteFirst: false,
diskPerVmConcurrency: 0, // not limited by default
exportRetention: 0,
fullInterval: 0,
healthCheckSr: undefined,
@@ -34,7 +35,7 @@ const DEFAULT_XAPI_VM_SETTINGS = {
vmTimeout: 0,
}

export const VmsXapi = class VmsXapiBackupRunner extends Abstract {
exports.XapiVmBackupJob = class XapiVmBackupJob extends AbstractBackupJob {
_computeBaseSettings(config, job) {
const baseSettings = { ...DEFAULT_SETTINGS }
Object.assign(baseSettings, DEFAULT_XAPI_VM_SETTINGS, config.defaultSettings, config.vm?.defaultSettings)
@@ -112,10 +113,10 @@ export const VmsXapi = class VmsXapiBackupRunner extends Abstract {
}
let vmBackup
if (job.mode === 'delta') {
vmBackup = new IncrementalXapi(opts)
vmBackup = new IncrementalXapiVmBackup(opts)
} else {
if (job.mode === 'full') {
vmBackup = new FullXapi(opts)
vmBackup = new FullXapiVmBackup(opts)
} else {
throw new Error(`Job mode ${job.mode} not implemented`)
}
@@ -1,11 +1,12 @@
import { asyncMap } from '@xen-orchestra/async-map'
import { join } from '@xen-orchestra/fs/path'
'use strict'

import { DIR_XO_CONFIG_BACKUPS } from '../RemoteAdapter.mjs'
import { formatFilenameDate } from '../_filenameDate.mjs'
import { Task } from '../Task.mjs'
const { asyncMap } = require('@xen-orchestra/async-map')

export class XoMetadataBackup {
const { DIR_XO_CONFIG_BACKUPS } = require('../RemoteAdapter.js')
const { formatFilenameDate } = require('../_filenameDate.js')
const { Task } = require('../Task.js')

exports.XoMetadataBackup = class XoMetadataBackup {
constructor({ config, job, remoteAdapters, schedule, settings }) {
this._config = config
this._job = job
@@ -22,17 +23,10 @@ export class XoMetadataBackup {
const dir = `${scheduleDir}/${formatFilenameDate(timestamp)}`

const data = job.xoMetadata
let dataBaseName = './data'

// JSON data is sent as a plain string, binary data is sent as an object with `data` and `encoding` properties
const isJson = typeof data === 'string'
if (isJson) {
dataBaseName += '.json'
}
const fileName = `${dir}/data.json`

const metadata = JSON.stringify(
{
data: dataBaseName,
jobId: job.id,
jobName: job.name,
scheduleId: schedule.id,
@@ -42,8 +36,6 @@ export class XoMetadataBackup {
null,
2
)

const dataFileName = join(dir, dataBaseName)
const metaDataFileName = `${dir}/metadata.json`

await asyncMap(
@@ -60,7 +52,7 @@ export class XoMetadataBackup {
async () => {
const handler = adapter.handler
const dirMode = this._config.dirMode
await handler.outputFile(dataFileName, isJson ? data : Buffer.from(data.data, data.encoding), { dirMode })
await handler.outputFile(fileName, data, { dirMode })
await handler.outputFile(metaDataFileName, metadata, {
dirMode,
})
@@ -1,10 +1,12 @@
import { pipeline } from 'node:stream'
import { ThrottleGroup } from '@kldzj/stream-throttle'
import identity from 'lodash/identity.js'
'use strict'

const { pipeline } = require('node:stream')
const { ThrottleGroup } = require('@kldzj/stream-throttle')
const identity = require('lodash/identity.js')

const noop = Function.prototype

export default function createStreamThrottle(rate) {
module.exports = function createStreamThrottle(rate) {
if (rate === 0) {
return identity
}
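Usage sketch (assumption, based on the imports above: the non-zero case wraps streams through a shared ThrottleGroup so concurrent exports share one bandwidth cap):

// const throttle = createStreamThrottle(1024 * 1024) // 1 MiB/s overall
// const limited = throttle(someReadableStream)       // identity when rate === 0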
@@ -1,13 +1,14 @@
import { createLogger } from '@xen-orchestra/log'
import { finished, PassThrough } from 'node:stream'
'use strict'

const { debug } = createLogger('xo:backups:forkStreamUnpipe')
const { finished, PassThrough } = require('node:stream')

const { debug } = require('@xen-orchestra/log').createLogger('xo:backups:forkStreamUnpipe')

// create a new readable stream from an existing one which may be piped later
//
// in case of error in the new readable stream, it will simply be unpiped
// from the original one
export function forkStreamUnpipe(source) {
exports.forkStreamUnpipe = function forkStreamUnpipe(source) {
const { forks = 0 } = source
source.forks = forks + 1
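A simplified sketch of the unpipe-on-error idea described in the comment above (not the actual implementation, which also tracks `source.forks` to decide when the source itself can be released):

// const fork = new PassThrough()
// source.pipe(fork)
// finished(fork, err => {
//   if (err != null) source.unpipe(fork) // only this consumer is detached
// })
// return fork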

@@ -1,4 +1,6 @@
export function getAdaptersByRemote(adapters) {
'use strict'

exports.getAdaptersByRemote = adapters => {
const adaptersByRemote = {}
adapters.forEach(({ adapter, remoteId }) => {
adaptersByRemote[remoteId] = adapter
7
@xen-orchestra/backups/_backupJob/runTask.js
Normal file
@@ -0,0 +1,7 @@
'use strict'

const { Task } = require('../Task')

const noop = Function.prototype

exports.runTask = (...args) => Task.run(...args).catch(noop) // errors are handled by logs
6
@xen-orchestra/backups/_backupType.js
Normal file
@@ -0,0 +1,6 @@
'use strict'

exports.isMetadataFile = filename => filename.endsWith('.json')
exports.isVhdFile = filename => filename.endsWith('.vhd')
exports.isXvaFile = filename => filename.endsWith('.xva')
exports.isXvaSumFile = filename => filename.endsWith('.xva.checksum')
@@ -1,4 +0,0 @@
export const isMetadataFile = filename => filename.endsWith('.json')
export const isVhdFile = filename => filename.endsWith('.vhd')
export const isXvaFile = filename => filename.endsWith('.xva')
export const isXvaSumFile = filename => filename.endsWith('.xva.checksum')
@@ -1,25 +1,25 @@
import { createLogger } from '@xen-orchestra/log'
import { catchGlobalErrors } from '@xen-orchestra/log/configure'
'use strict'

import Disposable from 'promise-toolbox/Disposable'
import ignoreErrors from 'promise-toolbox/ignoreErrors'
import { compose } from '@vates/compose'
import { createCachedLookup } from '@vates/cached-dns.lookup'
import { createDebounceResource } from '@vates/disposable/debounceResource.js'
import { createRunner } from './Backup.mjs'
import { decorateMethodsWith } from '@vates/decorate-with'
import { deduped } from '@vates/disposable/deduped.js'
import { getHandler } from '@xen-orchestra/fs'
import { parseDuration } from '@vates/parse-duration'
import { Xapi } from '@xen-orchestra/xapi'
const logger = require('@xen-orchestra/log').createLogger('xo:backups:worker')

import { RemoteAdapter } from './RemoteAdapter.mjs'
import { Task } from './Task.mjs'
require('@xen-orchestra/log/configure').catchGlobalErrors(logger)

createCachedLookup().patchGlobal()
require('@vates/cached-dns.lookup').createCachedLookup().patchGlobal()

const Disposable = require('promise-toolbox/Disposable')
const ignoreErrors = require('promise-toolbox/ignoreErrors')
const { compose } = require('@vates/compose')
const { createDebounceResource } = require('@vates/disposable/debounceResource.js')
const { decorateMethodsWith } = require('@vates/decorate-with')
const { deduped } = require('@vates/disposable/deduped.js')
const { getHandler } = require('@xen-orchestra/fs')
const { parseDuration } = require('@vates/parse-duration')
const { Xapi } = require('@xen-orchestra/xapi')

const { instantiateBackupJob } = require('./backupJob.js')
const { RemoteAdapter } = require('./RemoteAdapter.js')
const { Task } = require('./Task.js')

const logger = createLogger('xo:backups:worker')
catchGlobalErrors(logger)
const { debug } = logger

class BackupWorker {
@@ -48,7 +48,7 @@ class BackupWorker {
}

run() {
return createRunner({
return instantiateBackupJob({
config: this.#config,
getAdapter: remoteId => this.getAdapter(this.#remotes[remoteId]),
getConnectedRecord: Disposable.factory(async function* getConnectedRecord(type, uuid) {
@@ -1,11 +1,13 @@
import cancelable from 'promise-toolbox/cancelable'
import CancelToken from 'promise-toolbox/CancelToken'
'use strict'

const cancelable = require('promise-toolbox/cancelable')
const CancelToken = require('promise-toolbox/CancelToken')

// Similar to `Promise.all` + `map` but pass a cancel token to the callback
//
// If any of the executions fails, the cancel token will be triggered and the
// first reason will be rejected.
export const cancelableMap = cancelable(async function cancelableMap($cancelToken, iterable, callback) {
exports.cancelableMap = cancelable(async function cancelableMap($cancelToken, iterable, callback) {
const { cancel, token } = CancelToken.source([$cancelToken])
try {
return await Promise.all(
@@ -1,19 +1,19 @@
import test from 'test'
import { strict as assert } from 'node:assert'
'use strict'

import tmp from 'tmp'
import fs from 'fs-extra'
import * as uuid from 'uuid'
import { getHandler } from '@xen-orchestra/fs'
import { pFromCallback } from 'promise-toolbox'
import { RemoteAdapter } from './RemoteAdapter.mjs'
import { VHDFOOTER, VHDHEADER } from './tests.fixtures.mjs'
import { VhdFile, Constants, VhdDirectory, VhdAbstract } from 'vhd-lib'
import { checkAliases } from './_cleanVm.mjs'
import { dirname, basename } from 'node:path'
import { rimraf } from 'rimraf'
const { beforeEach, afterEach, test, describe } = require('test')
const assert = require('assert').strict

const { beforeEach, afterEach, describe } = test
const rimraf = require('rimraf')
const tmp = require('tmp')
const fs = require('fs-extra')
const uuid = require('uuid')
const { getHandler } = require('@xen-orchestra/fs')
const { pFromCallback } = require('promise-toolbox')
const { RemoteAdapter } = require('./RemoteAdapter')
const { VHDFOOTER, VHDHEADER } = require('./tests.fixtures.js')
const { VhdFile, Constants, VhdDirectory, VhdAbstract } = require('vhd-lib')
const { checkAliases } = require('./_cleanVm')
const { dirname, basename } = require('path')

let tempDir, adapter, handler, jobId, vdiId, basePath, relativePath
const rootPath = 'xo-vm-backups/VMUUID/'
@@ -67,11 +67,6 @@ async function generateVhd(path, opts = {}) {
await VhdAbstract.createAlias(handler, path + '.alias.vhd', dataPath)
}

if (opts.blocks) {
for (const blockId of opts.blocks) {
await vhd.writeEntireBlock({ id: blockId, buffer: Buffer.alloc(2 * 1024 * 1024 + 512, blockId) })
}
}
await vhd.writeBlockAllocationTable()
await vhd.writeHeader()
await vhd.writeFooter()
@@ -235,7 +230,7 @@ test('it merges delta of non destroyed chain', async () => {

const metadata = JSON.parse(await handler.readFile(`${rootPath}/metadata.json`))
// size should be the size of children + grand children after the merge
assert.equal(metadata.size, 104960)
assert.equal(metadata.size, 209920)

// merging is already tested in vhd-lib, don't retest it here (and these VHDs are as empty as my stomach at 12h12)
// only check deletion
@@ -325,7 +320,6 @@ describe('tests multiple combination ', () => {
const ancestor = await generateVhd(`${basePath}/ancestor.vhd`, {
useAlias,
mode: vhdMode,
blocks: [1, 3],
})
const child = await generateVhd(`${basePath}/child.vhd`, {
useAlias,
@@ -334,7 +328,6 @@ describe('tests multiple combination ', () => {
parentUnicodeName: 'ancestor.vhd' + (useAlias ? '.alias.vhd' : ''),
parentUuid: ancestor.footer.uuid,
},
blocks: [1, 2],
})
// a grand child vhd in metadata
await generateVhd(`${basePath}/grandchild.vhd`, {
@@ -344,7 +337,6 @@ describe('tests multiple combination ', () => {
parentUnicodeName: 'child.vhd' + (useAlias ? '.alias.vhd' : ''),
parentUuid: child.footer.uuid,
},
blocks: [2, 3],
})

// an older parent that was merging in clean
@@ -403,7 +395,7 @@ describe('tests multiple combination ', () => {

const metadata = JSON.parse(await handler.readFile(`${rootPath}/metadata.json`))
// size should be the size of children + grand children + clean after the merge
assert.deepEqual(metadata.size, vhdMode === 'file' ? 6502400 : 6501888)
assert.deepEqual(metadata.size, vhdMode === 'file' ? 314880 : undefined)

// broken, non-referenced, abandoned VHDs should be deleted (alias and data)
// ancestor and child should be merged
@@ -1,18 +1,19 @@
import * as UUID from 'uuid'
import sum from 'lodash/sum.js'
import { asyncMap } from '@xen-orchestra/async-map'
import { Constants, openVhd, VhdAbstract, VhdFile } from 'vhd-lib'
import { isVhdAlias, resolveVhdAlias } from 'vhd-lib/aliases.js'
import { dirname, resolve } from 'node:path'
import { isMetadataFile, isVhdFile, isXvaFile, isXvaSumFile } from './_backupType.mjs'
import { limitConcurrency } from 'limit-concurrency-decorator'
import { mergeVhdChain } from 'vhd-lib/merge.js'

import { Task } from './Task.mjs'
import { Disposable } from 'promise-toolbox'
import handlerPath from '@xen-orchestra/fs/path'
'use strict'

const sum = require('lodash/sum')
const UUID = require('uuid')
const { asyncMap } = require('@xen-orchestra/async-map')
const { Constants, openVhd, VhdAbstract, VhdFile } = require('vhd-lib')
const { isVhdAlias, resolveVhdAlias } = require('vhd-lib/aliases')
const { dirname, resolve } = require('path')
const { DISK_TYPES } = Constants
const { isMetadataFile, isVhdFile, isXvaFile, isXvaSumFile } = require('./_backupType.js')
const { limitConcurrency } = require('limit-concurrency-decorator')
const { mergeVhdChain } = require('vhd-lib/merge')

const { Task } = require('./Task.js')
const { Disposable } = require('promise-toolbox')
const handlerPath = require('@xen-orchestra/fs/path')

// checking the size of a VHD directory is costly:
// 1 HTTP query per 1000 blocks
@@ -36,32 +37,34 @@ const computeVhdsSize = (handler, vhdPaths) =>
)

// chain is [ ancestor, child_1, ..., child_n ]
async function _mergeVhdChain(handler, chain, { logInfo, remove, mergeBlockConcurrency }) {
logInfo(`merging VHD chain`, { chain })
async function _mergeVhdChain(handler, chain, { logInfo, remove, merge, mergeBlockConcurrency }) {
if (merge) {
logInfo(`merging VHD chain`, { chain })

let done, total
const handle = setInterval(() => {
if (done !== undefined) {
logInfo('merge in progress', {
done,
parent: chain[0],
progress: Math.round((100 * done) / total),
total,
let done, total
const handle = setInterval(() => {
if (done !== undefined) {
logInfo('merge in progress', {
done,
parent: chain[0],
progress: Math.round((100 * done) / total),
total,
})
}
}, 10e3)
try {
return await mergeVhdChain(handler, chain, {
logInfo,
mergeBlockConcurrency,
onProgress({ done: d, total: t }) {
done = d
total = t
},
removeUnused: remove,
})
} finally {
clearInterval(handle)
}
}, 10e3)
try {
return await mergeVhdChain(handler, chain, {
logInfo,
mergeBlockConcurrency,
onProgress({ done: d, total: t }) {
done = d
total = t
},
removeUnused: remove,
})
} finally {
clearInterval(handle)
}
}

@@ -114,7 +117,7 @@ const listVhds = async (handler, vmDir, logWarn) => {
return { vhds, interruptedVhds, aliases }
}

export async function checkAliases(
async function checkAliases(
aliasPaths,
targetDataRepository,
{ handler, logInfo = noop, logWarn = console.warn, remove = false }
@@ -173,9 +176,11 @@ export async function checkAliases(
})
}

exports.checkAliases = checkAliases

const defaultMergeLimiter = limitConcurrency(1)

export async function cleanVm(
exports.cleanVm = async function cleanVm(
vmDir,
{
fixMetadata,
@@ -469,20 +474,23 @@ export async function cleanVm(
const metadataWithMergedVhd = {}
const doMerge = async () => {
await asyncMap(toMerge, async chain => {
const { finalVhdSize } = await limitedMergeVhdChain(handler, chain, {
const merged = await limitedMergeVhdChain(handler, chain, {
logInfo,
logWarn,
remove,
merge,
mergeBlockConcurrency,
})
const metadataPath = vhdsToJSons[chain[chain.length - 1]] // the whole chain should have the same metadata file
metadataWithMergedVhd[metadataPath] = (metadataWithMergedVhd[metadataPath] ?? 0) + finalVhdSize
if (merged !== undefined) {
const metadataPath = vhdsToJSons[chain[chain.length - 1]] // the whole chain should have the same metadata file
metadataWithMergedVhd[metadataPath] = true
}
})
}

await Promise.all([
...unusedVhdsDeletion,
toMerge.length !== 0 && (merge ? Task.run({ name: 'merge' }, doMerge) : () => Promise.resolve()),
toMerge.length !== 0 && (merge ? Task.run({ name: 'merge' }, doMerge) : doMerge()),
asyncMap(unusedXvas, path => {
logWarn('unused XVA', { path })
if (remove) {
@@ -504,11 +512,12 @@ export async function cleanVm(

// update size for delta metadata with merged VHD
// for the others, check that the size is the same as the real file size

await asyncMap(jsons, async metadataPath => {
const metadata = backups.get(metadataPath)

let fileSystemSize
const mergedSize = metadataWithMergedVhd[metadataPath]
const merged = metadataWithMergedVhd[metadataPath] !== undefined

const { mode, size, vhds, xva } = metadata

@@ -518,29 +527,26 @@ export async function cleanVm(
const linkedXva = resolve('/', vmDir, xva)
try {
fileSystemSize = await handler.getSize(linkedXva)
if (fileSystemSize !== size && fileSystemSize !== undefined) {
logWarn('cleanVm: incorrect backup size in metadata', {
path: metadataPath,
actual: size ?? 'none',
expected: fileSystemSize,
})
}
} catch (error) {
// can fail with encrypted remote
}
} else if (mode === 'delta') {
const linkedVhds = Object.keys(vhds).map(key => resolve('/', vmDir, vhds[key]))
fileSystemSize = await computeVhdsSize(handler, linkedVhds)

// the size is not computed in some cases (e.g. VhdDirectory)
if (fileSystemSize === undefined) {
return
}

// don't warn if the size has changed after a merge
if (mergedSize === undefined) {
const linkedVhds = Object.keys(vhds).map(key => resolve('/', vmDir, vhds[key]))
fileSystemSize = await computeVhdsSize(handler, linkedVhds)
// the size is not computed in some cases (e.g. VhdDirectory)
if (fileSystemSize !== undefined && fileSystemSize !== size) {
logWarn('cleanVm: incorrect backup size in metadata', {
path: metadataPath,
actual: size ?? 'none',
expected: fileSystemSize,
})
}
if (!merged && fileSystemSize !== size) {
// FIXME: figure out why it occurs so often and, once fixed, log the real problems with `logWarn`
console.warn('cleanVm: incorrect backup size in metadata', {
path: metadataPath,
actual: size ?? 'none',
expected: fileSystemSize,
})
}
}
} catch (error) {
@@ -548,19 +554,9 @@ export async function cleanVm(
return
}

// systematically update size and differentials after a merge

// @todo : after 2024-04-01 remove the fixMetadata option since the size computation is fixed
if (mergedSize || (fixMetadata && fileSystemSize !== size)) {
metadata.size = mergedSize ?? fileSystemSize ?? size

if (mergedSize) {
// all disks are now key disks
metadata.isVhdDifferencing = {}
for (const id of Object.values(metadata.vdis ?? {})) {
metadata.isVhdDifferencing[`${id}.vhd`] = false
}
}
// systematically update size after a merge
if ((merged || fixMetadata) && size !== fileSystemSize) {
metadata.size = fileSystemSize
mustRegenerateCache = true
try {
await handler.writeFile(metadataPath, JSON.stringify(metadata), { flags: 'w' })
8
@xen-orchestra/backups/_filenameDate.js
Normal file
@@ -0,0 +1,8 @@
'use strict'

const { utcFormat, utcParse } = require('d3-time-format')

// Format a date in ISO 8601 in a safe way to be used in filenames
// (even on Windows).
exports.formatFilenameDate = utcFormat('%Y%m%dT%H%M%SZ')
exports.parseFilenameDate = utcParse('%Y%m%dT%H%M%SZ')
@@ -1,6 +0,0 @@
import { utcFormat, utcParse } from 'd3-time-format'

// Format a date in ISO 8601 in a safe way to be used in filenames
// (even on Windows).
export const formatFilenameDate = utcFormat('%Y%m%dT%H%M%SZ')
export const parseFilenameDate = utcParse('%Y%m%dT%H%M%SZ')
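Illustrative round-trip (UTC): the pattern contains no characters that are invalid in Windows file names, unlike the `:` produced by `Date#toISOString`:

// formatFilenameDate(new Date(Date.UTC(2023, 0, 15, 10, 30, 0))) → '20230115T103000Z'
// parseFilenameDate('20230115T103000Z') → a Date for 2023-01-15T10:30:00Z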
@@ -1,11 +1,13 @@
import Disposable from 'promise-toolbox/Disposable'
import { join } from 'node:path'
import { mkdir, rmdir } from 'node:fs/promises'
import { tmpdir } from 'os'
'use strict'

const Disposable = require('promise-toolbox/Disposable')
const { join } = require('path')
const { mkdir, rmdir } = require('fs-extra')
const { tmpdir } = require('os')

const MAX_ATTEMPTS = 3

export async function getTmpDir() {
exports.getTmpDir = async function getTmpDir() {
for (let i = 0; true; ++i) {
const path = join(tmpdir(), Math.random().toString(36).slice(2))
try {
8
@xen-orchestra/backups/_getVmBackupDir.js
Normal file
@@ -0,0 +1,8 @@
'use strict'

const BACKUP_DIR = 'xo-vm-backups'
exports.BACKUP_DIR = BACKUP_DIR

exports.getVmBackupDir = function getVmBackupDir(uuid) {
return `${BACKUP_DIR}/${uuid}`
}
@@ -1,5 +0,0 @@
export const BACKUP_DIR = 'xo-vm-backups'

export function getVmBackupDir(uuid) {
return `${BACKUP_DIR}/${uuid}`
}
@@ -1,32 +1,41 @@
import groupBy from 'lodash/groupBy.js'
import ignoreErrors from 'promise-toolbox/ignoreErrors'
import omit from 'lodash/omit.js'
import { asyncMap } from '@xen-orchestra/async-map'
import { CancelToken } from 'promise-toolbox'
import { compareVersions } from 'compare-versions'
import { createVhdStreamWithLength } from 'vhd-lib'
import { defer } from 'golike-defer'
'use strict'

import { cancelableMap } from './_cancelableMap.mjs'
import { Task } from './Task.mjs'
import pick from 'lodash/pick.js'
const find = require('lodash/find.js')
const groupBy = require('lodash/groupBy.js')
const ignoreErrors = require('promise-toolbox/ignoreErrors')
const omit = require('lodash/omit.js')
const { asyncMap } = require('@xen-orchestra/async-map')
const { CancelToken } = require('promise-toolbox')
const { compareVersions } = require('compare-versions')
const { createVhdStreamWithLength } = require('vhd-lib')
const { defer } = require('golike-defer')

// in `other_config` of an incrementally replicated VM, contains the UUID of the source VM
export const TAG_BASE_DELTA = 'xo:base_delta'
const { cancelableMap } = require('./_cancelableMap.js')
const { Task } = require('./Task.js')
const pick = require('lodash/pick.js')

// in `other_config` of an incrementally replicated VM, contains the UUID of the target SR used for replication
//
// added after the complete replication
export const TAG_BACKUP_SR = 'xo:backup:sr'
const TAG_BASE_DELTA = 'xo:base_delta'
exports.TAG_BASE_DELTA = TAG_BASE_DELTA

// in other_config of VDIs of an incrementally replicated VM, contains the UUID of the source VDI
export const TAG_COPY_SRC = 'xo:copy_of'
const TAG_COPY_SRC = 'xo:copy_of'
exports.TAG_COPY_SRC = TAG_COPY_SRC

const ensureArray = value => (value === undefined ? [] : Array.isArray(value) ? value : [value])
const resolveUuid = async (xapi, cache, uuid, type) => {
if (uuid == null) {
return uuid
}
let ref = cache.get(uuid)
if (ref === undefined) {
ref = await xapi.call(`${type}.get_by_uuid`, uuid)
cache.set(uuid, ref)
}
return ref
}

export async function exportIncrementalVm(
exports.exportIncrementalVm = async function exportIncrementalVm(
vm,
baseVm,
vmComparisonBasis,
{
cancelToken = CancelToken.none,

@@ -34,17 +43,15 @@ export async function exportIncrementalVm(
fullVdisRequired = new Set(),

disableBaseTags = false,
nbdConcurrency = 1,
preferNbd,
} = {}
) {
// refs of VM's VDIs → base's VDIs.
const baseVdis = {}
baseVm &&
baseVm.$VBDs.forEach(vbd => {
const vdisComparisonBasis = {}
vmComparisonBasis &&
vmComparisonBasis.$VBDs.forEach(vbd => {
let vdi, snapshotOf
if ((vdi = vbd.$VDI) && (snapshotOf = vdi.$snapshot_of) && !fullVdisRequired.has(snapshotOf.uuid)) {
baseVdis[vdi.snapshot_of] = vdi
vdisComparisonBasis[vdi.snapshot_of] = vdi
}
})

@@ -67,24 +74,22 @@ export async function exportIncrementalVm(
}

// Look for a snapshot of this vdi in the base VM.
const baseVdi = baseVdis[vdi.snapshot_of]
const vdiComparisonBasis = vdisComparisonBasis[vdi.snapshot_of]

vdis[vdiRef] = {
...vdi,
other_config: {
...vdi.other_config,
[TAG_BASE_DELTA]: baseVdi && !disableBaseTags ? baseVdi.uuid : undefined,
[TAG_BASE_DELTA]: vdiComparisonBasis && !disableBaseTags ? vdiComparisonBasis.uuid : undefined,
},
$snapshot_of$uuid: vdi.$snapshot_of?.uuid,
$SR$uuid: vdi.$SR.uuid,
}

streams[`${vdiRef}.vhd`] = await vdi.$exportContent({
baseRef: baseVdi?.$ref,
baseRef: vdiComparisonBasis?.$ref,
cancelToken,
format: 'vhd',
nbdConcurrency,
preferNbd,
})
})

@@ -121,10 +126,10 @@ export async function exportIncrementalVm(
vm: {
...vm,
other_config:
baseVm && !disableBaseTags
vmComparisonBasis && !disableBaseTags
? {
...vm.other_config,
[TAG_BASE_DELTA]: baseVm.uuid,
[TAG_BASE_DELTA]: vmComparisonBasis.uuid,
}
: omit(vm.other_config, TAG_BASE_DELTA),
},
@@ -138,11 +143,11 @@ export async function exportIncrementalVm(
)
}

export const importIncrementalVm = defer(async function importIncrementalVm(
exports.importIncrementalVm = defer(async function importIncrementalVm(
$defer,
incrementalVm,
sr,
{ cancelToken = CancelToken.none, newMacAddresses = false } = {}
{ cancelToken = CancelToken.none, detectBase = true, mapVdisSrs = {}, newMacAddresses = false } = {}
) {
const { version } = incrementalVm
if (compareVersions(version, '1.0.0') < 0) {
@@ -152,6 +157,32 @@ export const importIncrementalVm = defer(async function importIncrementalVm(
const vmRecord = incrementalVm.vm
const xapi = sr.$xapi

let baseVm
if (detectBase) {
const remoteBaseVmUuid = vmRecord.other_config[TAG_BASE_DELTA]
if (remoteBaseVmUuid) {
baseVm = find(xapi.objects.all, obj => (obj = obj.other_config) && obj[TAG_COPY_SRC] === remoteBaseVmUuid)

if (!baseVm) {
throw new Error(`could not find the base VM (copy of ${remoteBaseVmUuid})`)
}
}
}

const cache = new Map()
const mapVdisSrRefs = {}
for (const [vdiUuid, srUuid] of Object.entries(mapVdisSrs)) {
mapVdisSrRefs[vdiUuid] = await resolveUuid(xapi, cache, srUuid, 'SR')
}

const baseVdis = {}
baseVm &&
baseVm.$VBDs.forEach(vbd => {
const vdi = vbd.$VDI
if (vdi !== undefined) {
baseVdis[vbd.VDI] = vbd.$VDI
}
})
const vdiRecords = incrementalVm.vdis

// 0. Create suspend_VDI
@@ -163,7 +194,18 @@ export const importIncrementalVm = defer(async function importIncrementalVm(
vm: pick(vmRecord, 'uuid', 'name_label', 'suspend_VDI'),
})
} else {
suspendVdi = await xapi.getRecord('VDI', await xapi.VDI_create(vdi))
suspendVdi = await xapi.getRecord(
'VDI',
await xapi.VDI_create({
...vdi,
other_config: {
...vdi.other_config,
[TAG_BASE_DELTA]: undefined,
[TAG_COPY_SRC]: vdi.uuid,
},
sr: mapVdisSrRefs[vdi.uuid] ?? sr.$ref,
})
)
$defer.onFailure(() => suspendVdi.$destroy())
}
}
@@ -181,6 +223,10 @@ export const importIncrementalVm = defer(async function importIncrementalVm(
ha_always_run: false,
is_a_template: false,
name_label: '[Importing…] ' + vmRecord.name_label,
other_config: {
...vmRecord.other_config,
[TAG_COPY_SRC]: vmRecord.uuid,
},
},
{
bios_strings: vmRecord.bios_strings,
@@ -201,8 +247,14 @@ export const importIncrementalVm = defer(async function importIncrementalVm(
const vdi = vdiRecords[vdiRef]
let newVdi

if (vdi.baseVdi !== undefined) {
newVdi = await xapi.getRecord('VDI', await vdi.baseVdi.$clone())
const remoteBaseVdiUuid = detectBase && vdi.other_config[TAG_BASE_DELTA]
if (remoteBaseVdiUuid) {
const baseVdi = find(baseVdis, vdi => vdi.other_config[TAG_COPY_SRC] === remoteBaseVdiUuid)
if (!baseVdi) {
throw new Error(`missing base VDI (copy of ${remoteBaseVdiUuid})`)
}

newVdi = await xapi.getRecord('VDI', await baseVdi.$clone())
$defer.onFailure(() => newVdi.$destroy())

await newVdi.update_other_config(TAG_COPY_SRC, vdi.uuid)
@@ -213,7 +265,18 @@ export const importIncrementalVm = defer(async function importIncrementalVm(
// the suspend VDI has already been created
newVdi = suspendVdi
} else {
newVdi = await xapi.getRecord('VDI', await xapi.VDI_create(vdi))
newVdi = await xapi.getRecord(
'VDI',
await xapi.VDI_create({
...vdi,
other_config: {
...vdi.other_config,
[TAG_BASE_DELTA]: undefined,
[TAG_COPY_SRC]: vdi.uuid,
},
SR: mapVdisSrRefs[vdi.uuid] ?? sr.$ref,
})
)
$defer.onFailure(() => newVdi.$destroy())
}

@@ -252,19 +315,13 @@ export const importIncrementalVm = defer(async function importIncrementalVm(
// Import VDI contents.
cancelableMap(cancelToken, Object.entries(newVdis), async (cancelToken, [id, vdi]) => {
for (let stream of ensureArray(streams[`${id}.vhd`])) {
if (stream === null) {
// we restore a backup and completely reuse a local snapshot
continue
}
if (typeof stream === 'function') {
stream = await stream()
}
if (stream.length === undefined) {
stream = await createVhdStreamWithLength(stream)
}
await xapi.setField('VDI', vdi.$ref, 'name_label', `[Importing] ${vdiRecords[id].name_label}`)
await vdi.$importContent(stream, { cancelToken, format: 'vhd' })
await xapi.setField('VDI', vdi.$ref, 'name_label', vdiRecords[id].name_label)
}
}),

@@ -1,4 +1,6 @@
import assert from 'node:assert'
'use strict'

const assert = require('assert')

const COMPRESSED_MAGIC_NUMBERS = [
// https://tools.ietf.org/html/rfc1952.html#page-5
@@ -45,7 +47,7 @@ const isValidTar = async (handler, size, fd) => {
}

// TODO: find a heuristic for compressed files
export async function isValidXva(path) {
async function isValidXva(path) {
const handler = this._handler

// size is longer when encrypted + reading part of an encrypted file is not implemented
@@ -72,5 +74,6 @@ export async function isValidXva(path) {
return true
}
}
exports.isValidXva = isValidXva

const noop = Function.prototype
@@ -1,7 +1,9 @@
import fromCallback from 'promise-toolbox/fromCallback'
import { createLogger } from '@xen-orchestra/log'
import { createParser } from 'parse-pairs'
import { execFile } from 'child_process'
'use strict'

const fromCallback = require('promise-toolbox/fromCallback')
const { createLogger } = require('@xen-orchestra/log')
const { createParser } = require('parse-pairs')
const { execFile } = require('child_process')

const { debug } = createLogger('xo:backups:listPartitions')

@@ -22,7 +24,8 @@ const IGNORED_PARTITION_TYPES = {
0x82: true, // swap
}

export const LVM_PARTITION_TYPE = 0x8e
const LVM_PARTITION_TYPE = 0x8e
exports.LVM_PARTITION_TYPE = LVM_PARTITION_TYPE

const parsePartxLine = createParser({
keyTransform: key => (key === 'UUID' ? 'id' : key.toLowerCase()),
@@ -30,7 +33,7 @@ const parsePartxLine = createParser({
})

// returns an empty array in case of a non-partitioned disk
export async function listPartitions(devicePath) {
exports.listPartitions = async function listPartitions(devicePath) {
const parts = await fromCallback(execFile, 'partx', [
'--bytes',
'--output=NR,START,SIZE,NAME,UUID,TYPE',
Some files were not shown because too many files have changed in this diff