Compare commits: feat_progr...feat_nbd_s

240 Commits
3e1227c710, ddc73fb836, a13fda5fe9, 66bee59774, 685400bbf8, 5bef8fc411, aa7ff1449a, 3dca7f2a71,
3dc2f649f6, 9eb537c2f9, dfd5f6882f, 7214016338, 606e3c4ce5, fb04d3d25d, db8c042131, fd9005fba8,
2d25413b8d, 035679800a, abd0a3035a, d307730c68, 1b44de4958, ec78a1ce8b, 19c82ab30d, 9986f3fb18,
d24e9c093d, 70c8b24fac, 9c9c11104b, cba90b27f4, 46cbced570, 52cf2d1514, e51351be8d, 2a42e0ff94,
3a824a2bfc, fc1c809a18, 221cd40199, aca19d9a81, 0601bbe18d, 2d52aee952, 99605bf185, 91b19d9bc4,
562401ebe4, 6fd2f2610d, 6ae19b0640, 6b936d8a8c, 8f2cfaae00, 5c215e1a8a, e3cb98124f, 90c3319880,
348db876d2, 408fd7ec03, 1fd84836b1, 522204795f, e29c422ac9, 152cf09b7e, ff728099dc, 706d94221d,
340e9af7f4, 40e536ba61, fd4c56c8c2, 20d04ba956, 3b1bcc67ae, 1add3fbf9d, 97f0759de0, 005ab47d9b,
14a0caa4c6, 1c23bd5ff7, 49c161b17a, 18dce3fce6, d6fc86b6bc, 61d960d4b1, 02d3465832, 4bbadc9515,
78586291ca, 945dec94bf, 003140d96b, 363d7cf0d0, f0c94496bf, de217eabd9, 7c80d0c1e1, 9fb749b1db,
ad9c59669a, 76a038e403, 0e12072922, 158a8e14a2, 0c97910349, 8347ac6ed8, 996abd6e7e, de8abd5b63,
3de928c488, a2a514e483, ff432e04b0, 4502590bb0, 6d440a5af5, 0840b4c359, 696ee7dbe5, 5e23e356ce,
c705051a89, ce2b918a29, df740b1e8e, c3e0308ad0, 1005e295b2, b3cf58b8c0, 2652c87917, 9e0b5575a4,
56c089dc01, 3b94da1790, ec39a8e9fe, 6339f971ca, 2978ad1486, c0d6dc48de, f327422254, 938d15d31b,
5ab1ddb9cb, 01302d7a60, c68630e2d6, db082bfbe9, 650d88db46, 7d1ecca669, 5f71e629ae, 68205d4676,
cdb466225d, 0e7fbd598f, 99147c893d, c63fb6173d, 5932ada717, 0d579748d6, 8c5ee4eafe, b03935ad2f,
38439cbc43, 161c20b534, 603696dad1, 6b2ad5a7cc, 88063d4d87, 8956a99745, 0f0c0ec0d0, e5932e2c33,
84ec8f5f3c, 661c5a269f, 5c6d7cae66, fcc73859b7, 36645b0319, a62575e3cf, d7af3d3c03, 130ebb7d5f,
2af845ebd3, 8e4d1701e6, 4d16b6708f, 34ee08be25, d66a76a09e, 0d801c9766, b82b676fdb, 3494c0f64f,
311098adc2, 58182e2083, a62ae43274, f256610e08, 983d048219, 3c6033f904, ef2bd2b59d, 04d70e9aa8,
a2587ffc0a, 6776e7bb3d, 4c05064294, c135f1394f, d68f4215f1, af562f3c3a, 7b949716bc, d3e256289b,
3688e762b1, 249f1a7af4, 2de26030ff, fcc76fb8d0, 88d5b7095e, b0e55d88de, 370ad3e928, 07bf77d2dd,
a5ec65f3c0, 522b318fd9, 9eb2a4033f, e87b0c393a, 1fb7e665fa, 7ea476d787, 8260d07d61, ac0b4e6514,
27b2f8cf27, 27b5737f65, 55b2e0292f, 464d83e70f, 614255a73a, 90d15e1346, b0e2ea64e9, 1da05e239d,
fe7f0db81f, 983153e620, 6fe791dcf2, 1ad406c7dd, 4e032e11b1, ea34516d73, e1145f35ee, 6864775b8a,
f28721b847, 2dc174fd9d, 07142d0410, 41bb16ca30, d8f1034858, 52b3c49cdb, c5cb1a5e96, 92d9d3232c,
9c4e0464f0, 72d25754fd, 1465a0ba59, ac8ce28286, c4b06e1915, f77675a8a3, b907c1fd03, fba86bf653,
b18ebcc38d, 4f7f18458e, d412196052, 1d140d8fd2, 6948a25b09, 26131917e3, 44a0ab6d0a, 2b8b033ad7,
3ee0b3e7df, 927a55ab30, b70721cb60, f71c820f15, 74e0405a5e, 79b55ba30a, ee0adaebc5, 83c5c976e3,
18bd2c607e, e2695ce327, 3f316fcaea, 8b7b162c76, aa36629def, ca345bd6d8, 61324d10f9, 92fd92ae63
@@ -1,8 +1,11 @@
 'use strict'

 module.exports = {
   arrowParens: 'avoid',
+  jsxSingleQuote: true,
   semi: false,
   singleQuote: true,
   trailingComma: 'es5',

+  // 2020-11-24: Requested by nraynaud and approved by the rest of the team
+  //
@@ -1,9 +1,7 @@
-'use strict'
-
-const LRU = require('lru-cache')
-const Fuse = require('fuse-native')
-const { VhdSynthetic } = require('vhd-lib')
-const { Disposable, fromCallback } = require('promise-toolbox')
+import LRU from 'lru-cache'
+import Fuse from 'fuse-native'
+import { VhdSynthetic } from 'vhd-lib'
+import { Disposable, fromCallback } from 'promise-toolbox'

 // build a stat object from https://github.com/fuse-friends/fuse-native/blob/master/test/fixtures/stat.js
 const stat = st => ({
@@ -16,7 +14,7 @@ const stat = st => ({
   gid: st.gid !== undefined ? st.gid : process.getgid(),
 })

-exports.mount = Disposable.factory(async function* mount(handler, diskPath, mountDir) {
+export const mount = Disposable.factory(async function* mount(handler, diskPath, mountDir) {
   const vhd = yield VhdSynthetic.fromVhdChain(handler, diskPath)

   const cache = new LRU({
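The module is now consumed as an ESM named export. A minimal usage sketch, assuming a handler obtained from `@xen-orchestra/fs` and illustrative paths (none of these values come from the diff itself):

```js
import { getSyncedHandler } from '@xen-orchestra/fs'
import { Disposable } from 'promise-toolbox'
import { mount } from '@vates/fuse-vhd'

// mount() is a Disposable factory: the FUSE mount lives for the duration
// of the generator and is cleaned up automatically afterwards
await Disposable.use(async function* () {
  const handler = yield getSyncedHandler({ url: 'file:///path/to/remote' })
  yield mount(handler, 'path/to/disk.vhd', '/mnt/vhd')
  // read the synthetic raw disk under /mnt/vhd while the mount is alive
})
```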
@@ -1,6 +1,6 @@
 {
   "name": "@vates/fuse-vhd",
-  "version": "1.0.0",
+  "version": "2.0.0",
   "license": "ISC",
   "private": false,
   "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/fuse-vhd",
@@ -15,13 +15,14 @@
     "url": "https://vates.fr"
   },
   "engines": {
-    "node": ">=10.0"
+    "node": ">=14"
   },
+  "main": "./index.mjs",
   "dependencies": {
     "fuse-native": "^2.2.6",
     "lru-cache": "^7.14.0",
     "promise-toolbox": "^0.21.0",
-    "vhd-lib": "^4.4.1"
+    "vhd-lib": "^4.5.0"
   },
   "scripts": {
     "postversion": "npm publish --access public"
@vates/nbd-client/bench.mjs (new file, 32 lines)
@@ -0,0 +1,32 @@
import NbdClient from './client.mjs'

async function bench() {
  const client = new NbdClient({
    address: 'localhost',
    port: 9000,
    exportname: 'bench_export',
  })
  await client.connect()
  console.log('connected', client.exportSize)

  for (let chunk_size = 16 * 1024; chunk_size < 16 * 1024 * 1024; chunk_size *= 2) {
    let i = 0
    const start = +new Date()
    for await (const block of client.readBlocks(chunk_size)) {
      i++
      if ((i * chunk_size) % (16 * 1024 * 1024) === 0) {
        process.stdout.write('.')
      }
      // stop after reading 1 GiB for this chunk size
      if (i * chunk_size > 1024 * 1024 * 1024) break
    }
    // throughput in MB/s for this chunk size
    console.log(chunk_size, Math.round((i * chunk_size / 1024 / 1024) * 1000 / (new Date() - start)))
  }
  await client.disconnect()
}

bench()
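The benchmark assumes an NBD server listening on localhost:9000 with an export named `bench_export`. The integration tests below start one with nbdkit, and the same approach works here; a sketch reusing the flags from those tests, plus `-p` for the non-default port (the image path is illustrative):

```js
import { spawn } from 'node:child_process'

// serve ./bench.img over NBD on port 9000 (assumes nbdkit is installed)
const server = spawn(
  'nbdkit',
  ['file', './bench.img', '--newstyle', '--exit-with-parent', '-p', '9000', '--export-name=bench_export'],
  { stdio: 'inherit' }
)
```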
@@ -1,8 +1,11 @@
-'use strict'
-const assert = require('node:assert')
-const { Socket } = require('node:net')
-const { connect } = require('node:tls')
-const {
+import assert from 'node:assert'
+import { Socket } from 'node:net'
+import { connect } from 'node:tls'
+import { fromCallback, pRetry, pDelay, pTimeout } from 'promise-toolbox'
+import { readChunkStrict } from '@vates/read-chunk'
+import { createLogger } from '@xen-orchestra/log'
+
+import {
   INIT_PASSWD,
   NBD_CMD_READ,
   NBD_DEFAULT_BLOCK_SIZE,
@@ -17,16 +20,13 @@ const {
   NBD_REQUEST_MAGIC,
   OPTS_MAGIC,
   NBD_CMD_DISC,
-} = require('./constants.js')
-const { fromCallback, pRetry, pDelay, pTimeout } = require('promise-toolbox')
-const { readChunkStrict } = require('@vates/read-chunk')
-const { createLogger } = require('@xen-orchestra/log')
+} from './constants.mjs'

 const { warn } = createLogger('vates:nbd-client')

 // documentation is here : https://github.com/NetworkBlockDevice/nbd/blob/master/doc/proto.md

-module.exports = class NbdClient {
+export default class NbdClient {
   #serverAddress
   #serverCert
   #serverPort
@@ -74,7 +74,7 @@ module.exports = class NbdClient {
       this.#serverSocket = connect({
         socket: this.#serverSocket,
         rejectUnauthorized: false,
-        cert: this.#serverCert,
+        cert: this.#serverCert
       })
       this.#serverSocket.once('error', reject)
       this.#serverSocket.once('secureConnect', () => {
@@ -88,7 +88,11 @@ module.exports = class NbdClient {
   async #unsecureConnect() {
     this.#serverSocket = new Socket()
     return new Promise((resolve, reject) => {
-      this.#serverSocket.connect(this.#serverPort, this.#serverAddress)
+      this.#serverSocket.connect({
+        port: this.#serverPort,
+        host: this.#serverAddress,
+        // @todo should test the onRead to limit buffer copy
+      })
      this.#serverSocket.once('error', reject)
      this.#serverSocket.once('connect', () => {
        this.#serverSocket.removeListener('error', reject)
@@ -232,19 +236,20 @@ module.exports = class NbdClient {
     }
     try {
       this.#waitingForResponse = true
-      const magic = await this.#readInt32()
+      const buffer = await this.#read(4 + 4 + 8)
+      const magic = buffer.readUInt32BE()

       if (magic !== NBD_REPLY_MAGIC) {
         throw new Error(`magic number for block answer is wrong : ${magic} ${NBD_REPLY_MAGIC}`)
       }

-      const error = await this.#readInt32()
+      const error = buffer.readUInt32BE(4)
       if (error !== 0) {
+        // @todo use error code from constants.mjs
         throw new Error(`GOT ERROR CODE : ${error}`)
       }

-      const blockQueryId = await this.#readInt64()
+      const blockQueryId = buffer.readBigUInt64BE(8)
       const query = this.#commandQueryBacklog.get(blockQueryId)
       if (!query) {
         throw new Error(`no query associated with id ${blockQueryId}`)
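This change reads the whole 16-byte NBD simple reply header in one pass instead of three separate awaits; per the NBD protocol, that header is 4 bytes of magic, 4 bytes of error code, and an 8-byte handle (the query id echoed back by the server). A standalone sketch of the same parsing, where `buffer` is assumed to hold exactly 16 bytes read off the socket:

```js
const NBD_SIMPLE_REPLY_MAGIC = 0x67446698

// Parse an NBD simple reply header (16 bytes: magic, error, handle).
function parseSimpleReplyHeader(buffer) {
  const magic = buffer.readUInt32BE(0)
  if (magic !== NBD_SIMPLE_REPLY_MAGIC) {
    throw new Error(`unexpected reply magic: 0x${magic.toString(16)}`)
  }
  return {
    error: buffer.readUInt32BE(4), // 0 means success
    handle: buffer.readBigUInt64BE(8), // matches the id sent with the request
  }
}
```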
@@ -307,24 +312,26 @@ module.exports = class NbdClient {
     })
   }

-  async *readBlocks(indexGenerator) {
+  async *readBlocks(indexGenerator = 2 * 1024 * 1024) {
     // default : read all blocks
-    if (indexGenerator === undefined) {
+    if (typeof indexGenerator === 'number') {
       const exportSize = this.#exportSize
-      const chunkSize = 2 * 1024 * 1024
+      const chunkSize = indexGenerator
       indexGenerator = function* () {
-        const nbBlocks = Math.ceil(exportSize / chunkSize)
-        for (let index = 0; index < nbBlocks; index++) {
+        const nbBlocks = Math.ceil(Number(exportSize / BigInt(chunkSize)))
+        for (let index = 0; BigInt(index) < nbBlocks; index++) {
           yield { index, size: chunkSize }
         }
       }
     }

     const readAhead = []
     const readAheadMaxLength = this.#readAhead
     const makeReadBlockPromise = (index, size) => {
       const promise = pRetry(() => this.readBlock(index, size), {
         tries: this.#readBlockRetries,
         onRetry: async err => {
+          console.error(err)
           warn('will retry reading block ', index, err)
           await this.reconnect()
         },
@@ -336,6 +343,7 @@ module.exports = class NbdClient {

     // read all blocks, but try to keep readAheadMaxLength promise waiting ahead
     for (const { index, size } of indexGenerator()) {
+      // stack readAheadMaxLength promises before starting to handle the results
       if (readAhead.length === readAheadMaxLength) {
         // any error will stop reading blocks
@@ -348,4 +356,4 @@ module.exports = class NbdClient {
         yield readAhead.shift()
       }
     }
   }
 }
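After this change, `readBlocks` accepts either a chunk size in bytes or a generator of `{ index, size }` descriptors; a number argument is turned into a generator that walks the whole export. A usage sketch (client setup elided, block indexes illustrative):

```js
// 1. fixed chunk size: iterate the whole export in 1 MiB blocks
for await (const block of client.readBlocks(1024 * 1024)) {
  // ... consume block (a Buffer) ...
}

// 2. explicit generator: read only selected blocks
for await (const block of client.readBlocks(function* () {
  for (const index of [0, 3, 7]) {
    yield { index, size: 2 * 1024 * 1024 }
  }
})) {
  // ... consume block ...
}
```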
@vates/nbd-client/constants.js (deleted file)
@@ -1,42 +0,0 @@
'use strict'
exports.INIT_PASSWD = Buffer.from('NBDMAGIC') // "NBDMAGIC" ensure we're connected to a nbd server
exports.OPTS_MAGIC = Buffer.from('IHAVEOPT') // "IHAVEOPT" start an option block
exports.NBD_OPT_REPLY_MAGIC = 1100100111001001n // magic received during negotiation
exports.NBD_OPT_EXPORT_NAME = 1
exports.NBD_OPT_ABORT = 2
exports.NBD_OPT_LIST = 3
exports.NBD_OPT_STARTTLS = 5
exports.NBD_OPT_INFO = 6
exports.NBD_OPT_GO = 7

exports.NBD_FLAG_HAS_FLAGS = 1 << 0
exports.NBD_FLAG_READ_ONLY = 1 << 1
exports.NBD_FLAG_SEND_FLUSH = 1 << 2
exports.NBD_FLAG_SEND_FUA = 1 << 3
exports.NBD_FLAG_ROTATIONAL = 1 << 4
exports.NBD_FLAG_SEND_TRIM = 1 << 5

exports.NBD_FLAG_FIXED_NEWSTYLE = 1 << 0

exports.NBD_CMD_FLAG_FUA = 1 << 0
exports.NBD_CMD_FLAG_NO_HOLE = 1 << 1
exports.NBD_CMD_FLAG_DF = 1 << 2
exports.NBD_CMD_FLAG_REQ_ONE = 1 << 3
exports.NBD_CMD_FLAG_FAST_ZERO = 1 << 4

exports.NBD_CMD_READ = 0
exports.NBD_CMD_WRITE = 1
exports.NBD_CMD_DISC = 2
exports.NBD_CMD_FLUSH = 3
exports.NBD_CMD_TRIM = 4
exports.NBD_CMD_CACHE = 5
exports.NBD_CMD_WRITE_ZEROES = 6
exports.NBD_CMD_BLOCK_STATUS = 7
exports.NBD_CMD_RESIZE = 8

exports.NBD_REQUEST_MAGIC = 0x25609513 // magic number to create a new NBD request to send to the server
exports.NBD_REPLY_MAGIC = 0x67446698 // magic number received from the server when reading response to a nbd request
exports.NBD_REPLY_ACK = 1

exports.NBD_DEFAULT_PORT = 10809
exports.NBD_DEFAULT_BLOCK_SIZE = 64 * 1024
@vates/nbd-client/constants.mjs (new file, 81 lines)
@@ -0,0 +1,81 @@
// https://github.com/NetworkBlockDevice/nbd/blob/master/doc/proto.md

export const INIT_PASSWD = Buffer.from('NBDMAGIC') // "NBDMAGIC" ensure we're connected to a nbd server
export const OPTS_MAGIC = Buffer.from('IHAVEOPT') // "IHAVEOPT" start an option block
export const NBD_OPT_REPLY_MAGIC = 1100100111001001n // magic received during negotiation

export const NBD_OPT_EXPORT_NAME = 1
export const NBD_OPT_ABORT = 2
export const NBD_OPT_LIST = 3
export const NBD_OPT_STARTTLS = 5
export const NBD_OPT_INFO = 6
export const NBD_OPT_GO = 7
export const NBD_OPT_STRUCTURED_REPLY = 8
export const NBD_OPT_LIST_META_CONTEXT = 9
export const NBD_OPT_SET_META_CONTEXT = 10
export const NBD_OPT_EXTENDED_HEADERS = 11

export const NBD_REP_ACK = 1
export const NBD_REP_SERVER = 2
export const NBD_REP_INFO = 3
export const NBD_REP_META_CONTEXT = 4
export const NBD_REP_ERR_UNSUP = 0x80000001 // 2^31 + 1
export const NBD_REP_ERR_POLICY = 0x80000002
export const NBD_REP_ERR_INVALID = 0x80000003
export const NBD_REP_ERR_PLATFORM = 0x80000004
export const NBD_REP_ERR_TLS_REQD = 0x80000005
export const NBD_REP_ERR_UNKNOWN = 0x80000006
export const NBD_REP_ERR_SHUTDOWN = 0x80000007
export const NBD_REP_ERR_BLOCK_SIZE_REQD = 0x80000008
export const NBD_REP_ERR_TOO_BIG = 0x80000009
export const NBD_REP_ERR_EXT_HEADER_REQD = 0x8000000a

export const NBD_INFO_EXPORT = 0
export const NBD_INFO_NAME = 1
export const NBD_INFO_DESCRIPTION = 2
export const NBD_INFO_BLOCK_SIZE = 3

export const NBD_FLAG_HAS_FLAGS = 1 << 0
export const NBD_FLAG_READ_ONLY = 1 << 1
export const NBD_FLAG_SEND_FLUSH = 1 << 2
export const NBD_FLAG_SEND_FUA = 1 << 3
export const NBD_FLAG_ROTATIONAL = 1 << 4
export const NBD_FLAG_SEND_TRIM = 1 << 5
export const NBD_FLAG_SEND_WRITE_ZEROES = 1 << 6
export const NBD_FLAG_SEND_DF = 1 << 7
export const NBD_FLAG_CAN_MULTI_CONN = 1 << 8

export const NBD_FLAG_FIXED_NEWSTYLE = 1 << 0

export const NBD_CMD_FLAG_FUA = 1 << 0
export const NBD_CMD_FLAG_NO_HOLE = 1 << 1
export const NBD_CMD_FLAG_DF = 1 << 2
export const NBD_CMD_FLAG_REQ_ONE = 1 << 3
export const NBD_CMD_FLAG_FAST_ZERO = 1 << 4

export const NBD_CMD_READ = 0
export const NBD_CMD_WRITE = 1
export const NBD_CMD_DISC = 2
export const NBD_CMD_FLUSH = 3
export const NBD_CMD_TRIM = 4
export const NBD_CMD_CACHE = 5
export const NBD_CMD_WRITE_ZEROES = 6
export const NBD_CMD_BLOCK_STATUS = 7
export const NBD_CMD_RESIZE = 8

export const NBD_REQUEST_MAGIC = 0x25609513 // magic number to create a new NBD request to send to the server
export const NBD_REPLY_MAGIC = 0x67446698 // magic number received from the server when reading response to a nbd request
export const NBD_REPLY_ACK = 1
export const NBD_SIMPLE_REPLY_MAGIC = 0x67446698
export const NBD_STRUCTURED_REPLY_MAGIC = 0x668e33ef
export const NBD_REPLY_TYPE_NONE = 0
export const NBD_REPLY_TYPE_OFFSET_DATA = 1
export const NBD_REPLY_TYPE_OFFSET_HOLE = 2
export const NBD_REPLY_TYPE_BLOCK_STATUS = 5
export const NBD_REPLY_TYPE_ERROR = (1 << 15) + 1
export const NBD_REPLY_TYPE_ERROR_OFFSET = (1 << 15) + 2

export const NBD_DEFAULT_PORT = 10809
export const NBD_DEFAULT_BLOCK_SIZE = 64 * 1024
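The transmission flags above are bitmasks, so a server's 16-bit flag field is tested with bitwise AND. A small sketch (the `transmissionFlags` value is hypothetical; in practice it is read from the server's export reply):

```js
import { NBD_FLAG_HAS_FLAGS, NBD_FLAG_READ_ONLY, NBD_FLAG_SEND_FLUSH } from './constants.mjs'

// hypothetical flags value as read from the server
const transmissionFlags = NBD_FLAG_HAS_FLAGS | NBD_FLAG_READ_ONLY

const isReadOnly = (transmissionFlags & NBD_FLAG_READ_ONLY) !== 0
const canFlush = (transmissionFlags & NBD_FLAG_SEND_FLUSH) !== 0
console.log({ isReadOnly, canFlush }) // { isReadOnly: true, canFlush: false }
```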
@@ -1,76 +0,0 @@ (deleted file)
'use strict'
const NbdClient = require('./index.js')
const { spawn } = require('node:child_process')
const fs = require('node:fs/promises')
const { test } = require('tap')
const tmp = require('tmp')
const { pFromCallback } = require('promise-toolbox')
const { asyncEach } = require('@vates/async-each')

const FILE_SIZE = 2 * 1024 * 1024

async function createTempFile(size) {
  const tmpPath = await pFromCallback(cb => tmp.file(cb))
  const data = Buffer.alloc(size, 0)
  for (let i = 0; i < size; i += 4) {
    data.writeUInt32BE(i, i)
  }
  await fs.writeFile(tmpPath, data)

  return tmpPath
}

test('it works with unsecured network', async tap => {
  const path = await createTempFile(FILE_SIZE)

  const nbdServer = spawn(
    'nbdkit',
    [
      'file',
      path,
      '--newstyle', //
      '--exit-with-parent',
      '--read-only',
      '--export-name=MY_SECRET_EXPORT',
    ],
    {
      stdio: ['inherit', 'inherit', 'inherit'],
    }
  )

  const client = new NbdClient({
    address: 'localhost',
    exportname: 'MY_SECRET_EXPORT',
    secure: false,
  })

  await client.connect()
  tap.equal(client.exportSize, BigInt(FILE_SIZE))
  const CHUNK_SIZE = 128 * 1024 // non default size
  const indexes = []
  for (let i = 0; i < FILE_SIZE / CHUNK_SIZE; i++) {
    indexes.push(i)
  }
  // read multiple blocks in parallel
  await asyncEach(
    indexes,
    async i => {
      const block = await client.readBlock(i, CHUNK_SIZE)
      let blockOk = true
      let firstFail
      for (let j = 0; j < CHUNK_SIZE; j += 4) {
        const wanted = i * CHUNK_SIZE + j
        const found = block.readUInt32BE(j)
        blockOk = blockOk && found === wanted
        if (!blockOk && firstFail === undefined) {
          firstFail = j
        }
      }
      tap.ok(blockOk, `check block ${i} content`)
    },
    { concurrency: 8 }
  )
  await client.disconnect()
  nbdServer.kill()
  await fs.unlink(path)
})
@@ -13,17 +13,18 @@
     "url": "https://vates.fr"
   },
   "license": "ISC",
-  "version": "1.2.0",
+  "version": "2.0.0",
   "engines": {
     "node": ">=14.0"
   },
+  "main": "./index.mjs",
   "dependencies": {
     "@vates/async-each": "^1.0.0",
-    "@vates/read-chunk": "^1.1.1",
+    "@vates/read-chunk": "^1.2.0",
    "@xen-orchestra/async-map": "^0.1.2",
    "@xen-orchestra/log": "^0.6.0",
    "promise-toolbox": "^0.21.0",
-    "xen-api": "^1.3.1"
+    "xen-api": "^1.3.4"
   },
   "devDependencies": {
     "tap": "^16.3.0",
@@ -31,6 +32,6 @@
   },
   "scripts": {
     "postversion": "npm publish --access public",
-    "test-integration": "tap --lines 70 --functions 36 --branches 54 --statements 69 *.integ.js"
+    "test-integration": "tap --lines 97 --functions 95 --branches 74 --statements 97 tests/*.integ.mjs"
   }
 }
@vates/nbd-client/server.mjs (new file, 292 lines)
@@ -0,0 +1,292 @@
import assert, { deepEqual, strictEqual, notStrictEqual } from 'node:assert'
import { createServer } from 'node:net'
import { fromCallback } from 'promise-toolbox'
import { readChunkStrict } from '@vates/read-chunk'
import {
  INIT_PASSWD,
  NBD_CMD_READ,
  NBD_DEFAULT_PORT,
  NBD_FLAG_FIXED_NEWSTYLE,
  NBD_FLAG_HAS_FLAGS,
  NBD_OPT_EXPORT_NAME,
  NBD_OPT_REPLY_MAGIC,
  NBD_REPLY_ACK,
  NBD_REQUEST_MAGIC,
  OPTS_MAGIC,
  NBD_CMD_DISC,
  NBD_REP_ERR_UNSUP,
  NBD_CMD_WRITE,
  NBD_OPT_GO,
  NBD_OPT_INFO,
  NBD_INFO_EXPORT,
  NBD_REP_INFO,
  NBD_SIMPLE_REPLY_MAGIC,
  NBD_REP_ERR_UNKNOWN,
} from './constants.mjs'
import { PassThrough } from 'node:stream'

export default class NbdServer {
  #server
  #clients = new Map()
  constructor(port = NBD_DEFAULT_PORT) {
    this.#server = createServer()
    this.#server.listen(port)
    this.#server.on('connection', client => this.#handleNewConnection(client))
  }

  // will wait for a client to connect and upload the file to this server
  downloadStream(key) {
    strictEqual(this.#clients.has(key), false)
    const stream = new PassThrough()
    const offset = BigInt(0)
    this.#clients.set(key, { length: BigInt(2 * 1024 * 1024 * 1024 * 1024), stream, offset, key })
    return stream
  }

  // will wait for a client to connect and download this stream
  uploadStream(key, source, length) {
    strictEqual(this.#clients.has(key), false)
    notStrictEqual(length, undefined)
    const offset = BigInt(0)
    this.#clients.set(key, { length: BigInt(length), stream: source, offset, key })
  }

  #read(socket, length) {
    return readChunkStrict(socket, length)
  }
  async #readInt32(socket) {
    const buffer = await this.#read(socket, 4)
    return buffer.readUInt32BE()
  }

  #write(socket, buffer) {
    return fromCallback.call(socket, 'write', buffer)
  }
  async #writeInt16(socket, int16) {
    const buffer = Buffer.alloc(2)
    buffer.writeUInt16BE(int16)
    return this.#write(socket, buffer)
  }
  async #writeInt32(socket, int32) {
    const buffer = Buffer.alloc(4)
    buffer.writeUInt32BE(int32)
    return this.#write(socket, buffer)
  }
  async #writeInt64(socket, int64) {
    const buffer = Buffer.alloc(8)
    buffer.writeBigUInt64BE(int64)
    return this.#write(socket, buffer)
  }

  async #openExport(key) {
    if (!this.#clients.has(key)) {
      // export does not exist
      const err = new Error('Export not found')
      err.code = 'ENOTFOUND'
      throw err
    }
    const { length } = this.#clients.get(key)
    return length
  }

  async #sendOptionResponse(socket, option, response, data = Buffer.alloc(0)) {
    await this.#writeInt64(socket, NBD_OPT_REPLY_MAGIC)
    await this.#writeInt32(socket, option)
    await this.#writeInt32(socket, response)
    await this.#writeInt32(socket, data.length)
    await this.#write(socket, data)
  }

  /**
   *
   * @param {Socket} socket
   * @returns true if server is waiting for more options
   */
  async #readOption(socket) {
    console.log('wait for option')
    const magic = await this.#read(socket, 8)
    console.log(magic.toString('ascii'), magic.length, OPTS_MAGIC.toString('ascii'))
    deepEqual(magic, OPTS_MAGIC)
    const option = await this.#readInt32(socket)
    const length = await this.#readInt32(socket)
    console.log({ option, length })
    const data = length > 0 ? await this.#read(socket, length) : undefined
    switch (option) {
      case NBD_OPT_EXPORT_NAME: {
        const exportNameLength = data.readInt32BE()
        const key = data.slice(4, exportNameLength + 4).toString()
        let exportSize
        try {
          exportSize = await this.#openExport(key)
        } catch (err) {
          if (err.code === 'ENOTFOUND') {
            this.#sendOptionResponse(socket, option, NBD_REP_ERR_UNKNOWN)
            return false
          }
          throw err
        }
        socket.key = key
        await this.#writeInt64(socket, exportSize)
        await this.#writeInt16(socket, NBD_FLAG_HAS_FLAGS /* transmission flag */)
        await this.#write(socket, Buffer.alloc(124) /* padding */)

        return false
      }
      /*
      case NBD_OPT_STARTTLS:
        console.log('starttls')
        // @todo not working
        return true
      */

      case NBD_OPT_GO:
      case NBD_OPT_INFO: {
        const exportNameLength = data.readInt32BE()
        const key = data.slice(4, exportNameLength + 4).toString()
        let exportSize
        try {
          exportSize = await this.#openExport(key)
        } catch (err) {
          if (err.code === 'ENOTFOUND') {
            this.#sendOptionResponse(socket, option, NBD_REP_ERR_UNKNOWN)
            // @todo should disconnect
            return false
          }
          throw err
        }
        socket.key = key
        await this.#writeInt64(socket, NBD_OPT_REPLY_MAGIC)
        await this.#writeInt32(socket, option)
        await this.#writeInt32(socket, NBD_REP_INFO)
        await this.#writeInt32(socket, 12)
        // the export info
        await this.#writeInt16(socket, NBD_INFO_EXPORT)
        await this.#writeInt64(socket, exportSize)
        await this.#writeInt16(socket, NBD_FLAG_HAS_FLAGS /* transmission flag */)

        // an ACK at the end of the infos
        await this.#sendOptionResponse(socket, option, NBD_REPLY_ACK) // no additional data
        return option === NBD_OPT_INFO // we stay in the option phase if the option is INFO
      }
      default:
        // not supported
        console.log('not supported', option, length, data?.toString())
        await this.#sendOptionResponse(socket, option, NBD_REP_ERR_UNSUP) // no additional data
        // wait for next option
        return true
    }
  }

  async #readCommand(socket) {
    const key = socket.key
    // this socket has an export key
    notStrictEqual(key, undefined)
    // this export key is still valid
    strictEqual(this.#clients.has(key), true)
    const client = this.#clients.get(key)

    const buffer = await this.#read(socket, 28)
    const magic = buffer.readInt32BE(0)
    strictEqual(magic, NBD_REQUEST_MAGIC)
    /* const commandFlags = */ buffer.readInt16BE(4)
    const command = buffer.readInt16BE(6)
    const cookie = buffer.readBigUInt64BE(8)
    const offset = buffer.readBigUInt64BE(16)
    const length = buffer.readInt32BE(24)
    switch (command) {
      case NBD_CMD_DISC:
        console.log('got disconnect', client.offset)
        await client.stream?.destroy()
        // @todo : disconnect
        return false
      case NBD_CMD_READ: {
        /** simple replies */

        // read length bytes from offset in export

        // the client is reading in contiguous mode
        assert.strictEqual(offset, client.offset)
        client.offset += BigInt(length)
        const data = await readChunkStrict(client.stream, length)
        const reply = Buffer.alloc(16)
        reply.writeInt32BE(NBD_SIMPLE_REPLY_MAGIC)
        reply.writeInt32BE(0, 4) // no error
        reply.writeBigInt64BE(cookie, 8)
        await this.#write(socket, reply)
        await this.#write(socket, data)
        /* if we implement non stream read, we can handle reads in parallel
        const reply = Buffer.alloc(16 + length)
        reply.writeInt32BE(NBD_SIMPLE_REPLY_MAGIC)
        reply.writeInt32BE(0, 4) // no error
        reply.writeBigInt64BE(cookie, 8)

        // read length bytes from offset in export directly into the given buffer
        // may do multiple reads in parallel on the same export
        size += length
        socket.fd.read(reply, 16, length, Number(offset))
          .then(() => {
            return this.#write(socket, reply)
          })
          .catch(err => console.error('NBD_CMD_READ', err)) */
        return true
      }
      case NBD_CMD_WRITE: {
        // the client is writing in contiguous mode
        assert.strictEqual(offset, client.offset)

        const data = await this.#read(socket, length)
        client.offset += BigInt(length)
        await new Promise((resolve, reject) => {
          if (!client.stream.write(data, 0, length, Number(offset))) {
            client.stream.once('drain', err => (err ? reject(err) : resolve()))
          } else {
            process.nextTick(resolve)
          }
        })
        const reply = Buffer.alloc(16)
        reply.writeInt32BE(NBD_SIMPLE_REPLY_MAGIC)
        reply.writeInt32BE(0, 4) // no error
        reply.writeBigInt64BE(cookie, 8)
        await this.#write(socket, reply)
        return true
      }
      default:
        console.log('GOT unsupported command ', command)
        // fail to handle
        return true
    }
  }

  async #handleNewConnection(socket) {
    const remoteAddress = socket.remoteAddress + ':' + socket.remotePort
    console.log('new client connection from %s', remoteAddress)

    socket.on('close', () => {
      console.log('client ', remoteAddress, 'is done')
    })
    socket.on('error', error => {
      throw error
    })
    // handshake
    await this.#write(socket, INIT_PASSWD)
    await this.#write(socket, OPTS_MAGIC)

    // send flags, the bare minimum
    await this.#writeInt16(socket, NBD_FLAG_FIXED_NEWSTYLE)
    const clientFlag = await this.#readInt32(socket)
    assert.strictEqual(clientFlag & NBD_FLAG_FIXED_NEWSTYLE, NBD_FLAG_FIXED_NEWSTYLE) // only FIXED_NEWSTYLE is supported by this server

    // read client options
    let waitingForOptions = true
    while (waitingForOptions) {
      waitingForOptions = await this.#readOption(socket)
    }

    let waitingForCommand = true
    while (waitingForCommand) {
      waitingForCommand = await this.#readCommand(socket)
    }
  }

  #handleClientData(client, data) {}
}
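The server keeps one stream per export key and serves it strictly sequentially, so a typical pairing is `uploadStream` on one side and an `NbdClient` reading on the other. A minimal sketch under that assumption (the port, key, and file path are illustrative):

```js
import { createReadStream, statSync } from 'node:fs'
import NbdServer from './server.mjs'
import NbdClient from './client.mjs'

const { size } = statSync('./disk.raw')
const server = new NbdServer(10809)
// expose ./disk.raw as the export named 'disk'
server.uploadStream('disk', createReadStream('./disk.raw'), size)

const client = new NbdClient({ address: 'localhost', port: 10809, exportname: 'disk' })
await client.connect()
// sequential read of the whole export, 2 MiB at a time
// (this server only supports contiguous reads)
for await (const block of client.readBlocks(2 * 1024 * 1024)) {
  // ... consume block ...
}
await client.disconnect()
```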
@vates/nbd-client/tests/ca-cert.pem (new file, 182 lines)
@@ -0,0 +1,182 @@
Public Key Info:
	Public Key Algorithm: RSA
	Key Security Level: High (3072 bits)

modulus:
	00:be:92:be:df:de:0a:ab:38:fc:1a:c0:1a:58:4d:86
	b8:1f:25:10:7d:19:05:17:bf:02:3d:e9:ef:f8:c0:04
	5d:6f:98:de:5c:dd:c3:0f:e2:61:61:e4:b5:9c:42:ac
	3e:af:fd:30:10:e1:54:32:66:75:f6:80:90:85:05:a0
	6a:14:a2:6f:a7:2e:f0:f3:52:94:2a:f2:34:fc:0d:b4
	fb:28:5d:1c:11:5c:59:6e:63:34:ba:b3:fd:73:b1:48
	35:00:84:53:da:6a:9b:84:ab:64:b1:a1:2b:3a:d1:5a
	d7:13:7c:12:2a:4e:72:e9:96:d6:30:74:c5:71:05:14
	4b:2d:01:94:23:67:4e:37:3c:1e:c1:a0:bc:34:04:25
	21:11:fb:4b:6b:53:74:8f:90:93:57:af:7f:3b:78:d6
	a4:87:fe:7d:ed:20:11:8b:70:54:67:b8:c9:f5:c0:6b
	de:4e:e7:a5:79:ff:f7:ad:cf:10:57:f5:51:70:7b:54
	68:28:9e:b9:c2:10:7b:ab:aa:11:47:9f:ec:e6:2f:09
	44:4a:88:5b:dd:8c:10:b4:c4:03:25:06:d9:e0:9f:a0
	0d:cf:94:4b:3b:fa:a5:17:2c:e4:67:c4:17:6a:ab:d8
	c8:7a:16:41:b9:91:b7:9c:ae:8c:94:be:26:61:51:71
	c1:a6:39:39:97:75:28:a9:0e:21:ea:f0:bd:71:4a:8c
	e1:f8:1d:a9:22:2f:10:a8:1b:e5:a4:9a:fd:0f:fa:c6
	20:bc:96:99:79:c6:ba:a4:1f:3e:d4:91:c5:af:bb:71
	0a:5a:ef:69:9c:64:69:ce:5a:fe:3f:c2:24:f4:26:d4
	3d:ab:ab:9a:f0:f6:f1:b1:64:a9:f4:e2:34:6a:ab:2e
	95:47:b9:07:5a:39:c6:95:9c:a9:e8:ed:71:dd:c1:21
	16:c8:2d:4c:2c:af:06:9d:c6:fa:fe:c5:2a:6c:b4:c3
	d5:96:fc:5e:fd:ec:1c:30:b4:9d:cb:29:ef:a8:50:1c
	21:

public exponent:
	01:00:01:

private exponent:
	25:37:c5:7d:35:01:02:65:73:9e:c9:cb:9b:59:30:a9
	3e:b3:df:5f:7f:06:66:97:d0:19:45:59:af:4b:d8:ce
	62:a0:09:35:3b:bd:ff:99:27:89:95:bf:fe:0f:6b:52
	26:ce:9c:97:7f:5a:11:29:bf:79:ef:ab:c9:be:ca:90
	4d:0d:58:1e:df:65:01:30:2c:6d:a2:b5:c4:4f:ec:fb
	6b:eb:9b:32:ac:c5:6e:70:83:78:be:f4:0d:a7:1e:c1
	f3:22:e4:b9:70:3e:85:0f:6f:ef:dc:d8:f3:78:b5:73
	f1:83:36:8c:fa:9b:28:91:63:ad:3c:f0:de:5c:ae:94
	eb:ea:36:03:20:06:bf:74:c7:50:eb:52:36:1a:65:21
	eb:40:17:7f:93:61:dd:33:d0:02:bc:ec:6d:31:f1:41
	5a:a9:d1:f0:00:66:4c:c4:18:47:d5:67:e3:cd:bb:83
	44:07:ab:62:83:21:dc:d8:e6:89:37:08:bb:9d:ea:62
	c2:5d:ce:85:c2:dc:48:27:0c:a4:23:61:b7:30:e7:26
	44:dc:1e:5c:2e:16:35:2b:2e:a6:e6:a4:ce:1f:9b:e9
	fe:96:fa:49:1d:fb:2a:df:bc:bf:46:da:52:f8:37:8a
	84:ab:e4:73:e6:46:56:b5:b4:3d:e1:63:eb:02:8e:d7
	67:96:c4:dc:28:6d:6b:b6:0c:a3:0b:db:87:29:ad:f9
	ec:73:b6:55:a3:40:32:13:84:c7:2f:33:74:04:dc:42
	00:11:9c:fb:fc:62:35:b3:82:c3:3c:28:80:e8:09:a8
	97:c7:c1:2e:3d:27:fa:4f:9b:fc:c2:34:58:41:5c:a1
	e2:70:2e:2f:82:ad:bd:bd:8e:dd:23:12:25:de:89:70
	60:75:48:90:80:ac:55:74:51:6f:49:9e:7f:63:41:8b
	3c:b1:f5:c3:6b:4b:5a:50:a6:4d:38:e8:82:c2:04:c8
	30:fd:06:9b:c1:04:27:b6:63:3a:5e:f5:4d:00:c3:d1

prime1:
	00:f6:00:2e:7d:89:61:24:16:5e:87:ca:18:6c:03:b8
	b4:33:df:4a:a7:7f:db:ed:39:15:41:12:61:4f:4e:b4
	de:ab:29:d9:0c:6c:01:7e:53:2e:ee:e7:5f:a2:e4:6d
	c6:4b:07:4e:d8:a3:ae:45:06:97:bd:18:a3:e9:dd:29
	54:64:6d:f0:af:08:95:ae:ae:3e:71:63:76:2a:a1:18
	c4:b1:fc:bc:3d:42:15:74:b3:c5:38:1f:5d:92:f1:b2
	c6:3f:10:fe:35:1a:c6:b1:ce:70:38:ff:08:5c:de:61
	79:c7:50:91:22:4d:e9:c8:18:49:e2:5c:91:84:86:e2
	4d:0f:6e:9b:0d:81:df:aa:f3:59:75:56:e9:33:18:dd
	ab:39:da:e2:25:01:05:a1:6e:23:59:15:2c:89:35:c7
	ae:9c:c7:ea:88:9a:1a:f3:48:07:11:82:59:79:8c:62
	53:06:37:30:14:b3:82:b1:50:fc:ae:b8:f7:1c:57:44
	7d:

prime2:
	00:c6:51:cc:dc:88:2e:cf:98:90:10:19:e0:d3:a4:d1
	3f:dc:b0:29:d3:bb:26:ee:eb:00:17:17:d1:d1:bb:9b
	34:b1:4e:af:b5:6c:1c:54:53:b4:bb:55:da:f7:78:cd
	38:b4:2e:3a:8c:63:80:3b:64:9c:b4:2b:cd:dd:50:0b
	05:d2:00:7a:df:8e:c3:e6:29:e0:9c:d8:40:b7:11:09
	f4:38:df:f6:ed:93:1e:18:d4:93:fa:8d:ee:82:9c:0f
	c1:88:26:84:9d:4f:ae:8a:17:d5:55:54:4c:c6:0a:ac
	4d:ec:33:51:68:0f:4b:92:2e:04:57:fe:15:f5:00:46
	5c:8e:ad:09:2c:e7:df:d5:36:7a:4e:bd:da:21:22:d7
	58:b4:72:93:94:af:34:cc:e2:b8:d0:4f:0b:5d:97:08
	12:19:17:34:c5:15:49:00:48:56:13:b8:45:4e:3b:f8
	bc:d5:ab:d9:6d:c2:4a:cc:01:1a:53:4d:46:50:49:3b
	75:

coefficient:
	63:67:50:29:10:6a:85:a3:dc:51:90:20:76:86:8c:83
	8e:d5:ff:aa:75:fd:b5:f8:31:b0:96:6c:18:1d:5b:ed
	a4:2e:47:8d:9c:c2:1e:2c:a8:6d:4b:10:a5:c2:53:46
	8a:9a:84:91:d7:fc:f5:cc:03:ce:b9:3d:5c:01:d2:27
	99:7b:79:89:4f:a1:12:e3:05:5d:ee:10:f6:8c:e6:ce
	5e:da:32:56:6d:6f:eb:32:b4:75:7b:94:49:d8:2d:9e
	4d:19:59:2e:e4:0b:bc:95:df:df:65:67:a1:dd:c6:2b
	99:f4:76:e8:9f:fa:57:1d:ca:f9:58:a9:ce:9b:30:5c
	42:8a:ba:05:e7:e2:15:45:25:bc:e9:68:c1:8b:1a:37
	cc:e1:aa:45:2e:94:f5:81:47:1e:64:7f:c0:c1:b7:a8
	21:58:18:a9:a0:ed:e0:27:75:bf:65:81:6b:e4:1d:5a
	b7:7e:df:d8:28:c6:36:21:19:c8:6e:da:ca:9e:da:84

exp1:
	00:ba:d7:fe:77:a9:0d:98:2c:49:56:57:c0:5e:e2:20
	ba:f6:1f:26:03:bc:d0:5d:08:9b:45:16:61:c4:ab:e2
	22:b1:dc:92:17:a6:3d:28:26:a4:22:1e:a8:7b:ff:86
	05:33:5d:74:9c:85:0d:cb:2d:ab:b8:9b:6b:7c:28:57
	c8:da:92:ca:59:17:6b:21:07:05:34:78:37:fb:3e:ea
	a2:13:12:04:23:7e:fa:ee:ed:cf:e0:c5:a9:fb:ff:0a
	2b:1b:21:9c:02:d7:b8:8c:ba:60:70:59:fc:8f:14:f4
	f2:5a:d9:ad:b2:61:7d:2c:56:8e:5f:98:b1:89:f8:2d
	10:1c:a5:84:ad:28:b4:aa:92:34:a3:34:04:e1:a3:84
	52:16:1a:52:e3:8a:38:2d:99:8a:cd:91:90:87:12:ca
	fc:ab:e6:08:14:03:00:6f:41:88:e4:da:9d:7c:fd:8c
	7c:c4:de:cb:ed:1d:3f:29:d0:7a:6b:76:df:71:ae:32
	bd:

exp2:
	4a:e9:d3:6c:ea:b4:64:0e:c9:3c:8b:c9:f5:a8:a8:b2
	6a:f6:d0:95:fe:78:32:7f:ea:c4:ce:66:9f:c7:32:55
	b1:34:7c:03:18:17:8b:73:23:2e:30:bc:4a:07:03:de
	8b:91:7a:e4:55:21:b7:4d:c6:33:f8:e8:06:d5:99:94
	55:43:81:26:b9:93:1e:7a:6b:32:54:2d:fd:f9:1d:bd
	77:4e:82:c4:33:72:87:06:a5:ef:5b:75:e1:38:7a:6b
	2c:b7:00:19:3c:64:3e:1d:ca:a4:34:f7:db:47:64:d6
	fa:86:58:15:ea:d1:2d:22:dc:d9:30:4d:b3:02:ab:91
	83:03:b2:17:98:6f:60:e6:f7:44:8f:4a:ba:81:a2:bf
	0b:4a:cc:9c:b9:a2:44:52:d0:65:3f:b6:97:5f:d9:d8
	9c:49:bb:d1:46:bd:10:b2:42:71:a8:85:e5:8b:99:e6
	1b:00:93:5d:76:ab:32:6c:a8:39:17:53:9c:38:4d:91

Public Key PIN:
	pin-sha256:ISh/UeFjUG5Gwrpx6hMUGQPvg9wOKjOkHmRbs4YjZqs=
Public Key ID:
	sha256:21287f51e163506e46c2ba71ea13141903ef83dc0e2a33a41e645bb3862366ab
	sha1:1a48455111ac45fb5807c5cdb7b20b896c52f0b6

-----BEGIN RSA PRIVATE KEY-----
MIIG4wIBAAKCAYEAvpK+394Kqzj8GsAaWE2GuB8lEH0ZBRe/Aj3p7/jABF1vmN5c
3cMP4mFh5LWcQqw+r/0wEOFUMmZ19oCQhQWgahSib6cu8PNSlCryNPwNtPsoXRwR
XFluYzS6s/1zsUg1AIRT2mqbhKtksaErOtFa1xN8EipOcumW1jB0xXEFFEstAZQj
Z043PB7BoLw0BCUhEftLa1N0j5CTV69/O3jWpIf+fe0gEYtwVGe4yfXAa95O56V5
//etzxBX9VFwe1RoKJ65whB7q6oRR5/s5i8JREqIW92MELTEAyUG2eCfoA3PlEs7
+qUXLORnxBdqq9jIehZBuZG3nK6MlL4mYVFxwaY5OZd1KKkOIerwvXFKjOH4Haki
LxCoG+Wkmv0P+sYgvJaZeca6pB8+1JHFr7txClrvaZxkac5a/j/CJPQm1D2rq5rw
9vGxZKn04jRqqy6VR7kHWjnGlZyp6O1x3cEhFsgtTCyvBp3G+v7FKmy0w9WW/F79
7BwwtJ3LKe+oUBwhAgMBAAECggGAJTfFfTUBAmVznsnLm1kwqT6z319/BmaX0BlF
Wa9L2M5ioAk1O73/mSeJlb/+D2tSJs6cl39aESm/ee+ryb7KkE0NWB7fZQEwLG2i
tcRP7Ptr65syrMVucIN4vvQNpx7B8yLkuXA+hQ9v79zY83i1c/GDNoz6myiRY608
8N5crpTr6jYDIAa/dMdQ61I2GmUh60AXf5Nh3TPQArzsbTHxQVqp0fAAZkzEGEfV
Z+PNu4NEB6tigyHc2OaJNwi7nepiwl3OhcLcSCcMpCNhtzDnJkTcHlwuFjUrLqbm
pM4fm+n+lvpJHfsq37y/RtpS+DeKhKvkc+ZGVrW0PeFj6wKO12eWxNwobWu2DKML
24cprfnsc7ZVo0AyE4THLzN0BNxCABGc+/xiNbOCwzwogOgJqJfHwS49J/pPm/zC
NFhBXKHicC4vgq29vY7dIxIl3olwYHVIkICsVXRRb0mef2NBizyx9cNrS1pQpk04
6ILCBMgw/QabwQQntmM6XvVNAMPRAoHBAPYALn2JYSQWXofKGGwDuLQz30qnf9vt
ORVBEmFPTrTeqynZDGwBflMu7udfouRtxksHTtijrkUGl70Yo+ndKVRkbfCvCJWu
rj5xY3YqoRjEsfy8PUIVdLPFOB9dkvGyxj8Q/jUaxrHOcDj/CFzeYXnHUJEiTenI
GEniXJGEhuJND26bDYHfqvNZdVbpMxjdqzna4iUBBaFuI1kVLIk1x66cx+qImhrz
SAcRgll5jGJTBjcwFLOCsVD8rrj3HFdEfQKBwQDGUczciC7PmJAQGeDTpNE/3LAp
07sm7usAFxfR0bubNLFOr7VsHFRTtLtV2vd4zTi0LjqMY4A7ZJy0K83dUAsF0gB6
347D5ingnNhAtxEJ9Djf9u2THhjUk/qN7oKcD8GIJoSdT66KF9VVVEzGCqxN7DNR
aA9Lki4EV/4V9QBGXI6tCSzn39U2ek692iEi11i0cpOUrzTM4rjQTwtdlwgSGRc0
xRVJAEhWE7hFTjv4vNWr2W3CSswBGlNNRlBJO3UCgcEAutf+d6kNmCxJVlfAXuIg
uvYfJgO80F0Im0UWYcSr4iKx3JIXpj0oJqQiHqh7/4YFM110nIUNyy2ruJtrfChX
yNqSylkXayEHBTR4N/s+6qITEgQjfvru7c/gxan7/worGyGcAte4jLpgcFn8jxT0
8lrZrbJhfSxWjl+YsYn4LRAcpYStKLSqkjSjNATho4RSFhpS44o4LZmKzZGQhxLK
/KvmCBQDAG9BiOTanXz9jHzE3svtHT8p0Hprdt9xrjK9AoHASunTbOq0ZA7JPIvJ
9aiosmr20JX+eDJ/6sTOZp/HMlWxNHwDGBeLcyMuMLxKBwPei5F65FUht03GM/jo
BtWZlFVDgSa5kx56azJULf35Hb13ToLEM3KHBqXvW3XhOHprLLcAGTxkPh3KpDT3
20dk1vqGWBXq0S0i3NkwTbMCq5GDA7IXmG9g5vdEj0q6gaK/C0rMnLmiRFLQZT+2
l1/Z2JxJu9FGvRCyQnGoheWLmeYbAJNddqsybKg5F1OcOE2RAoHAY2dQKRBqhaPc
UZAgdoaMg47V/6p1/bX4MbCWbBgdW+2kLkeNnMIeLKhtSxClwlNGipqEkdf89cwD
zrk9XAHSJ5l7eYlPoRLjBV3uEPaM5s5e2jJWbW/rMrR1e5RJ2C2eTRlZLuQLvJXf
32Vnod3GK5n0duif+lcdyvlYqc6bMFxCiroF5+IVRSW86WjBixo3zOGqRS6U9YFH
HmR/wMG3qCFYGKmg7eAndb9lgWvkHVq3ft/YKMY2IRnIbtrKntqE
-----END RSA PRIVATE KEY-----
@vates/nbd-client/tests/nbdclient.integ.mjs (new file, 168 lines)
@@ -0,0 +1,168 @@
import NbdClient from '../client.mjs'
import { spawn, exec } from 'node:child_process'
import fs from 'node:fs/promises'
import { test } from 'tap'
import tmp from 'tmp'
import { pFromCallback } from 'promise-toolbox'
import { Socket } from 'node:net'
import { NBD_DEFAULT_PORT } from '../constants.mjs'
import assert from 'node:assert'

const FILE_SIZE = 10 * 1024 * 1024

async function createTempFile(size) {
  const tmpPath = await pFromCallback(cb => tmp.file(cb))
  const data = Buffer.alloc(size, 0)
  for (let i = 0; i < size; i += 4) {
    data.writeUInt32BE(i, i)
  }
  await fs.writeFile(tmpPath, data)

  return tmpPath
}

async function spawnNbdKit(path) {
  let tries = 5
  // wait for server to be ready

  const nbdServer = spawn(
    'nbdkit',
    [
      'file',
      path,
      '--newstyle', //
      '--exit-with-parent',
      '--read-only',
      '--export-name=MY_SECRET_EXPORT',
      '--tls=on',
      '--tls-certificates=./tests/',
      // '--tls-verify-peer',
      // '--verbose',
      '--exit-with-parent',
    ],
    {
      stdio: ['inherit', 'inherit', 'inherit'],
    }
  )
  nbdServer.on('error', err => {
    console.error(err)
  })
  do {
    try {
      const socket = new Socket()
      await new Promise((resolve, reject) => {
        socket.connect(NBD_DEFAULT_PORT, 'localhost')
        socket.once('error', reject)
        socket.once('connect', resolve)
      })
      socket.destroy()
      break
    } catch (err) {
      tries--
      if (tries <= 0) {
        throw err
      } else {
        await new Promise(resolve => setTimeout(resolve, 1000))
      }
    }
  } while (true)
  return nbdServer
}

async function killNbdKit() {
  return new Promise((resolve, reject) =>
    exec('pkill -9 -f -o nbdkit', err => {
      err ? reject(err) : resolve()
    })
  )
}

test('it works with unsecured network', async tap => {
  const path = await createTempFile(FILE_SIZE)

  let nbdServer = await spawnNbdKit(path)
  const client = new NbdClient(
    {
      address: '127.0.0.1',
      exportname: 'MY_SECRET_EXPORT',
      cert: `-----BEGIN CERTIFICATE-----
MIIDazCCAlOgAwIBAgIUeHpQ0IeD6BmP2zgsv3LV3J4BI/EwDQYJKoZIhvcNAQEL
BQAwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yMzA1MTcxMzU1MzBaFw0yNDA1
MTYxMzU1MzBaMEUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw
HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggEiMA0GCSqGSIb3DQEB
AQUAA4IBDwAwggEKAoIBAQC/8wLopj/iZY6ijmpvgCJsl+zY0hQZQcIoaCs0H75u
8PPSzHedtOLURAkJeMmIS40UY/eIvHh7yZolevaSJLNT2Iolscvc2W9NCF4N1V6y
zs4pDzP+YPF7Q8ldNaQIX0bAk4PfaMSM+pLh67u+uI40732AfQqD01BNCTD/uHRB
lKnQuqQpe9UM9UzRRVejpu1r19D4dJruAm6y2SJVTeT4a1sSJixl6I1YPmt80FJh
gq9O2KRGbXp1xIjemWgW99MHg63pTgxEiULwdJOGgmqGRDzgZKJS5UUpxe/ViEO4
59I18vIkgibaRYhENgmnP3lIzTOLlUe07tbSML5RGBbBAgMBAAGjUzBRMB0GA1Ud
DgQWBBR/8+zYoL0H0LdWfULHg1LynFdSbzAfBgNVHSMEGDAWgBR/8+zYoL0H0LdW
fULHg1LynFdSbzAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQBD
OF5bTmbDEGoZ6OuQaI0vyya/T4FeaoWmh22gLeL6dEEmUVGJ1NyMTOvG9GiGJ8OM
QhD1uHJei45/bXOYIDGey2+LwLWye7T4vtRFhf8amYh0ReyP/NV4/JoR/U3pTSH6
tns7GZ4YWdwUhvOOlm17EQKVO/hP3t9mp74gcjdL4bCe5MYSheKuNACAakC1OR0U
ZakJMP9ijvQuq8spfCzrK+NbHKNHR9tEgQw+ax/t1Au4dGVtFbcoxqCrx2kTl0RP
CYu1Xn/FVPx1HoRgWc7E8wFhDcA/P3SJtfIQWHB9FzSaBflKGR4t8WCE2eE8+cTB
57ABhfYpMlZ4aHjuN1bL
-----END CERTIFICATE-----
`,
    },
    {
      readAhead: 2,
    }
  )

  await client.connect()
  tap.equal(client.exportSize, BigInt(FILE_SIZE))
  const CHUNK_SIZE = 1024 * 1024 // non default size
  const indexes = []
  for (let i = 0; i < FILE_SIZE / CHUNK_SIZE; i++) {
    indexes.push(i)
  }
  const nbdIterator = client.readBlocks(function* () {
    for (const index of indexes) {
      yield { index, size: CHUNK_SIZE }
    }
  })
  let i = 0
  for await (const block of nbdIterator) {
    let blockOk = true
    let firstFail
    for (let j = 0; j < CHUNK_SIZE; j += 4) {
      const wanted = i * CHUNK_SIZE + j
      const found = block.readUInt32BE(j)
      blockOk = blockOk && found === wanted
      if (!blockOk && firstFail === undefined) {
        firstFail = j
      }
    }
    tap.ok(blockOk, `check block ${i} content`)
    i++

    // flaky server is flaky
    if (i % 7 === 0) {
      // kill the older nbdkit process
      await killNbdKit()
      nbdServer = await spawnNbdKit(path)
    }
  }

  // we can reuse the connection to read other blocks
  // default iterator
  const nbdIteratorWithDefaultBlockIterator = client.readBlocks()
  let nb = 0
  for await (const block of nbdIteratorWithDefaultBlockIterator) {
    nb++
    tap.equal(block.length, 2 * 1024 * 1024)
  }

  tap.equal(nb, 5)
  assert.rejects(() => client.readBlock(100, CHUNK_SIZE))

  await client.disconnect()
  // double disconnection shouldn't pose any problem
  await client.disconnect()
  nbdServer.kill()
  await fs.unlink(path)
})
@vates/nbd-client/tests/server-cert.pem (new file, 21 lines)
@@ -0,0 +1,21 @@
-----BEGIN CERTIFICATE-----
MIIDazCCAlOgAwIBAgIUeHpQ0IeD6BmP2zgsv3LV3J4BI/EwDQYJKoZIhvcNAQEL
BQAwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yMzA1MTcxMzU1MzBaFw0yNDA1
MTYxMzU1MzBaMEUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw
HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggEiMA0GCSqGSIb3DQEB
AQUAA4IBDwAwggEKAoIBAQC/8wLopj/iZY6ijmpvgCJsl+zY0hQZQcIoaCs0H75u
8PPSzHedtOLURAkJeMmIS40UY/eIvHh7yZolevaSJLNT2Iolscvc2W9NCF4N1V6y
zs4pDzP+YPF7Q8ldNaQIX0bAk4PfaMSM+pLh67u+uI40732AfQqD01BNCTD/uHRB
lKnQuqQpe9UM9UzRRVejpu1r19D4dJruAm6y2SJVTeT4a1sSJixl6I1YPmt80FJh
gq9O2KRGbXp1xIjemWgW99MHg63pTgxEiULwdJOGgmqGRDzgZKJS5UUpxe/ViEO4
59I18vIkgibaRYhENgmnP3lIzTOLlUe07tbSML5RGBbBAgMBAAGjUzBRMB0GA1Ud
DgQWBBR/8+zYoL0H0LdWfULHg1LynFdSbzAfBgNVHSMEGDAWgBR/8+zYoL0H0LdW
fULHg1LynFdSbzAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQBD
OF5bTmbDEGoZ6OuQaI0vyya/T4FeaoWmh22gLeL6dEEmUVGJ1NyMTOvG9GiGJ8OM
QhD1uHJei45/bXOYIDGey2+LwLWye7T4vtRFhf8amYh0ReyP/NV4/JoR/U3pTSH6
tns7GZ4YWdwUhvOOlm17EQKVO/hP3t9mp74gcjdL4bCe5MYSheKuNACAakC1OR0U
ZakJMP9ijvQuq8spfCzrK+NbHKNHR9tEgQw+ax/t1Au4dGVtFbcoxqCrx2kTl0RP
CYu1Xn/FVPx1HoRgWc7E8wFhDcA/P3SJtfIQWHB9FzSaBflKGR4t8WCE2eE8+cTB
57ABhfYpMlZ4aHjuN1bL
-----END CERTIFICATE-----
@vates/nbd-client/tests/server-key.pem (new file, 28 lines)
@@ -0,0 +1,28 @@
-----BEGIN PRIVATE KEY-----
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC/8wLopj/iZY6i
jmpvgCJsl+zY0hQZQcIoaCs0H75u8PPSzHedtOLURAkJeMmIS40UY/eIvHh7yZol
evaSJLNT2Iolscvc2W9NCF4N1V6yzs4pDzP+YPF7Q8ldNaQIX0bAk4PfaMSM+pLh
67u+uI40732AfQqD01BNCTD/uHRBlKnQuqQpe9UM9UzRRVejpu1r19D4dJruAm6y
2SJVTeT4a1sSJixl6I1YPmt80FJhgq9O2KRGbXp1xIjemWgW99MHg63pTgxEiULw
dJOGgmqGRDzgZKJS5UUpxe/ViEO459I18vIkgibaRYhENgmnP3lIzTOLlUe07tbS
ML5RGBbBAgMBAAECggEATLYiafcTHfgnZmjTOad0WoDnC4n9tVBV948WARlUooLS
duL3RQRHCLz9/ZaTuFA1XDpNcYyc/B/IZoU7aJGZR3+JSmJBjowpUphu+klVNNG4
i6lDRrzYlUI0hfdLjHsDTDBIKi91KcB0lix/VkvsrVQvDHwsiR2ZAIiVWAWQFKrR
5O3DhSTHbqyq47uR58rWr4Zf3zvZaUl841AS1yELzCiZqz7AenvyWphim0c0XA5d
I63CEShntHnEAA9OMcP8+BNf/3AmqB4welY+m8elB3aJNH+j7DKq/AWqaM5nl2PC
cS6qgpxwOyTxEOyj1xhwK5ZMRR3heW3NfutIxSOPlwKBgQDB9ZkrBeeGVtCISO7C
eCANzSLpeVrahTvaCSQLdPHsLRLDUc+5mxdpi3CaRlzYs3S1OWdAtyWX9mBryltF
qDPhCNjFDyHok4D3wLEWdS9oUVwEKUM8fOPW3tXLLiMM7p4862Qo7LqnqHzPqsnz
22iZo5yjcc7aLJ+VmFrbAowwOwKBgQD9WNCvczTd7Ymn7zEvdiAyNoS0OZ0orwEJ
zGaxtjqVguGklNfrb/UB+eKNGE80+YnMiSaFc9IQPetLntZdV0L7kWYdCI8kGDNA
DbVRCOp+z8DwAojlrb/zsYu23anQozT3WeHxVU66lNuyEQvSW2tJa8gN1htrD7uY
5KLibYrBMwKBgEM0iiHyJcrSgeb2/mO7o7+keJhVSDm3OInP6QFfQAQJihrLWiKB
rpcPjbCm+LzNUX8JqNEvpIMHB1nR/9Ye9frfSdzd5W3kzicKSVHywL5wkmWOtpFa
5Mcq5wFDtzlf5MxO86GKhRJauwRptRgdyhySKFApuva1x4XaCIEiXNjJAoGBAN82
t3c+HCBEv3o05rMYcrmLC1T3Rh6oQlPtwbVmByvfywsFEVCgrc/16MPD3VWhXuXV
GRmPuE8THxLbead30M5xhvShq+xzXgRbj5s8Lc9ZIHbW5OLoOS1vCtgtaQcoJOyi
Rs4pCVqe+QpktnO6lEZ2Libys+maTQEiwNibBxu9AoGAUG1V5aKMoXa7pmGeuFR6
ES+1NDiCt6yDq9BsLZ+e2uqvWTkvTGLLwvH6xf9a0pnnILd0AUTKAAaoUdZS6++E
cGob7fxMwEE+UETp0QBgLtfjtExMOFwr2avw8PV4CYEUkPUAm2OFB2Twh+d/PNfr
FAxF1rN47SBPNbFI8N4TFsg=
-----END PRIVATE KEY-----
@vates/node-vsphere-soap/.npmignore (new symbolic link, 1 line)
@@ -0,0 +1 @@
../../scripts/npmignore
@vates/node-vsphere-soap/LICENSE (new file, 22 lines)
@@ -0,0 +1,22 @@
The MIT License (MIT)

Copyright (c) 2015 reedog117

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
@vates/node-vsphere-soap/README.md
Normal file
127
@vates/node-vsphere-soap/README.md
Normal file
@@ -0,0 +1,127 @@
|
||||
forked from https://github.com/reedog117/node-vsphere-soap
|
||||
|
||||
# node-vsphere-soap
|
||||
|
||||
[](https://gitter.im/reedog117/node-vsphere-soap?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
|
||||
|
||||
This is a Node.js module to connect to VMware vCenter servers and/or ESXi hosts and perform operations using the [vSphere Web Services API]. If you're feeling really adventurous, you can use this module to port vSphere operations from other languages (such as the Perl, Python, and Go libraries that exist) and have fully native Node.js code controlling your VMware virtual infrastructure!
|
||||
|
||||
This is very much in alpha.
|
||||
|
||||
## Authors
|
||||
|
||||
- Patrick C - [@reedog117]
|
||||
|
||||
## Version
|
||||
|
||||
0.0.2-5
|
||||
|
||||
## Installation
|
||||
|
||||
```sh
|
||||
$ npm install node-vsphere-soap --save
|
||||
```
|
||||
|
||||
## Sample Code
|
||||
|
||||
### To connect to a vCenter server:
|
||||
|
||||
var nvs = require('node-vsphere-soap');
|
||||
var vc = new nvs.Client(host, user, password, sslVerify);
|
||||
vc.once('ready', function() {
|
||||
// perform work here
|
||||
});
|
||||
vc.once('error', function(err) {
|
||||
// handle error here
|
||||
});
|
||||
|
||||
#### Arguments
|
||||
|
||||
- host = hostname or IP of vCenter/ESX/ESXi server
|
||||
- user = username
|
||||
- password = password
|
||||
- sslVerify = true|false - set to false if you have self-signed/unverified certificates
|
||||
|
||||
#### Events
|
||||
|
||||
- ready = emits when session authenticated with server
|
||||
- error = emits when there's an error
|
||||
- _err_ contains the error
|
||||
|
||||
#### Client instance variables
|
||||
|
||||
- serviceContent - ServiceContent object retrieved by RetrieveServiceContent API call
|
||||
- userName - username of authenticated user
|
||||
- fullName - full name of authenticated user
|
||||
|
||||
### To run a command:
|
||||
|
||||
var vcCmd = vc.runCommand( commandToRun, arguments );
|
||||
vcCmd.once('result', function( result, raw, soapHeader) {
|
||||
// handle results
|
||||
});
|
||||
vcCmd.once('error', function( err) {
|
||||
// handle errors
|
||||
});
|
||||
|
||||
#### Arguments
|
||||
|
||||
- commandToRun = Method from the vSphere API
|
||||
- arguments = JSON document containing arguments to send
|
||||
|
||||
#### Events
|
||||
|
||||
- result = emits when session authenticated with server
|
||||
- _result_ contains the JSON-formatted result from the server
|
||||
- _raw_ contains the raw SOAP XML response from the server
|
||||
- _soapHeader_ contains any soapHeaders from the server
|
||||
- error = emits when there's an error
|
||||
- _err_ contains the error
|
||||
|
||||
Make sure you check out tests/vsphere-soap.test.js for examples on how to create commands to run
|
||||
|
||||
## Development
|
||||
|
||||
node-vsphere-soap uses a number of open source projects to work properly:
|
||||
|
||||
- [node.js] - evented I/O for the backend
|
||||
- [node-soap] - SOAP client for Node.js
|
||||
- [soap-cookie] - cookie authentication for the node-soap module
|
||||
- [lodash] - for quickly manipulating JSON
|
||||
- [lab] - testing engine
|
||||
- [code] - assertion engine used with lab
|
||||
|
||||
Want to contribute? Great!
|
||||
|
||||
### Todo's
|
||||
|
||||
- Write More Tests
|
||||
- Create Travis CI test harness with a fake vCenter Instance
|
||||
- Add Code Comments
|
||||
|
||||
### Testing
|
||||
|
||||
I have been testing on a Mac with node v0.10.36 and both ESXi and vCenter 5.5.
|
||||
|
||||
To edit tests, edit the file **test/vsphere-soap.test.js**
|
||||
|
||||
To point the module at your own vCenter/ESXi host, edit **config-test.stub.js** and save it as **config-test.js**
|
||||
|
||||
To run test scripts:
|
||||
|
||||
```sh
|
||||
$ npm test
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
MIT
|
||||
|
||||
[vSphere Web Services API]: http://pubs.vmware.com/vsphere-55/topic/com.vmware.wssdk.apiref.doc/right-pane.html
|
||||
[node-soap]: https://github.com/vpulim/node-soap
|
||||
[node.js]: http://nodejs.org/
|
||||
[soap-cookie]: https://github.com/shanestillwell/soap-cookie
|
||||
[code]: https://github.com/hapijs/code
|
||||
[lab]: https://github.com/hapijs/lab
|
||||
[lodash]: https://lodash.com/
|
||||
[@reedog117]: http://www.twitter.com/reedog117
|
||||
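The README examples are still written in the callback style of the original fork. With the ESM client added below, the same flow might look like this; a sketch only, where the import path is illustrative (the package's exact entry point is not shown in this diff) and `CurrentTime` is a standard vSphere API method on ServiceInstance:

```js
import { Client } from '@vates/node-vsphere-soap'

const vc = new Client('vcenter.example.org', 'administrator@vsphere.local', 'password', false)
vc.once('ready', () => {
  // runCommand returns an EventEmitter; _this targets the ServiceInstance managed object
  vc.runCommand('CurrentTime', { _this: 'ServiceInstance' })
    .once('result', result => console.log(result))
    .once('error', err => console.error(err))
})
vc.once('error', err => console.error(err))
```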
230
@vates/node-vsphere-soap/lib/client.mjs
Normal file
230
@vates/node-vsphere-soap/lib/client.mjs
Normal file
@@ -0,0 +1,230 @@
/*

  node-vsphere-soap

  client.js

  This file creates the Client class

  - when the class is instantiated, a connection will be made to the ESXi/vCenter server to verify that the creds are good
  - upon a bad login, the connection will be terminated

*/

import { EventEmitter } from 'events'
import axios from 'axios'
import https from 'node:https'
import util from 'util'
import soap from 'soap'
import Cookie from 'soap-cookie' // required for session persistence

// Client class
// inherits from EventEmitter
// possible events: connect, error, ready

export function Client(vCenterHostname, username, password, sslVerify) {
  this.status = 'disconnected'
  this.reconnectCount = 0

  sslVerify = typeof sslVerify !== 'undefined' ? sslVerify : false

  EventEmitter.call(this)

  // sslVerify argument handling
  if (sslVerify) {
    this.clientopts = {}
  } else {
    this.clientopts = {
      request: axios.create({
        httpsAgent: new https.Agent({
          rejectUnauthorized: false,
        }),
      }),
    }
  }

  this.connectionInfo = {
    host: vCenterHostname,
    user: username,
    password,
    sslVerify,
  }

  this._loginArgs = {
    userName: this.connectionInfo.user,
    password: this.connectionInfo.password,
  }

  this._vcUrl = 'https://' + this.connectionInfo.host + '/sdk/vimService.wsdl'

  // connect to the vCenter / ESXi host
  this.on('connect', this._connect)
  this.emit('connect')

  // close session
  this.on('close', this._close)

  return this
}

util.inherits(Client, EventEmitter)

Client.prototype.runCommand = function (command, args) {
  const self = this
  let cmdargs
  if (!args || args === null) {
    cmdargs = {}
  } else {
    cmdargs = args
  }

  const emitter = new EventEmitter()

  // check if client has successfully connected
  if (self.status === 'ready' || self.status === 'connecting') {
    self.client.VimService.VimPort[command](cmdargs, function (err, result, raw, soapHeader) {
      if (err) {
        _soapErrorHandler(self, emitter, command, cmdargs, err)
      }
      if (command === 'Logout') {
        self.status = 'disconnected'
        process.removeAllListeners('beforeExit')
      }
      emitter.emit('result', result, raw, soapHeader)
    })
  } else {
    // if connection not ready or connecting, reconnect to instance
    if (self.status === 'disconnected') {
      self.emit('connect')
    }
    self.once('ready', function () {
      self.client.VimService.VimPort[command](cmdargs, function (err, result, raw, soapHeader) {
        if (err) {
          _soapErrorHandler(self, emitter, command, cmdargs, err)
        }
        if (command === 'Logout') {
          self.status = 'disconnected'
          process.removeAllListeners('beforeExit')
        }
        emitter.emit('result', result, raw, soapHeader)
      })
    })
  }

  return emitter
}

Client.prototype.close = function () {
  const self = this

  self.emit('close')
}

Client.prototype._connect = function () {
  const self = this

  if (self.status !== 'disconnected') {
    return
  }

  self.status = 'connecting'

  soap.createClient(
    self._vcUrl,
    self.clientopts,
    function (err, client) {
      if (err) {
        self.emit('error', err)
        throw err
      }

      self.client = client // save client for later use

      self
        .runCommand('RetrieveServiceContent', { _this: 'ServiceInstance' })
        .once('result', function (result, raw, soapHeader) {
          if (!result.returnval) {
            self.status = 'disconnected'
            self.emit('error', raw)
            return
          }

          self.serviceContent = result.returnval
          self.sessionManager = result.returnval.sessionManager
          const loginArgs = { _this: self.sessionManager, ...self._loginArgs }

          self
            .runCommand('Login', loginArgs)
            .once('result', function (result, raw, soapHeader) {
              self.authCookie = new Cookie(client.lastResponseHeaders)
              self.client.setSecurity(self.authCookie) // needed since vSphere SOAP WS uses cookies

              self.userName = result.returnval.userName
              self.fullName = result.returnval.fullName
              self.reconnectCount = 0

              self.status = 'ready'
              self.emit('ready')
              process.once('beforeExit', self._close)
            })
            .once('error', function (err) {
              self.status = 'disconnected'
              self.emit('error', err)
            })
        })
        .once('error', function (err) {
          self.status = 'disconnected'
          self.emit('error', err)
        })
    },
    self._vcUrl
  )
}

Client.prototype._close = function () {
  const self = this

  if (self.status === 'ready') {
    self
      .runCommand('Logout', { _this: self.sessionManager })
      .once('result', function () {
        self.status = 'disconnected'
      })
      .once('error', function () {
        /* don't care about errors during disconnection */
        self.status = 'disconnected'
      })
  } else {
    self.status = 'disconnected'
  }
}

function _soapErrorHandler(self, emitter, command, args, err) {
  err = err || { body: 'general error' }

  if (err.body.match(/session is not authenticated/)) {
    self.status = 'disconnected'
    process.removeAllListeners('beforeExit')

    if (self.reconnectCount < 10) {
      self.reconnectCount += 1
      self
        .runCommand(command, args)
        .once('result', function (result, raw, soapHeader) {
          emitter.emit('result', result, raw, soapHeader)
        })
        .once('error', function (err) {
          emitter.emit('error', err.body)
          throw err
        })
    } else {
      emitter.emit('error', err.body)
      throw err
    }
  } else {
    emitter.emit('error', err.body)
    throw err
  }
}

// end
38
@vates/node-vsphere-soap/package.json
Normal file
@@ -0,0 +1,38 @@
{
  "name": "@vates/node-vsphere-soap",
  "version": "2.0.0",
  "description": "interface to vSphere SOAP/WSDL from node for interfacing with vCenter or ESXi, forked from node-vsphere-soap",
  "main": "lib/client.mjs",
  "author": "reedog117",
  "repository": {
    "directory": "@vates/node-vsphere-soap",
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "dependencies": {
    "axios": "^1.4.0",
    "soap": "^1.0.0",
    "soap-cookie": "^0.10.1"
  },
  "devDependencies": {
    "test": "^3.3.0"
  },
  "keywords": [
    "vsphere",
    "vcenter",
    "api",
    "soap",
    "wsdl"
  ],
  "preferGlobal": false,
  "license": "MIT",
  "private": false,
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/node-vsphere-soap",
  "engines": {
    "node": ">=14"
  },
  "scripts": {
    "postversion": "npm publish --access public"
  }
}
11
@vates/node-vsphere-soap/test/config-test.stub.mjs
Normal file
@@ -0,0 +1,11 @@
// place your own credentials here for a vCenter or ESXi server
// this information will be used for connecting to a vCenter instance
// for module testing
// name the file config-test.mjs

export const vCenterTestCreds = {
  vCenterIP: 'vcsa',
  vCenterUser: 'vcuser',
  vCenterPassword: 'vcpw',
  vCenter: true,
}
138
@vates/node-vsphere-soap/test/vsphere-soap.test.mjs
Normal file
@@ -0,0 +1,138 @@
/*
  vsphere-soap.test.js

  tests for the vCenterConnectionInstance class
*/

import assert from 'assert'
import { describe, it } from 'test'

import * as vc from '../lib/client.mjs'

// eslint-disable-next-line n/no-missing-import
import { vCenterTestCreds as TestCreds } from '../config-test.mjs'

const VItest = new vc.Client(TestCreds.vCenterIP, TestCreds.vCenterUser, TestCreds.vCenterPassword, false)

describe('Client object initialization:', function () {
  it('provides a successful login', { timeout: 5000 }, function (t, done) {
    VItest.once('ready', function () {
      assert.notEqual(VItest.userName, null)
      assert.notEqual(VItest.fullName, null)
      assert.notEqual(VItest.serviceContent, null)
      done()
    }).once('error', function (err) {
      console.error(err)
      // this should fail if there's a problem
      assert.notEqual(VItest.userName, null)
      assert.notEqual(VItest.fullName, null)
      assert.notEqual(VItest.serviceContent, null)
      done()
    })
  })
})

describe('Client reconnection test:', function () {
  it('can successfully reconnect', { timeout: 5000 }, function (t, done) {
    VItest.runCommand('Logout', { _this: VItest.serviceContent.sessionManager })
      .once('result', function (result) {
        // now we're logged out, so let's try running a command to test automatic re-login
        VItest.runCommand('CurrentTime', { _this: 'ServiceInstance' })
          .once('result', function (result) {
            assert(result.returnval instanceof Date)
            done()
          })
          .once('error', function (err) {
            console.error(err)
          })
      })
      .once('error', function (err) {
        console.error(err)
      })
  })
})

// these tests don't work yet
describe('Client tests - query commands:', function () {
  it('retrieves current time', { timeout: 5000 }, function (t, done) {
    VItest.runCommand('CurrentTime', { _this: 'ServiceInstance' }).once('result', function (result) {
      assert(result.returnval instanceof Date)
      done()
    })
  })

  it('retrieves current time 2 (check for event clobbering)', { timeout: 5000 }, function (t, done) {
    VItest.runCommand('CurrentTime', { _this: 'ServiceInstance' }).once('result', function (result) {
      assert(result.returnval instanceof Date)
      done()
    })
  })

  it('can obtain the names of all Virtual Machines in the inventory', { timeout: 20000 }, function (t, done) {
    // get property collector
    const propertyCollector = VItest.serviceContent.propertyCollector
    // get view manager
    const viewManager = VItest.serviceContent.viewManager
    // get root folder
    const rootFolder = VItest.serviceContent.rootFolder

    let containerView, objectSpec, traversalSpec, propertySpec, propertyFilterSpec
    // this is the equivalent to
    VItest.runCommand('CreateContainerView', {
      _this: viewManager,
      container: rootFolder,
      type: ['VirtualMachine'],
      recursive: true,
    }).once('result', function (result) {
      // build all the data structures needed to query all the vm names
      containerView = result.returnval

      objectSpec = {
        attributes: { 'xsi:type': 'ObjectSpec' }, // setting attributes xsi:type is important or else the server may mis-recognize types!
        obj: containerView,
        skip: true,
      }

      traversalSpec = {
        attributes: { 'xsi:type': 'TraversalSpec' },
        name: 'traverseEntities',
        type: 'ContainerView',
        path: 'view',
        skip: false,
      }

      objectSpec = { ...objectSpec, selectSet: [traversalSpec] }

      propertySpec = {
        attributes: { 'xsi:type': 'PropertySpec' },
        type: 'VirtualMachine',
        pathSet: ['name'],
      }

      propertyFilterSpec = {
        attributes: { 'xsi:type': 'PropertyFilterSpec' },
        propSet: [propertySpec],
        objectSet: [objectSpec],
      }
      // TODO: research why it fails if propSet is declared after objectSet

      VItest.runCommand('RetrievePropertiesEx', {
        _this: propertyCollector,
        specSet: [propertyFilterSpec],
        options: { attributes: { type: 'RetrieveOptions' } },
      })
        .once('result', function (result, raw) {
          assert.notEqual(result.returnval.objects, null)
          if (Array.isArray(result.returnval.objects)) {
            assert.strictEqual(result.returnval.objects[0].obj.attributes.type, 'VirtualMachine')
          } else {
            assert.strictEqual(result.returnval.objects.obj.attributes.type, 'VirtualMachine')
          }
          done()
        })
        .once('error', function (err) {
          console.error('\n\nlast request : ' + VItest.client.lastRequest, err)
        })
    })
  })
})
@@ -1,6 +1,7 @@
'use strict'

const assert = require('assert')
const isUtf8 = require('isutf8')

/**
 * Read a chunk of data from a stream.
@@ -81,6 +82,13 @@ exports.readChunkStrict = async function readChunkStrict(stream, size) {

  if (size !== undefined && chunk.length !== size) {
    const error = new Error(`stream has ended with not enough data (actual: ${chunk.length}, expected: ${size})`)

    // Buffer.isUtf8 is too recent for now
    // @todo: replace the external package with Buffer.isUtf8 when the supported version of node reaches 18

    if (chunk.length < 1024 && isUtf8(chunk)) {
      error.text = chunk.toString('utf8')
    }
    Object.defineProperties(error, {
      chunk: {
        value: chunk,
@@ -102,12 +102,37 @@ describe('readChunkStrict', function () {
    assert.strictEqual(error.chunk, undefined)
  })

  it('throws if stream ends with not enough data', async () => {
  it('throws if stream ends with not enough data, utf8', async () => {
    const error = await rejectionOf(readChunkStrict(makeStream(['foo', 'bar']), 10))
    assert(error instanceof Error)
    assert.strictEqual(error.message, 'stream has ended with not enough data (actual: 6, expected: 10)')
    assert.strictEqual(error.text, 'foobar')
    assert.deepEqual(error.chunk, Buffer.from('foobar'))
  })

  it('throws if stream ends with not enough data, non utf8 ', async () => {
    const source = [Buffer.alloc(10, 128), Buffer.alloc(10, 128)]
    const error = await rejectionOf(readChunkStrict(makeStream(source), 30))
    assert(error instanceof Error)
    assert.strictEqual(error.message, 'stream has ended with not enough data (actual: 20, expected: 30)')
    assert.strictEqual(error.text, undefined)
    assert.deepEqual(error.chunk, Buffer.concat(source))
  })

  it('throws if stream ends with not enough data, utf8 , long data', async () => {
    const source = Buffer.from('a'.repeat(1500))
    const error = await rejectionOf(readChunkStrict(makeStream([source]), 2000))
    assert(error instanceof Error)
    assert.strictEqual(error.message, `stream has ended with not enough data (actual: 1500, expected: 2000)`)
    assert.strictEqual(error.text, undefined)
    assert.deepEqual(error.chunk, source)
  })

  it('succeed', async () => {
    const source = Buffer.from('a'.repeat(20))
    const chunk = await readChunkStrict(makeStream([source]), 10)
    assert.deepEqual(source.subarray(10), chunk)
  })
})

describe('skip', function () {
@@ -134,6 +159,16 @@ describe('skip', function () {
  it('returns less size if stream ends', async () => {
    assert.deepEqual(await skip(makeStream('foo bar'), 10), 7)
  })

  it('put back if it read too much', async () => {
    let source = makeStream(['foo', 'bar'])
    await skip(source, 1) // read part of data chunk
    const chunk = (await readChunkStrict(source, 2)).toString('utf-8')
    assert.strictEqual(chunk, 'oo')

    source = makeStream(['foo', 'bar'])
    assert.strictEqual(await skip(source, 3), 3) // read aligned with data chunk
  })
})

describe('skipStrict', function () {
@@ -144,4 +179,9 @@ describe('skipStrict', function () {
    assert.strictEqual(error.message, 'stream has ended with not enough data (actual: 7, expected: 10)')
    assert.deepEqual(error.bytesSkipped, 7)
  })
  it('succeed', async () => {
    const source = makeStream(['foo', 'bar', 'baz'])
    const res = await skipStrict(source, 4)
    assert.strictEqual(res, undefined)
  })
})
@@ -19,7 +19,7 @@
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "version": "1.1.1",
  "version": "1.2.0",
  "engines": {
    "node": ">=8.10"
  },
@@ -33,5 +33,8 @@
  },
  "devDependencies": {
    "test": "^3.2.1"
  },
  "dependencies": {
    "isutf8": "^4.0.0"
  }
}
@@ -2,10 +2,8 @@
import { Task } from '@vates/task'

const task = new Task({
  // data in this object will be sent along the *start* event
  //
  // property names should be chosen as not to clash with properties used by `Task` or `combineEvents`
  data: {
  // this object will be sent in the *start* event
  properties: {
    name: 'my task',
  },

@@ -16,13 +14,15 @@ const task = new Task({
    // this function is called each time this task or one of its subtasks changes state
    const { id, timestamp, type } = event
    if (type === 'start') {
      const { name, parentId } = event
      const { name, parentId, properties } = event
    } else if (type === 'end') {
      const { result, status } = event
    } else if (type === 'info' || type === 'warning') {
      const { data, message } = event
    } else if (type === 'property') {
      const { name, value } = event
    } else if (type === 'abortionRequested') {
      const { reason } = event
    }
  },
})
@@ -36,7 +36,6 @@ task.id
// - pending
// - success
// - failure
// - aborted
task.status

// Triggers the abort signal associated to the task.
@@ -89,6 +88,30 @@ const onProgress = makeOnProgress({
  onRootTaskStart(taskLog) {
    // `taskLog` is an object reflecting the state of this task and all its subtasks,
    // and will be mutated in real-time to reflect the changes of the task.

    // timestamp at which the task started
    taskLog.start

    // current status of the task as described in the previous section
    taskLog.status

    // undefined or a dictionary of properties attached to the task
    taskLog.properties

    // timestamp at which the abortion was requested, undefined otherwise
    taskLog.abortionRequestedAt

    // undefined or an array of infos emitted on the task
    taskLog.infos

    // undefined or an array of warnings emitted on the task
    taskLog.warnings

    // timestamp at which the task ended, undefined otherwise
    taskLog.end

    // undefined or the result value of the task
    taskLog.result
  },

  // This function is called each time a root task ends.
@@ -18,10 +18,8 @@ npm install --save @vates/task
import { Task } from '@vates/task'

const task = new Task({
  // data in this object will be sent along the *start* event
  //
  // property names should be chosen as not to clash with properties used by `Task` or `combineEvents`
  data: {
  // this object will be sent in the *start* event
  properties: {
    name: 'my task',
  },

@@ -32,13 +30,15 @@ const task = new Task({
    // this function is called each time this task or one of its subtasks changes state
    const { id, timestamp, type } = event
    if (type === 'start') {
      const { name, parentId } = event
      const { name, parentId, properties } = event
    } else if (type === 'end') {
      const { result, status } = event
    } else if (type === 'info' || type === 'warning') {
      const { data, message } = event
    } else if (type === 'property') {
      const { name, value } = event
    } else if (type === 'abortionRequested') {
      const { reason } = event
    }
  },
})
@@ -52,7 +52,6 @@ task.id
// - pending
// - success
// - failure
// - aborted
task.status

// Triggers the abort signal associated to the task.
@@ -105,6 +104,30 @@ const onProgress = makeOnProgress({
  onRootTaskStart(taskLog) {
    // `taskLog` is an object reflecting the state of this task and all its subtasks,
    // and will be mutated in real-time to reflect the changes of the task.

    // timestamp at which the task started
    taskLog.start

    // current status of the task as described in the previous section
    taskLog.status

    // undefined or a dictionary of properties attached to the task
    taskLog.properties

    // timestamp at which the abortion was requested, undefined otherwise
    taskLog.abortionRequestedAt

    // undefined or an array of infos emitted on the task
    taskLog.infos

    // undefined or an array of warnings emitted on the task
    taskLog.warnings

    // timestamp at which the task ended, undefined otherwise
    taskLog.end

    // undefined or the result value of the task
    taskLog.result
  },

  // This function is called each time a root task ends.
@@ -4,36 +4,18 @@ const assert = require('node:assert').strict

const noop = Function.prototype

function omit(source, keys, target = { __proto__: null }) {
  for (const key of Object.keys(source)) {
    if (!keys.has(key)) {
      target[key] = source[key]
    }
  }
  return target
}

const IGNORED_START_PROPS = new Set([
  'end',
  'infos',
  'properties',
  'result',
  'status',
  'tasks',
  'timestamp',
  'type',
  'warnings',
])

exports.makeOnProgress = function ({ onRootTaskEnd = noop, onRootTaskStart = noop, onTaskUpdate = noop }) {
  const taskLogs = new Map()
  return function onProgress(event) {
    const { id, type } = event
    let taskLog
    if (type === 'start') {
      taskLog = omit(event, IGNORED_START_PROPS)
      taskLog.start = event.timestamp
      taskLog.status = 'pending'
      taskLog = {
        id,
        properties: { __proto__: null, ...event.properties },
        start: event.timestamp,
        status: 'pending',
      }
      taskLogs.set(id, taskLog)

      const { parentId } = event
@@ -65,6 +47,8 @@ exports.makeOnProgress = function ({ onRootTaskEnd = noop, onRootTaskStart = noo
      taskLog.end = event.timestamp
      taskLog.result = event.result
      taskLog.status = event.status
    } else if (type === 'abortionRequested') {
      taskLog.abortionRequestedAt = event.timestamp
    }

    if (type === 'end' && taskLog.$root === taskLog) {
@@ -11,7 +11,7 @@ describe('makeOnProgress()', function () {
    const events = []
    let log
    const task = new Task({
      data: { name: 'task' },
      properties: { name: 'task' },
      onProgress: makeOnProgress({
        onRootTaskStart(log_) {
          assert.equal(log, undefined)
@@ -32,36 +32,50 @@ describe('makeOnProgress()', function () {

    assert.equal(events.length, 0)

    let i = 0

    await task.run(async () => {
      assert.equal(events[0], 'onRootTaskStart')
      assert.equal(events[1], 'onTaskUpdate')
      assert.equal(log.name, 'task')
      assert.equal(events[i++], 'onRootTaskStart')
      assert.equal(events[i++], 'onTaskUpdate')
      assert.equal(log.id, task.id)
      assert.equal(log.properties.name, 'task')
      assert(Math.abs(log.start - Date.now()) < 10)

      Task.set('name', 'new name')
      assert.equal(events[i++], 'onTaskUpdate')
      assert.equal(log.properties.name, 'new name')

      Task.set('progress', 0)
      assert.equal(events[2], 'onTaskUpdate')
      assert.equal(events[i++], 'onTaskUpdate')
      assert.equal(log.properties.progress, 0)

      Task.info('foo', {})
      assert.equal(events[3], 'onTaskUpdate')
      assert.equal(events[i++], 'onTaskUpdate')
      assert.deepEqual(log.infos, [{ data: {}, message: 'foo' }])

      await Task.run({ data: { name: 'subtask' } }, () => {
        assert.equal(events[4], 'onTaskUpdate')
        assert.equal(log.tasks[0].name, 'subtask')
      const subtask = new Task({ properties: { name: 'subtask' } })
      await subtask.run(() => {
        assert.equal(events[i++], 'onTaskUpdate')
        assert.equal(log.tasks[0].properties.name, 'subtask')

        Task.warning('bar', {})
        assert.equal(events[5], 'onTaskUpdate')
        assert.equal(events[i++], 'onTaskUpdate')
        assert.deepEqual(log.tasks[0].warnings, [{ data: {}, message: 'bar' }])

        subtask.abort()
        assert.equal(events[i++], 'onTaskUpdate')
        assert(Math.abs(log.tasks[0].abortionRequestedAt - Date.now()) < 10)
      })
      assert.equal(events[6], 'onTaskUpdate')
      assert.equal(events[i++], 'onTaskUpdate')
      assert.equal(log.tasks[0].status, 'success')

      Task.set('progress', 100)
      assert.equal(events[7], 'onTaskUpdate')
      assert.equal(events[i++], 'onTaskUpdate')
      assert.equal(log.properties.progress, 100)
    })
    assert.equal(events[8], 'onRootTaskEnd')
    assert.equal(events[9], 'onTaskUpdate')
    assert.equal(events[i++], 'onRootTaskEnd')
    assert.equal(events[i++], 'onTaskUpdate')
    assert(Math.abs(log.end - Date.now()) < 10)
    assert.equal(log.status, 'success')
  })
})
@@ -10,11 +10,10 @@ function define(object, property, value) {

const noop = Function.prototype

const ABORTED = 'aborted'
const FAILURE = 'failure'
const PENDING = 'pending'
const SUCCESS = 'success'
exports.STATUS = { ABORTED, FAILURE, PENDING, SUCCESS }
exports.STATUS = { FAILURE, PENDING, SUCCESS }

// stored in the global context so that various versions of the library can interact.
const asyncStorageKey = '@vates/task@0'
@@ -83,8 +82,8 @@ exports.Task = class Task {
    return this.#status
  }

  constructor({ data = {}, onProgress } = {}) {
    this.#startData = data
  constructor({ properties, onProgress } = {}) {
    this.#startData = { properties }

    if (onProgress !== undefined) {
      this.#onProgress = onProgress
@@ -105,12 +104,16 @@ exports.Task = class Task {

    const { signal } = this.#abortController
    signal.addEventListener('abort', () => {
      if (this.status === PENDING && !this.#running) {
      if (this.status === PENDING) {
        this.#maybeStart()

        const status = ABORTED
        this.#status = status
        this.#emit('end', { result: signal.reason, status })
        this.#emit('abortionRequested', { reason: signal.reason })

        if (!this.#running) {
          const status = FAILURE
          this.#status = status
          this.#emit('end', { result: signal.reason, status })
        }
      }
    })
  }
@@ -156,9 +159,7 @@ exports.Task = class Task {
      this.#running = false
      return result
    } catch (result) {
      const { signal } = this.#abortController
      const aborted = signal.aborted && result === signal.reason
      const status = aborted ? ABORTED : FAILURE
      const status = FAILURE

      this.#status = status
      this.#emit('end', { status, result })
@@ -15,7 +15,7 @@ function assertEvent(task, expected, eventIndex = -1) {
  assert.equal(typeof actual.id, 'string')
  assert.equal(typeof actual.timestamp, 'number')
  for (const keys of Object.keys(expected)) {
    assert.equal(actual[keys], expected[keys])
    assert.deepEqual(actual[keys], expected[keys])
  }
}

@@ -30,10 +30,10 @@ function createTask(opts) {
describe('Task', function () {
  describe('contructor', function () {
    it('data properties are passed to the start event', async function () {
      const data = { foo: 0, bar: 1 }
      const task = createTask({ data })
      const properties = { foo: 0, bar: 1 }
      const task = createTask({ properties })
      await task.run(noop)
      assertEvent(task, { ...data, type: 'start' }, 0)
      assertEvent(task, { type: 'start', properties }, 0)
    })
  })

@@ -79,20 +79,22 @@ describe('Task', function () {
        })
        .catch(noop)

      assert.equal(task.status, 'aborted')
      assert.equal(task.status, 'failure')

      assert.equal(task.$events.length, 2)
      assert.equal(task.$events.length, 3)
      assertEvent(task, { type: 'start' }, 0)
      assertEvent(task, { type: 'end', status: 'aborted', result: reason }, 1)
      assertEvent(task, { type: 'abortionRequested', reason }, 1)
      assertEvent(task, { type: 'end', status: 'failure', result: reason }, 2)
    })

    it('does not abort if the task fails without the abort reason', async function () {
      const task = createTask()
      const reason = {}
      const result = new Error()

      await task
        .run(() => {
          task.abort({})
          task.abort(reason)

          throw result
        })
@@ -100,18 +102,20 @@ describe('Task', function () {

      assert.equal(task.status, 'failure')

      assert.equal(task.$events.length, 2)
      assert.equal(task.$events.length, 3)
      assertEvent(task, { type: 'start' }, 0)
      assertEvent(task, { type: 'end', status: 'failure', result }, 1)
      assertEvent(task, { type: 'abortionRequested', reason }, 1)
      assertEvent(task, { type: 'end', status: 'failure', result }, 2)
    })

    it('does not abort if the task succeed', async function () {
      const task = createTask()
      const reason = {}
      const result = {}

      await task
        .run(() => {
          task.abort({})
          task.abort(reason)

          return result
        })
@@ -119,9 +123,10 @@ describe('Task', function () {

      assert.equal(task.status, 'success')

      assert.equal(task.$events.length, 2)
      assert.equal(task.$events.length, 3)
      assertEvent(task, { type: 'start' }, 0)
      assertEvent(task, { type: 'end', status: 'success', result }, 1)
      assertEvent(task, { type: 'abortionRequested', reason }, 1)
      assertEvent(task, { type: 'end', status: 'success', result }, 2)
    })

    it('aborts before task is running', function () {
@@ -130,11 +135,12 @@ describe('Task', function () {

      task.abort(reason)

      assert.equal(task.status, 'aborted')
      assert.equal(task.status, 'failure')

      assert.equal(task.$events.length, 2)
      assert.equal(task.$events.length, 3)
      assertEvent(task, { type: 'start' }, 0)
      assertEvent(task, { type: 'end', status: 'aborted', result: reason }, 1)
      assertEvent(task, { type: 'abortionRequested', reason }, 1)
      assertEvent(task, { type: 'end', status: 'failure', result: reason }, 2)
    })
  })

@@ -243,7 +249,7 @@ describe('Task', function () {
      assert.equal(task.status, 'failure')
    })

    it('changes to aborted after run is complete', async function () {
    it('changes to failure if aborted after run is complete', async function () {
      const task = createTask()
      await task
        .run(() => {
@@ -252,13 +258,13 @@ describe('Task', function () {
          Task.abortSignal.throwIfAborted()
        })
        .catch(noop)
      assert.equal(task.status, 'aborted')
      assert.equal(task.status, 'failure')
    })

    it('changes to aborted if aborted when not running', async function () {
    it('changes to failure if aborted when not running', function () {
      const task = createTask()
      task.abort()
      assert.equal(task.status, 'aborted')
      assert.equal(task.status, 'failure')
    })
  })
@@ -13,7 +13,7 @@
    "url": "https://vates.fr"
  },
  "license": "ISC",
  "version": "0.1.2",
  "version": "0.2.0",
  "engines": {
    "node": ">=14"
  },

@@ -1,5 +1,5 @@
import { asyncMap } from '@xen-orchestra/async-map'
import { RemoteAdapter } from '@xen-orchestra/backups/RemoteAdapter.js'
import { RemoteAdapter } from '@xen-orchestra/backups/RemoteAdapter.mjs'
import { getSyncedHandler } from '@xen-orchestra/fs'
import getopts from 'getopts'
import { basename, dirname } from 'path'

@@ -7,9 +7,9 @@
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "dependencies": {
    "@xen-orchestra/async-map": "^0.1.2",
    "@xen-orchestra/backups": "^0.37.0",
    "@xen-orchestra/fs": "^4.0.0",
    "filenamify": "^4.1.0",
    "@xen-orchestra/backups": "^0.40.0",
    "@xen-orchestra/fs": "^4.0.1",
    "filenamify": "^6.0.0",
    "getopts": "^2.2.5",
    "lodash": "^4.17.15",
    "promise-toolbox": "^0.21.0"
@@ -27,7 +27,7 @@
  "scripts": {
    "postversion": "npm publish --access public"
  },
  "version": "1.0.7",
  "version": "1.0.10",
  "license": "AGPL-3.0-or-later",
  "author": {
    "name": "Vates SAS",
@@ -1,16 +0,0 @@
'use strict'

const { Metadata } = require('./_runners/Metadata.js')
const { VmsXapi } = require('./_runners/VmsXapi.js')

exports.createRunner = function createRunner(opts) {
  const { type } = opts.job
  switch (type) {
    case 'backup':
      return new VmsXapi(opts)
    case 'metadataBackup':
      return new Metadata(opts)
    default:
      throw new Error(`No runner for the backup type ${type}`)
  }
}
17
@xen-orchestra/backups/Backup.mjs
Normal file
@@ -0,0 +1,17 @@
import { Metadata } from './_runners/Metadata.mjs'
import { VmsRemote } from './_runners/VmsRemote.mjs'
import { VmsXapi } from './_runners/VmsXapi.mjs'

export function createRunner(opts) {
  const { type } = opts.job
  switch (type) {
    case 'backup':
      return new VmsXapi(opts)
    case 'mirrorBackup':
      return new VmsRemote(opts)
    case 'metadataBackup':
      return new Metadata(opts)
    default:
      throw new Error(`No runner for the backup type ${type}`)
  }
}
@@ -1,8 +1,6 @@
'use strict'
import { asyncMap } from '@xen-orchestra/async-map'

const { asyncMap } = require('@xen-orchestra/async-map')

exports.DurablePartition = class DurablePartition {
export class DurablePartition {
  // private resource API is used exceptionally to be able to separate resource creation and release
  #partitionDisposers = {}

@@ -1,8 +1,6 @@
'use strict'
import { Task } from './Task.mjs'

const { Task } = require('./Task')

exports.HealthCheckVmBackup = class HealthCheckVmBackup {
export class HealthCheckVmBackup {
  #restoredVm
  #timeout
  #xapi
@@ -1,13 +1,11 @@
'use strict'
import assert from 'node:assert'

const assert = require('assert')
import { formatFilenameDate } from './_filenameDate.mjs'
import { importIncrementalVm } from './_incrementalVm.mjs'
import { Task } from './Task.mjs'
import { watchStreamSize } from './_watchStreamSize.mjs'

const { formatFilenameDate } = require('./_filenameDate.js')
const { importIncrementalVm } = require('./_incrementalVm.js')
const { Task } = require('./Task.js')
const { watchStreamSize } = require('./_watchStreamSize.js')

exports.ImportVmBackup = class ImportVmBackup {
export class ImportVmBackup {
  constructor({ adapter, metadata, srUuid, xapi, settings: { newMacAddresses, mapVdisSrs = {} } = {} }) {
    this._adapter = adapter
    this._importIncrementalVmSettings = { newMacAddresses, mapVdisSrs }
@@ -1,43 +1,39 @@
'use strict'
import { asyncEach } from '@vates/async-each'
import { asyncMap, asyncMapSettled } from '@xen-orchestra/async-map'
import { compose } from '@vates/compose'
import { createLogger } from '@xen-orchestra/log'
import { createVhdDirectoryFromStream, openVhd, VhdAbstract, VhdDirectory, VhdSynthetic } from 'vhd-lib'
import { decorateMethodsWith } from '@vates/decorate-with'
import { deduped } from '@vates/disposable/deduped.js'
import { dirname, join, resolve } from 'node:path'
import { execFile } from 'child_process'
import { mount } from '@vates/fuse-vhd'
import { readdir, lstat } from 'node:fs/promises'
import { synchronized } from 'decorator-synchronized'
import { v4 as uuidv4 } from 'uuid'
import { ZipFile } from 'yazl'
import Disposable from 'promise-toolbox/Disposable'
import fromCallback from 'promise-toolbox/fromCallback'
import fromEvent from 'promise-toolbox/fromEvent'
import groupBy from 'lodash/groupBy.js'
import pDefer from 'promise-toolbox/defer'
import pickBy from 'lodash/pickBy.js'
import tar from 'tar'
import zlib from 'zlib'

const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
const { synchronized } = require('decorator-synchronized')
const Disposable = require('promise-toolbox/Disposable')
const fromCallback = require('promise-toolbox/fromCallback')
const fromEvent = require('promise-toolbox/fromEvent')
const pDefer = require('promise-toolbox/defer')
const groupBy = require('lodash/groupBy.js')
const pickBy = require('lodash/pickBy.js')
const { dirname, join, normalize, resolve } = require('path')
const { createLogger } = require('@xen-orchestra/log')
const { createVhdDirectoryFromStream, openVhd, VhdAbstract, VhdDirectory, VhdSynthetic } = require('vhd-lib')
const { deduped } = require('@vates/disposable/deduped.js')
const { decorateMethodsWith } = require('@vates/decorate-with')
const { compose } = require('@vates/compose')
const { execFile } = require('child_process')
const { readdir, lstat } = require('fs-extra')
const { v4: uuidv4 } = require('uuid')
const { ZipFile } = require('yazl')
const zlib = require('zlib')
import { BACKUP_DIR } from './_getVmBackupDir.mjs'
import { cleanVm } from './_cleanVm.mjs'
import { formatFilenameDate } from './_filenameDate.mjs'
import { getTmpDir } from './_getTmpDir.mjs'
import { isMetadataFile } from './_backupType.mjs'
import { isValidXva } from './_isValidXva.mjs'
import { listPartitions, LVM_PARTITION_TYPE } from './_listPartitions.mjs'
import { lvs, pvs } from './_lvm.mjs'
import { watchStreamSize } from './_watchStreamSize.mjs'

const { BACKUP_DIR } = require('./_getVmBackupDir.js')
const { cleanVm } = require('./_cleanVm.js')
const { formatFilenameDate } = require('./_filenameDate.js')
const { getTmpDir } = require('./_getTmpDir.js')
const { isMetadataFile } = require('./_backupType.js')
const { isValidXva } = require('./_isValidXva.js')
const { listPartitions, LVM_PARTITION_TYPE } = require('./_listPartitions.js')
const { lvs, pvs } = require('./_lvm.js')
const { watchStreamSize } = require('./_watchStreamSize')
// @todo: this import is marked extraneous, should be fixed when the lib is published
const { mount } = require('@vates/fuse-vhd')
const { asyncEach } = require('@vates/async-each')
export const DIR_XO_CONFIG_BACKUPS = 'xo-config-backups'

const DIR_XO_CONFIG_BACKUPS = 'xo-config-backups'
exports.DIR_XO_CONFIG_BACKUPS = DIR_XO_CONFIG_BACKUPS

const DIR_XO_POOL_METADATA_BACKUPS = 'xo-pool-metadata-backups'
exports.DIR_XO_POOL_METADATA_BACKUPS = DIR_XO_POOL_METADATA_BACKUPS
export const DIR_XO_POOL_METADATA_BACKUPS = 'xo-pool-metadata-backups'

const { debug, warn } = createLogger('xo:backups:RemoteAdapter')

@@ -46,20 +42,23 @@ const compareTimestamp = (a, b) => a.timestamp - b.timestamp
const noop = Function.prototype

const resolveRelativeFromFile = (file, path) => resolve('/', dirname(file), path).slice(1)
const makeRelative = path => resolve('/', path).slice(1)
const resolveSubpath = (root, path) => resolve(root, makeRelative(path))

const resolveSubpath = (root, path) => resolve(root, `.${resolve('/', path)}`)
async function addZipEntries(zip, realBasePath, virtualBasePath, relativePaths) {
  for (const relativePath of relativePaths) {
    const realPath = join(realBasePath, relativePath)
    const virtualPath = join(virtualBasePath, relativePath)

async function addDirectory(files, realPath, metadataPath) {
  const stats = await lstat(realPath)
  if (stats.isDirectory()) {
    await asyncMap(await readdir(realPath), file =>
      addDirectory(files, realPath + '/' + file, metadataPath + '/' + file)
    )
  } else if (stats.isFile()) {
    files.push({
      realPath,
      metadataPath,
    })
    const stats = await lstat(realPath)
    const { mode, mtime } = stats
    const opts = { mode, mtime }
    if (stats.isDirectory()) {
      zip.addEmptyDirectory(virtualPath, opts)
      await addZipEntries(zip, realPath, virtualPath, await readdir(realPath))
    } else if (stats.isFile()) {
      zip.addFile(realPath, virtualPath, opts)
    }
  }
}

@@ -76,7 +75,7 @@ const debounceResourceFactory = factory =>
    return this._debounceResource(factory.apply(this, arguments))
  }

class RemoteAdapter {
export class RemoteAdapter {
  constructor(
    handler,
    { debounceResource = res => res, dirMode, vhdDirectoryCompression, useGetDiskLegacy = false } = {}
@@ -187,17 +186,6 @@ class RemoteAdapter {
    })
  }

  async *_usePartitionFiles(diskId, partitionId, paths) {
    const path = yield this.getPartition(diskId, partitionId)

    const files = []
    await asyncMap(paths, file =>
      addDirectory(files, resolveSubpath(path, file), normalize('./' + file).replace(/\/+$/, ''))
    )

    return files
  }

  // check if we will be allowed to merge a vhd created in this adapter
  // with the vhd at path `path`
  async isMergeableParent(packedParentUid, path) {
@@ -214,15 +202,24 @@ class RemoteAdapter {
    })
  }

  fetchPartitionFiles(diskId, partitionId, paths) {
  fetchPartitionFiles(diskId, partitionId, paths, format) {
    const { promise, reject, resolve } = pDefer()
    Disposable.use(
      async function* () {
        const files = yield this._usePartitionFiles(diskId, partitionId, paths)
        const zip = new ZipFile()
        files.forEach(({ realPath, metadataPath }) => zip.addFile(realPath, metadataPath))
        zip.end()
        const { outputStream } = zip
        const path = yield this.getPartition(diskId, partitionId)
        let outputStream

        if (format === 'tgz') {
          outputStream = tar.c({ cwd: path, gzip: true }, paths.map(makeRelative))
        } else if (format === 'zip') {
          const zip = new ZipFile()
          await addZipEntries(zip, path, '', paths.map(makeRelative))
          zip.end()
          ;({ outputStream } = zip)
        } else {
          throw new Error('unsupported format ' + format)
        }

        resolve(outputStream)
        await fromEvent(outputStream, 'end')
      }.bind(this)
@@ -829,11 +826,7 @@ decorateMethodsWith(RemoteAdapter, {
    debounceResourceFactory,
  ]),

  _usePartitionFiles: Disposable.factory,

  getDisk: compose([Disposable.factory, [deduped, diskId => [diskId]], debounceResourceFactory]),

  getPartition: Disposable.factory,
})

exports.RemoteAdapter = RemoteAdapter
@@ -1,9 +1,9 @@
'use strict'
import { join, resolve } from 'node:path/posix'

const { DIR_XO_POOL_METADATA_BACKUPS } = require('./RemoteAdapter.js')
const { PATH_DB_DUMP } = require('./_runners/_PoolMetadataBackup.js')
import { DIR_XO_POOL_METADATA_BACKUPS } from './RemoteAdapter.mjs'
import { PATH_DB_DUMP } from './_runners/_PoolMetadataBackup.mjs'

exports.RestoreMetadataBackup = class RestoreMetadataBackup {
export class RestoreMetadataBackup {
  constructor({ backupId, handler, xapi }) {
    this._backupId = backupId
    this._handler = handler
@@ -20,7 +20,8 @@ exports.RestoreMetadataBackup = class RestoreMetadataBackup {
        task: xapi.task_create('Import pool metadata'),
      })
    } else {
      return String(await handler.readFile(`${backupId}/data.json`))
      const metadata = JSON.parse(await handler.readFile(join(backupId, 'metadata.json')))
      return String(await handler.readFile(resolve(backupId, metadata.data ?? 'data.json')))
    }
  }
}
@@ -1,7 +1,5 @@
'use strict'

const CancelToken = require('promise-toolbox/CancelToken')
const Zone = require('node-zone')
import CancelToken from 'promise-toolbox/CancelToken'
import Zone from 'node-zone'

const logAfterEnd = log => {
  const error = new Error('task has already ended')
@@ -30,7 +28,7 @@ const serializeError = error =>

const $$task = Symbol('@xen-orchestra/backups/Task')

class Task {
export class Task {
  static get cancelToken() {
    const task = Zone.current.data[$$task]
    return task !== undefined ? task.#cancelToken : CancelToken.none
@@ -151,7 +149,6 @@ class Task {
    })
  }
}
exports.Task = Task

for (const method of ['info', 'warning']) {
  Task[method] = (...args) => Zone.current.data[$$task]?.[method](...args)
@@ -1,6 +0,0 @@
'use strict'

exports.isMetadataFile = filename => filename.endsWith('.json')
exports.isVhdFile = filename => filename.endsWith('.vhd')
exports.isXvaFile = filename => filename.endsWith('.xva')
exports.isXvaSumFile = filename => filename.endsWith('.xva.checksum')
4
@xen-orchestra/backups/_backupType.mjs
Normal file
@@ -0,0 +1,4 @@
export const isMetadataFile = filename => filename.endsWith('.json')
export const isVhdFile = filename => filename.endsWith('.vhd')
export const isXvaFile = filename => filename.endsWith('.xva')
export const isXvaSumFile = filename => filename.endsWith('.xva.checksum')
@@ -1,25 +1,25 @@
'use strict'
import { createLogger } from '@xen-orchestra/log'
import { catchGlobalErrors } from '@xen-orchestra/log/configure'

const logger = require('@xen-orchestra/log').createLogger('xo:backups:worker')
import Disposable from 'promise-toolbox/Disposable'
import ignoreErrors from 'promise-toolbox/ignoreErrors'
import { compose } from '@vates/compose'
import { createCachedLookup } from '@vates/cached-dns.lookup'
import { createDebounceResource } from '@vates/disposable/debounceResource.js'
import { createRunner } from './Backup.mjs'
import { decorateMethodsWith } from '@vates/decorate-with'
import { deduped } from '@vates/disposable/deduped.js'
import { getHandler } from '@xen-orchestra/fs'
import { parseDuration } from '@vates/parse-duration'
import { Xapi } from '@xen-orchestra/xapi'

require('@xen-orchestra/log/configure').catchGlobalErrors(logger)
import { RemoteAdapter } from './RemoteAdapter.mjs'
import { Task } from './Task.mjs'

require('@vates/cached-dns.lookup').createCachedLookup().patchGlobal()

const Disposable = require('promise-toolbox/Disposable')
const ignoreErrors = require('promise-toolbox/ignoreErrors')
const { compose } = require('@vates/compose')
const { createDebounceResource } = require('@vates/disposable/debounceResource.js')
const { decorateMethodsWith } = require('@vates/decorate-with')
const { deduped } = require('@vates/disposable/deduped.js')
const { getHandler } = require('@xen-orchestra/fs')
const { createRunner } = require('./Backup.js')
const { parseDuration } = require('@vates/parse-duration')
const { Xapi } = require('@xen-orchestra/xapi')

const { RemoteAdapter } = require('./RemoteAdapter.js')
const { Task } = require('./Task.js')
createCachedLookup().patchGlobal()

const logger = createLogger('xo:backups:worker')
catchGlobalErrors(logger)
const { debug } = logger

class BackupWorker {
@@ -1,13 +1,11 @@
'use strict'

const cancelable = require('promise-toolbox/cancelable')
const CancelToken = require('promise-toolbox/CancelToken')
import cancelable from 'promise-toolbox/cancelable'
import CancelToken from 'promise-toolbox/CancelToken'

// Similar to `Promise.all` + `map` but pass a cancel token to the callback
//
// If any of the executions fails, the cancel token will be triggered and the
// first reason will be rejected.
exports.cancelableMap = cancelable(async function cancelableMap($cancelToken, iterable, callback) {
export const cancelableMap = cancelable(async function cancelableMap($cancelToken, iterable, callback) {
  const { cancel, token } = CancelToken.source([$cancelToken])
  try {
    return await Promise.all(
@@ -1,19 +1,19 @@
'use strict'
import test from 'test'
import { strict as assert } from 'node:assert'

const { beforeEach, afterEach, test, describe } = require('test')
const assert = require('assert').strict
import tmp from 'tmp'
import fs from 'fs-extra'
import * as uuid from 'uuid'
import { getHandler } from '@xen-orchestra/fs'
import { pFromCallback } from 'promise-toolbox'
import { RemoteAdapter } from './RemoteAdapter.mjs'
import { VHDFOOTER, VHDHEADER } from './tests.fixtures.mjs'
import { VhdFile, Constants, VhdDirectory, VhdAbstract } from 'vhd-lib'
import { checkAliases } from './_cleanVm.mjs'
import { dirname, basename } from 'node:path'
import { rimraf } from 'rimraf'

const tmp = require('tmp')
const fs = require('fs-extra')
const uuid = require('uuid')
const { getHandler } = require('@xen-orchestra/fs')
const { pFromCallback } = require('promise-toolbox')
const { RemoteAdapter } = require('./RemoteAdapter')
const { VHDFOOTER, VHDHEADER } = require('./tests.fixtures.js')
const { VhdFile, Constants, VhdDirectory, VhdAbstract } = require('vhd-lib')
const { checkAliases } = require('./_cleanVm')
const { dirname, basename } = require('path')
const { rimraf } = require('rimraf')
const { beforeEach, afterEach, describe } = test

let tempDir, adapter, handler, jobId, vdiId, basePath, relativePath
const rootPath = 'xo-vm-backups/VMUUID/'
@@ -1,19 +1,18 @@
'use strict'
import * as UUID from 'uuid'
import sum from 'lodash/sum.js'
import { asyncMap } from '@xen-orchestra/async-map'
import { Constants, openVhd, VhdAbstract, VhdFile } from 'vhd-lib'
import { isVhdAlias, resolveVhdAlias } from 'vhd-lib/aliases.js'
import { dirname, resolve } from 'node:path'
import { isMetadataFile, isVhdFile, isXvaFile, isXvaSumFile } from './_backupType.mjs'
import { limitConcurrency } from 'limit-concurrency-decorator'
import { mergeVhdChain } from 'vhd-lib/merge.js'

import { Task } from './Task.mjs'
import { Disposable } from 'promise-toolbox'
import handlerPath from '@xen-orchestra/fs/path'

const sum = require('lodash/sum')
const UUID = require('uuid')
const { asyncMap } = require('@xen-orchestra/async-map')
const { Constants, openVhd, VhdAbstract, VhdFile } = require('vhd-lib')
const { isVhdAlias, resolveVhdAlias } = require('vhd-lib/aliases')
const { dirname, resolve } = require('path')
const { DISK_TYPES } = Constants
const { isMetadataFile, isVhdFile, isXvaFile, isXvaSumFile } = require('./_backupType.js')
const { limitConcurrency } = require('limit-concurrency-decorator')
const { mergeVhdChain } = require('vhd-lib/merge')

const { Task } = require('./Task.js')
const { Disposable } = require('promise-toolbox')
const handlerPath = require('@xen-orchestra/fs/path')

// checking the size of a vhd directory is costly
// 1 Http Query per 1000 blocks
@@ -117,7 +116,7 @@ const listVhds = async (handler, vmDir, logWarn) => {
  return { vhds, interruptedVhds, aliases }
}

async function checkAliases(
export async function checkAliases(
  aliasPaths,
  targetDataRepository,
  { handler, logInfo = noop, logWarn = console.warn, remove = false }
@@ -176,11 +175,9 @@ async function checkAliases(
  })
}

exports.checkAliases = checkAliases

const defaultMergeLimiter = limitConcurrency(1)

exports.cleanVm = async function cleanVm(
export async function cleanVm(
  vmDir,
  {
    fixMetadata,
@@ -1,8 +0,0 @@
'use strict'

const { utcFormat, utcParse } = require('d3-time-format')

// Format a date in ISO 8601 in a safe way to be used in filenames
// (even on Windows).
exports.formatFilenameDate = utcFormat('%Y%m%dT%H%M%SZ')
exports.parseFilenameDate = utcParse('%Y%m%dT%H%M%SZ')
6
@xen-orchestra/backups/_filenameDate.mjs
Normal file
@@ -0,0 +1,6 @@
import { utcFormat, utcParse } from 'd3-time-format'

// Format a date in ISO 8601 in a safe way to be used in filenames
// (even on Windows).
export const formatFilenameDate = utcFormat('%Y%m%dT%H%M%SZ')
export const parseFilenameDate = utcParse('%Y%m%dT%H%M%SZ')
@@ -1,6 +1,4 @@
'use strict'

// returns all entries but the last retention-th
exports.getOldEntries = function getOldEntries(retention, entries) {
export function getOldEntries(retention, entries) {
  return entries === undefined ? [] : retention > 0 ? entries.slice(0, -retention) : entries
}
@@ -1,13 +1,11 @@
'use strict'

const Disposable = require('promise-toolbox/Disposable')
const { join } = require('path')
const { mkdir, rmdir } = require('fs-extra')
const { tmpdir } = require('os')
import Disposable from 'promise-toolbox/Disposable'
import { join } from 'node:path'
import { mkdir, rmdir } from 'node:fs/promises'
import { tmpdir } from 'os'

const MAX_ATTEMPTS = 3

exports.getTmpDir = async function getTmpDir() {
export async function getTmpDir() {
  for (let i = 0; true; ++i) {
    const path = join(tmpdir(), Math.random().toString(36).slice(2))
    try {
|
||||
'use strict'
|
||||
|
||||
const BACKUP_DIR = 'xo-vm-backups'
|
||||
exports.BACKUP_DIR = BACKUP_DIR
|
||||
|
||||
exports.getVmBackupDir = function getVmBackupDir(uuid) {
|
||||
return `${BACKUP_DIR}/${uuid}`
|
||||
}
|
||||
5
@xen-orchestra/backups/_getVmBackupDir.mjs
Normal file
5
@xen-orchestra/backups/_getVmBackupDir.mjs
Normal file
@@ -0,0 +1,5 @@
|
||||
export const BACKUP_DIR = 'xo-vm-backups'
|
||||
|
||||
export function getVmBackupDir(uuid) {
|
||||
return `${BACKUP_DIR}/${uuid}`
|
||||
}
|
||||
@@ -1,24 +1,22 @@
'use strict'
import find from 'lodash/find.js'
import groupBy from 'lodash/groupBy.js'
import ignoreErrors from 'promise-toolbox/ignoreErrors'
import omit from 'lodash/omit.js'
import { asyncMap } from '@xen-orchestra/async-map'
import { CancelToken } from 'promise-toolbox'
import { compareVersions } from 'compare-versions'
import { createVhdStreamWithLength } from 'vhd-lib'
import { defer } from 'golike-defer'

const find = require('lodash/find.js')
const groupBy = require('lodash/groupBy.js')
const ignoreErrors = require('promise-toolbox/ignoreErrors')
const omit = require('lodash/omit.js')
const { asyncMap } = require('@xen-orchestra/async-map')
const { CancelToken } = require('promise-toolbox')
const { compareVersions } = require('compare-versions')
const { createVhdStreamWithLength } = require('vhd-lib')
const { defer } = require('golike-defer')
import { cancelableMap } from './_cancelableMap.mjs'
import { Task } from './Task.mjs'
import pick from 'lodash/pick.js'

const { cancelableMap } = require('./_cancelableMap.js')
const { Task } = require('./Task.js')
const pick = require('lodash/pick.js')
export const TAG_BASE_DELTA = 'xo:base_delta'

const TAG_BASE_DELTA = 'xo:base_delta'
exports.TAG_BASE_DELTA = TAG_BASE_DELTA
export const TAG_COPY_SRC = 'xo:copy_of'

const TAG_COPY_SRC = 'xo:copy_of'
exports.TAG_COPY_SRC = TAG_COPY_SRC
const TAG_BACKUP_SR = 'xo:backup:sr'

const ensureArray = value => (value === undefined ? [] : Array.isArray(value) ? value : [value])
const resolveUuid = async (xapi, cache, uuid, type) => {
@@ -33,7 +31,7 @@ const resolveUuid = async (xapi, cache, uuid, type) => {
  return ref
}

exports.exportIncrementalVm = async function exportIncrementalVm(
export async function exportIncrementalVm(
  vm,
  baseVm,
  {
@@ -143,7 +141,7 @@ exports.exportIncrementalVm = async function exportIncrementalVm(
  )
}

exports.importIncrementalVm = defer(async function importIncrementalVm(
export const importIncrementalVm = defer(async function importIncrementalVm(
  $defer,
  incrementalVm,
  sr,
@@ -161,7 +159,10 @@ exports.importIncrementalVm = defer(async function importIncrementalVm(
  if (detectBase) {
    const remoteBaseVmUuid = vmRecord.other_config[TAG_BASE_DELTA]
    if (remoteBaseVmUuid) {
      baseVm = find(xapi.objects.all, obj => (obj = obj.other_config) && obj[TAG_COPY_SRC] === remoteBaseVmUuid)
      baseVm = find(
        xapi.objects.all,
        obj => (obj = obj.other_config) && obj[TAG_COPY_SRC] === remoteBaseVmUuid && obj[TAG_BACKUP_SR] === sr.$id
      )

      if (!baseVm) {
        throw new Error(`could not find the base VM (copy of ${remoteBaseVmUuid})`)
@@ -1,6 +1,4 @@
'use strict'

const assert = require('assert')
import assert from 'node:assert'

const COMPRESSED_MAGIC_NUMBERS = [
  // https://tools.ietf.org/html/rfc1952.html#page-5
@@ -47,7 +45,7 @@ const isValidTar = async (handler, size, fd) => {
}

// TODO: find a heuristic for compressed files
async function isValidXva(path) {
export async function isValidXva(path) {
  const handler = this._handler

  // size is longer when encrypted + reading part of an encrypted file is not implemented
@@ -74,6 +72,5 @@ async function isValidXva(path) {
    return true
  }
}
exports.isValidXva = isValidXva

const noop = Function.prototype
@@ -1,9 +1,7 @@
'use strict'

const fromCallback = require('promise-toolbox/fromCallback')
const { createLogger } = require('@xen-orchestra/log')
const { createParser } = require('parse-pairs')
const { execFile } = require('child_process')
import fromCallback from 'promise-toolbox/fromCallback'
import { createLogger } from '@xen-orchestra/log'
import { createParser } from 'parse-pairs'
import { execFile } from 'child_process'

const { debug } = createLogger('xo:backups:listPartitions')

@@ -24,8 +22,7 @@ const IGNORED_PARTITION_TYPES = {
  0x82: true, // swap
}

const LVM_PARTITION_TYPE = 0x8e
exports.LVM_PARTITION_TYPE = LVM_PARTITION_TYPE
export const LVM_PARTITION_TYPE = 0x8e

const parsePartxLine = createParser({
  keyTransform: key => (key === 'UUID' ? 'id' : key.toLowerCase()),
@@ -33,7 +30,7 @@ const parsePartxLine = createParser({
})

// returns an empty array in case of a non-partitioned disk
exports.listPartitions = async function listPartitions(devicePath) {
export async function listPartitions(devicePath) {
  const parts = await fromCallback(execFile, 'partx', [
    '--bytes',
    '--output=NR,START,SIZE,NAME,UUID,TYPE',
@@ -1,8 +1,6 @@
'use strict'

const fromCallback = require('promise-toolbox/fromCallback')
const { createParser } = require('parse-pairs')
const { execFile } = require('child_process')
import fromCallback from 'promise-toolbox/fromCallback'
import { createParser } from 'parse-pairs'
import { execFile } from 'child_process'

// ===================================================================

@@ -29,5 +27,5 @@ const makeFunction =
      .map(Array.isArray(fields) ? parse : line => parse(line)[fields])
}

exports.lvs = makeFunction('lvs')
exports.pvs = makeFunction('pvs')
export const lvs = makeFunction('lvs')
export const pvs = makeFunction('pvs')
@@ -1,22 +1,20 @@
'use strict'
import { asyncMap } from '@xen-orchestra/async-map'
import Disposable from 'promise-toolbox/Disposable'
import ignoreErrors from 'promise-toolbox/ignoreErrors'

const { asyncMap } = require('@xen-orchestra/async-map')
const Disposable = require('promise-toolbox/Disposable')
const ignoreErrors = require('promise-toolbox/ignoreErrors')

const { extractIdsFromSimplePattern } = require('../extractIdsFromSimplePattern.js')
const { PoolMetadataBackup } = require('./_PoolMetadataBackup.js')
const { XoMetadataBackup } = require('./_XoMetadataBackup.js')
const { DEFAULT_SETTINGS, Abstract } = require('./_Abstract.js')
const { runTask } = require('./_runTask.js')
const { getAdaptersByRemote } = require('./_getAdaptersByRemote.js')
import { extractIdsFromSimplePattern } from '../extractIdsFromSimplePattern.mjs'
import { PoolMetadataBackup } from './_PoolMetadataBackup.mjs'
import { XoMetadataBackup } from './_XoMetadataBackup.mjs'
import { DEFAULT_SETTINGS, Abstract } from './_Abstract.mjs'
import { runTask } from './_runTask.mjs'
import { getAdaptersByRemote } from './_getAdaptersByRemote.mjs'

const DEFAULT_METADATA_SETTINGS = {
  retentionPoolMetadata: 0,
  retentionXoMetadata: 0,
}

exports.Metadata = class MetadataBackupRunner extends Abstract {
export const Metadata = class MetadataBackupRunner extends Abstract {
  _computeBaseSettings(config, job) {
    const baseSettings = { ...DEFAULT_SETTINGS }
    Object.assign(baseSettings, DEFAULT_METADATA_SETTINGS, config.defaultSettings, config.metadata?.defaultSettings)
@xen-orchestra/backups/_runners/VmsRemote.mjs (new file, +96)
@@ -0,0 +1,96 @@
import { asyncMapSettled } from '@xen-orchestra/async-map'
import Disposable from 'promise-toolbox/Disposable'
import { limitConcurrency } from 'limit-concurrency-decorator'

import { extractIdsFromSimplePattern } from '../extractIdsFromSimplePattern.mjs'
import { Task } from '../Task.mjs'
import createStreamThrottle from './_createStreamThrottle.mjs'
import { DEFAULT_SETTINGS, Abstract } from './_Abstract.mjs'
import { runTask } from './_runTask.mjs'
import { getAdaptersByRemote } from './_getAdaptersByRemote.mjs'
import { FullRemote } from './_vmRunners/FullRemote.mjs'
import { IncrementalRemote } from './_vmRunners/IncrementalRemote.mjs'

const DEFAULT_REMOTE_VM_SETTINGS = {
  concurrency: 2,
  copyRetention: 0,
  deleteFirst: false,
  exportRetention: 0,
  healthCheckSr: undefined,
  healthCheckVmsWithTags: [],
  maxExportRate: 0,
  maxMergedDeltasPerRun: Infinity,
  timeout: 0,
  validateVhdStreams: false,
  vmTimeout: 0,
}

export const VmsRemote = class RemoteVmsBackupRunner extends Abstract {
  _computeBaseSettings(config, job) {
    const baseSettings = { ...DEFAULT_SETTINGS }
    Object.assign(baseSettings, DEFAULT_REMOTE_VM_SETTINGS, config.defaultSettings, config.vm?.defaultSettings)
    Object.assign(baseSettings, job.settings[''])
    return baseSettings
  }

  async run() {
    const job = this._job
    const schedule = this._schedule
    const settings = this._settings

    const throttleStream = createStreamThrottle(settings.maxExportRate)

    const config = this._config
    await Disposable.use(
      () => this._getAdapter(job.sourceRemote),
      () => (settings.healthCheckSr !== undefined ? this._getRecord('SR', settings.healthCheckSr) : undefined),
      Disposable.all(
        extractIdsFromSimplePattern(job.remotes).map(id => id !== job.sourceRemote && this._getAdapter(id))
      ),
      async ({ adapter: sourceRemoteAdapter }, healthCheckSr, remoteAdapters) => {
        // remove adapters that failed (already handled)
        remoteAdapters = remoteAdapters.filter(_ => !!_)
        if (remoteAdapters.length === 0) {
          return
        }

        const vmsUuids = await sourceRemoteAdapter.listAllVms()

        Task.info('vms', { vms: vmsUuids })

        remoteAdapters = getAdaptersByRemote(remoteAdapters)
        const allSettings = this._job.settings
        const baseSettings = this._baseSettings

        const handleVm = vmUuid => {
          const taskStart = { name: 'backup VM', data: { type: 'VM', id: vmUuid } }

          const opts = {
            baseSettings,
            config,
            job,
            healthCheckSr,
            remoteAdapters,
            schedule,
            settings: { ...settings, ...allSettings[vmUuid] },
            sourceRemoteAdapter,
            throttleStream,
            vmUuid,
          }
          let vmBackup
          if (job.mode === 'delta') {
            vmBackup = new IncrementalRemote(opts)
          } else if (job.mode === 'full') {
            vmBackup = new FullRemote(opts)
          } else {
            throw new Error(`Job mode ${job.mode} not implemented for mirror backup`)
          }

          return runTask(taskStart, () => vmBackup.run())
        }
        const { concurrency } = settings
        await asyncMapSettled(vmsUuids, !concurrency ? handleVm : limitConcurrency(concurrency)(handleVm))
      }
    )
  }
}
@@ -1,17 +1,15 @@
'use strict'
import { asyncMapSettled } from '@xen-orchestra/async-map'
import Disposable from 'promise-toolbox/Disposable'
import { limitConcurrency } from 'limit-concurrency-decorator'

const { asyncMapSettled } = require('@xen-orchestra/async-map')
const Disposable = require('promise-toolbox/Disposable')
const { limitConcurrency } = require('limit-concurrency-decorator')

const { extractIdsFromSimplePattern } = require('../extractIdsFromSimplePattern.js')
const { Task } = require('../Task.js')
const createStreamThrottle = require('./_createStreamThrottle.js')
const { DEFAULT_SETTINGS, Abstract } = require('./_Abstract.js')
const { runTask } = require('./_runTask.js')
const { getAdaptersByRemote } = require('./_getAdaptersByRemote.js')
const { IncrementalXapi } = require('./_vmRunners/IncrementalXapi.js')
const { FullXapi } = require('./_vmRunners/FullXapi.js')
import { extractIdsFromSimplePattern } from '../extractIdsFromSimplePattern.mjs'
import { Task } from '../Task.mjs'
import createStreamThrottle from './_createStreamThrottle.mjs'
import { DEFAULT_SETTINGS, Abstract } from './_Abstract.mjs'
import { runTask } from './_runTask.mjs'
import { getAdaptersByRemote } from './_getAdaptersByRemote.mjs'
import { IncrementalXapi } from './_vmRunners/IncrementalXapi.mjs'
import { FullXapi } from './_vmRunners/FullXapi.mjs'

const DEFAULT_XAPI_VM_SETTINGS = {
  bypassVdiChainsCheck: false,
@@ -19,6 +17,7 @@ const DEFAULT_XAPI_VM_SETTINGS = {
  concurrency: 2,
  copyRetention: 0,
  deleteFirst: false,
  diskPerVmConcurrency: 0, // not limited by default
  exportRetention: 0,
  fullInterval: 0,
  healthCheckSr: undefined,
@@ -35,7 +34,7 @@ const DEFAULT_XAPI_VM_SETTINGS = {
  vmTimeout: 0,
}

exports.VmsXapi = class VmsXapiBackupRunner extends Abstract {
export const VmsXapi = class VmsXapiBackupRunner extends Abstract {
  _computeBaseSettings(config, job) {
    const baseSettings = { ...DEFAULT_SETTINGS }
    Object.assign(baseSettings, DEFAULT_XAPI_VM_SETTINGS, config.defaultSettings, config.vm?.defaultSettings)
@@ -1,17 +1,15 @@
'use strict'
import Disposable from 'promise-toolbox/Disposable'
import pTimeout from 'promise-toolbox/timeout'
import { compileTemplate } from '@xen-orchestra/template'
import { runTask } from './_runTask.mjs'
import { RemoteTimeoutError } from './_RemoteTimeoutError.mjs'

const Disposable = require('promise-toolbox/Disposable')
const pTimeout = require('promise-toolbox/timeout')
const { compileTemplate } = require('@xen-orchestra/template')
const { runTask } = require('./_runTask.js')
const { RemoteTimeoutError } = require('./_RemoteTimeoutError.js')

exports.DEFAULT_SETTINGS = {
export const DEFAULT_SETTINGS = {
  getRemoteTimeout: 300e3,
  reportWhen: 'failure',
}

exports.Abstract = class AbstractRunner {
export const Abstract = class AbstractRunner {
  constructor({ config, getAdapter, getConnectedRecord, job, schedule }) {
    this._config = config
    this._getRecord = getConnectedRecord
@@ -1,16 +1,13 @@
'use strict'
import { asyncMap } from '@xen-orchestra/async-map'

const { asyncMap } = require('@xen-orchestra/async-map')
import { DIR_XO_POOL_METADATA_BACKUPS } from '../RemoteAdapter.mjs'
import { forkStreamUnpipe } from './_forkStreamUnpipe.mjs'
import { formatFilenameDate } from '../_filenameDate.mjs'
import { Task } from '../Task.mjs'

const { DIR_XO_POOL_METADATA_BACKUPS } = require('../RemoteAdapter.js')
const { forkStreamUnpipe } = require('./_forkStreamUnpipe.js')
const { formatFilenameDate } = require('../_filenameDate.js')
const { Task } = require('../Task.js')
export const PATH_DB_DUMP = '/pool/xmldbdump'

const PATH_DB_DUMP = '/pool/xmldbdump'
exports.PATH_DB_DUMP = PATH_DB_DUMP

exports.PoolMetadataBackup = class PoolMetadataBackup {
export class PoolMetadataBackup {
  constructor({ config, job, pool, remoteAdapters, schedule, settings }) {
    this._config = config
    this._job = job
@@ -1,8 +1,6 @@
'use strict'
class RemoteTimeoutError extends Error {
export class RemoteTimeoutError extends Error {
  constructor(remoteId) {
    super('timeout while getting the remote ' + remoteId)
    this.remoteId = remoteId
  }
}
exports.RemoteTimeoutError = RemoteTimeoutError
@@ -1,12 +1,11 @@
'use strict'
import { asyncMap } from '@xen-orchestra/async-map'
import { join } from '@xen-orchestra/fs/path'

const { asyncMap } = require('@xen-orchestra/async-map')
import { DIR_XO_CONFIG_BACKUPS } from '../RemoteAdapter.mjs'
import { formatFilenameDate } from '../_filenameDate.mjs'
import { Task } from '../Task.mjs'

const { DIR_XO_CONFIG_BACKUPS } = require('../RemoteAdapter.js')
const { formatFilenameDate } = require('../_filenameDate.js')
const { Task } = require('../Task.js')

exports.XoMetadataBackup = class XoMetadataBackup {
export class XoMetadataBackup {
  constructor({ config, job, remoteAdapters, schedule, settings }) {
    this._config = config
    this._job = job
@@ -23,10 +22,11 @@ exports.XoMetadataBackup = class XoMetadataBackup {
    const dir = `${scheduleDir}/${formatFilenameDate(timestamp)}`

    const data = job.xoMetadata
    const fileName = `${dir}/data.json`
    const dataBaseName = './data.json'

    const metadata = JSON.stringify(
      {
        data: dataBaseName,
        jobId: job.id,
        jobName: job.name,
        scheduleId: schedule.id,
@@ -36,6 +36,8 @@ exports.XoMetadataBackup = class XoMetadataBackup {
      null,
      2
    )

    const dataFileName = join(dir, dataBaseName)
    const metaDataFileName = `${dir}/metadata.json`

    await asyncMap(
@@ -52,7 +54,7 @@ exports.XoMetadataBackup = class XoMetadataBackup {
      async () => {
        const handler = adapter.handler
        const dirMode = this._config.dirMode
        await handler.outputFile(fileName, data, { dirMode })
        await handler.outputFile(dataFileName, data, { dirMode })
        await handler.outputFile(metaDataFileName, metadata, {
          dirMode,
        })
@@ -1,12 +1,10 @@
'use strict'

const { pipeline } = require('node:stream')
const { ThrottleGroup } = require('@kldzj/stream-throttle')
const identity = require('lodash/identity.js')
import { pipeline } from 'node:stream'
import { ThrottleGroup } from '@kldzj/stream-throttle'
import identity from 'lodash/identity.js'

const noop = Function.prototype

module.exports = function createStreamThrottle(rate) {
export default function createStreamThrottle(rate) {
  if (rate === 0) {
    return identity
  }
@@ -1,14 +1,13 @@
'use strict'
import { createLogger } from '@xen-orchestra/log'
import { finished, PassThrough } from 'node:stream'

const { finished, PassThrough } = require('node:stream')

const { debug } = require('@xen-orchestra/log').createLogger('xo:backups:forkStreamUnpipe')
const { debug } = createLogger('xo:backups:forkStreamUnpipe')

// create a new readable stream from an existing one which may be piped later
//
// in case of error in the new readable stream, it will simply be unpiped
// from the original one
exports.forkStreamUnpipe = function forkStreamUnpipe(source) {
export function forkStreamUnpipe(source) {
  const { forks = 0 } = source
  source.forks = forks + 1
@@ -1,9 +1,7 @@
'use strict'
const getAdaptersByRemote = adapters => {
export function getAdaptersByRemote(adapters) {
  const adaptersByRemote = {}
  adapters.forEach(({ adapter, remoteId }) => {
    adaptersByRemote[remoteId] = adapter
  })
  return adaptersByRemote
}
exports.getAdaptersByRemote = getAdaptersByRemote
@@ -1,6 +0,0 @@
'use strict'
const { Task } = require('../Task.js')
const noop = Function.prototype
const runTask = (...args) => Task.run(...args).catch(noop) // errors are handled by logs

exports.runTask = runTask
@xen-orchestra/backups/_runners/_runTask.mjs (new file, +5)
@@ -0,0 +1,5 @@
import { Task } from '../Task.mjs'

const noop = Function.prototype

export const runTask = (...args) => Task.run(...args).catch(noop) // errors are handled by logs
@xen-orchestra/backups/_runners/_vmRunners/FullRemote.mjs (new file, +50)
@@ -0,0 +1,50 @@
import { decorateMethodsWith } from '@vates/decorate-with'
import { defer } from 'golike-defer'
import { AbstractRemote } from './_AbstractRemote.mjs'
import { FullRemoteWriter } from '../_writers/FullRemoteWriter.mjs'
import { forkStreamUnpipe } from '../_forkStreamUnpipe.mjs'
import { watchStreamSize } from '../../_watchStreamSize.mjs'
import { Task } from '../../Task.mjs'

export const FullRemote = class FullRemoteVmBackupRunner extends AbstractRemote {
  _getRemoteWriter() {
    return FullRemoteWriter
  }
  async _run($defer) {
    const transferList = await this._computeTransferList(({ mode }) => mode === 'full')

    await this._callWriters(async writer => {
      await writer.beforeBackup()
      $defer(async () => {
        await writer.afterBackup()
      })
    }, 'writer.beforeBackup()')
    if (transferList.length > 0) {
      for (const metadata of transferList) {
        const stream = await this._sourceRemoteAdapter.readFullVmBackup(metadata)
        const sizeContainer = watchStreamSize(stream)

        // @todo: shouldn't transfer the backup if it will be deleted by the retention policy (higher retention on source than on destination)
        await this._callWriters(
          writer =>
            writer.run({
              stream: forkStreamUnpipe(stream),
              timestamp: metadata.timestamp,
              vm: metadata.vm,
              vmSnapshot: metadata.vmSnapshot,
              sizeContainer,
            }),
          'writer.run()'
        )
        // for healthcheck
        this._tags = metadata.vm.tags
      }
    } else {
      Task.info('No new data to upload for this VM')
    }
  }
}

decorateMethodsWith(FullRemote, {
  _run: defer,
})
@@ -1,22 +1,20 @@
'use strict'
import { createLogger } from '@xen-orchestra/log'

const { createLogger } = require('@xen-orchestra/log')

const { forkStreamUnpipe } = require('../_forkStreamUnpipe.js')
const { FullRemoteWriter } = require('../_writers/FullRemoteWriter.js')
const { FullXapiWriter } = require('../_writers/FullXapiWriter.js')
const { watchStreamSize } = require('../../_watchStreamSize.js')
const { AbstractXapi } = require('./_AbstractXapi.js')
import { forkStreamUnpipe } from '../_forkStreamUnpipe.mjs'
import { FullRemoteWriter } from '../_writers/FullRemoteWriter.mjs'
import { FullXapiWriter } from '../_writers/FullXapiWriter.mjs'
import { watchStreamSize } from '../../_watchStreamSize.mjs'
import { AbstractXapi } from './_AbstractXapi.mjs'

const { debug } = createLogger('xo:backups:FullXapiVmBackup')

exports.FullXapi = class FullXapiVmBackupRunner extends AbstractXapi {
export const FullXapi = class FullXapiVmBackupRunner extends AbstractXapi {
  _getWriters() {
    return [FullRemoteWriter, FullXapiWriter]
  }

  _mustDoSnapshot() {
    const { vm } = this
    const vm = this._vm

    const settings = this._settings
    return (
@@ -29,8 +27,10 @@ exports.FullXapi = class FullXapiVmBackupRunner extends AbstractXapi {

  async _copy() {
    const { compression } = this.job
    const vm = this._vm
    const exportedVm = this._exportedVm
    const stream = this._throttleStream(
      await this._xapi.VM_export(this.exportedVm.$ref, {
      await this._xapi.VM_export(exportedVm.$ref, {
        compress: Boolean(compression) && (compression === 'native' ? 'gzip' : 'zstd'),
        useSnapshot: false,
      })
@@ -45,6 +45,8 @@ exports.FullXapi = class FullXapiVmBackupRunner extends AbstractXapi {
        sizeContainer,
        stream: forkStreamUnpipe(stream),
        timestamp,
        vm,
        vmSnapshot: exportedVm,
      }),
      'writer.run()'
    )
@@ -0,0 +1,66 @@
import { asyncEach } from '@vates/async-each'
import { decorateMethodsWith } from '@vates/decorate-with'
import { defer } from 'golike-defer'
import assert from 'node:assert'
import isVhdDifferencingDisk from 'vhd-lib/isVhdDifferencingDisk.js'
import mapValues from 'lodash/mapValues.js'

import { AbstractRemote } from './_AbstractRemote.mjs'
import { forkDeltaExport } from './_forkDeltaExport.mjs'
import { IncrementalRemoteWriter } from '../_writers/IncrementalRemoteWriter.mjs'
import { Task } from '../../Task.mjs'

class IncrementalRemoteVmBackupRunner extends AbstractRemote {
  _getRemoteWriter() {
    return IncrementalRemoteWriter
  }
  async _run($defer) {
    const transferList = await this._computeTransferList(({ mode }) => mode === 'delta')
    await this._callWriters(async writer => {
      await writer.beforeBackup()
      $defer(async () => {
        await writer.afterBackup()
      })
    }, 'writer.beforeBackup()')

    if (transferList.length > 0) {
      for (const metadata of transferList) {
        assert.strictEqual(metadata.mode, 'delta')

        await this._callWriters(writer => writer.prepare({ isBase: metadata.isBase }), 'writer.prepare()')
        const incrementalExport = await this._sourceRemoteAdapter.readIncrementalVmBackup(metadata, undefined, {
          useChain: false,
        })

        const differentialVhds = {}

        await asyncEach(Object.entries(incrementalExport.streams), async ([key, stream]) => {
          differentialVhds[key] = await isVhdDifferencingDisk(stream)
        })

        incrementalExport.streams = mapValues(incrementalExport.streams, this._throttleStream)
        await this._callWriters(
          writer =>
            writer.transfer({
              deltaExport: forkDeltaExport(incrementalExport),
              differentialVhds,
              timestamp: metadata.timestamp,
              vm: metadata.vm,
              vmSnapshot: metadata.vmSnapshot,
            }),
          'writer.transfer()'
        )
        await this._callWriters(writer => writer.cleanup(), 'writer.cleanup()')
        // for healthcheck
        this._tags = metadata.vm.tags
      }
    } else {
      Task.info('No new data to upload for this VM')
    }
  }
}

export const IncrementalRemote = IncrementalRemoteVmBackupRunner
decorateMethodsWith(IncrementalRemoteVmBackupRunner, {
  _run: defer,
})
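For readers unfamiliar with the `defer` decoration applied to `_run` above: golike-defer passes a `$defer` callback as the decorated function's first argument and runs everything registered through it once the function settles, similar to Go's defer. A minimal sketch under that assumption (the `demo`/`resource` names are hypothetical, not from the diff):

import { defer } from 'golike-defer'

const demo = defer(async function ($defer, resource) {
  $defer(() => console.log('cleanup, runs after the body settles'))
  console.log('using', resource)
})
// await demo('r1') → logs "using r1", then the cleanup message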
@@ -1,26 +1,26 @@
'use strict'
import { asyncEach } from '@vates/async-each'
import { asyncMap } from '@xen-orchestra/async-map'
import { createLogger } from '@xen-orchestra/log'
import { pipeline } from 'node:stream'
import findLast from 'lodash/findLast.js'
import isVhdDifferencingDisk from 'vhd-lib/isVhdDifferencingDisk.js'
import keyBy from 'lodash/keyBy.js'
import mapValues from 'lodash/mapValues.js'
import vhdStreamValidator from 'vhd-lib/vhdStreamValidator.js'

const findLast = require('lodash/findLast.js')
const keyBy = require('lodash/keyBy.js')
const mapValues = require('lodash/mapValues.js')
const vhdStreamValidator = require('vhd-lib/vhdStreamValidator.js')
const { asyncMap } = require('@xen-orchestra/async-map')
const { createLogger } = require('@xen-orchestra/log')
const { pipeline } = require('node:stream')

const { IncrementalRemoteWriter } = require('../_writers/IncrementalRemoteWriter.js')
const { IncrementalXapiWriter } = require('../_writers/IncrementalXapiWriter.js')
const { exportIncrementalVm } = require('../../_incrementalVm.js')
const { Task } = require('../../Task.js')
const { watchStreamSize } = require('../../_watchStreamSize.js')
const { AbstractXapi } = require('./_AbstractXapi.js')
const { forkDeltaExport } = require('./_forkDeltaExport.js')
import { AbstractXapi } from './_AbstractXapi.mjs'
import { exportIncrementalVm } from '../../_incrementalVm.mjs'
import { forkDeltaExport } from './_forkDeltaExport.mjs'
import { IncrementalRemoteWriter } from '../_writers/IncrementalRemoteWriter.mjs'
import { IncrementalXapiWriter } from '../_writers/IncrementalXapiWriter.mjs'
import { Task } from '../../Task.mjs'
import { watchStreamSize } from '../../_watchStreamSize.mjs'

const { debug } = createLogger('xo:backups:IncrementalXapiVmBackup')

const noop = Function.prototype

exports.IncrementalXapi = class IncrementalXapiVmBackupRunner extends AbstractXapi {
export const IncrementalXapi = class IncrementalXapiVmBackupRunner extends AbstractXapi {
  _getWriters() {
    return [IncrementalRemoteWriter, IncrementalXapiWriter]
  }
@@ -30,8 +30,9 @@ exports.IncrementalXapi = class IncrementalXapiVmBackupRunner extends AbstractXa
  }

  async _copy() {
    const { exportedVm } = this
    const baseVm = this._baseVm
    const vm = this._vm
    const exportedVm = this._exportedVm
    const fullVdisRequired = this._fullVdisRequired

    const isFull = fullVdisRequired === undefined || fullVdisRequired.size !== 0
@@ -46,12 +47,18 @@ exports.IncrementalXapi = class IncrementalXapiVmBackupRunner extends AbstractXa
    if (Object.values(deltaExport.streams).some(({ _nbd }) => _nbd)) {
      Task.info('Transfer data using NBD')
    }

    const differentialVhds = {}
    // since isVhdDifferencingDisk reads and unshifts data from the stream,
    // it should be done BEFORE any other stream transform
    await asyncEach(Object.entries(deltaExport.streams), async ([key, stream]) => {
      differentialVhds[key] = await isVhdDifferencingDisk(stream)
    })
    const sizeContainers = mapValues(deltaExport.streams, stream => watchStreamSize(stream))

    if (this._settings.validateVhdStreams) {
      deltaExport.streams = mapValues(deltaExport.streams, stream => pipeline(stream, vhdStreamValidator, noop))
    }

    deltaExport.streams = mapValues(deltaExport.streams, this._throttleStream)

    const timestamp = Date.now()
@@ -60,8 +67,11 @@ exports.IncrementalXapi = class IncrementalXapiVmBackupRunner extends AbstractXa
      writer =>
        writer.transfer({
          deltaExport: forkDeltaExport(deltaExport),
          differentialVhds,
          sizeContainers,
          timestamp,
          vm,
          vmSnapshot: exportedVm,
        }),
      'writer.transfer()'
    )
@@ -108,7 +118,7 @@ exports.IncrementalXapi = class IncrementalXapiVmBackupRunner extends AbstractXa
      return
    }

    const srcVdis = keyBy(await xapi.getRecords('VDI', await this.vm.$getDisks()), '$ref')
    const srcVdis = keyBy(await xapi.getRecords('VDI', await this._vm.$getDisks()), '$ref')

    // resolve full record
    baseVm = await xapi.getRecord('VM', baseVm.$ref)
@@ -1,8 +1,6 @@
'use strict'

const { asyncMap } = require('@xen-orchestra/async-map')
const { createLogger } = require('@xen-orchestra/log')
const { Task } = require('../../Task.js')
import { asyncMap } from '@xen-orchestra/async-map'
import { createLogger } from '@xen-orchestra/log'
import { Task } from '../../Task.mjs'

const { debug, warn } = createLogger('xo:backups:AbstractVmRunner')

@@ -19,7 +17,7 @@ const asyncEach = async (iterable, fn, thisArg = iterable) => {
  }
}

exports.Abstract = class AbstractVmBackupRunner {
export const Abstract = class AbstractVmBackupRunner {
  // calls fn for each writer, warns of any errors, and throws only if there are no writers left
  async _callWriters(fn, step, parallel = true) {
    const writers = this._writers
@@ -75,13 +73,21 @@ exports.Abstract = class AbstractVmBackupRunner {
    }

    // check if the current VM has the tags
    const { tags } = this.vm
    const tags = this._tags
    const intersect = settings.healthCheckVmsWithTags.some(t => tags.includes(t))

    if (settings.healthCheckVmsWithTags.length !== 0 && !intersect) {
      return
      // create a task to have an info entry in the logs and reports
      return Task.run(
        {
          name: 'health check',
        },
        () => {
          Task.info(`This VM doesn't match the health check's tags for this schedule`)
        }
      )
    }

    await this._callWriters(writer => writer.healthCheck(this._healthCheckSr), 'writer.healthCheck()')
    await this._callWriters(writer => writer.healthCheck(), 'writer.healthCheck()')
  }
}
@@ -0,0 +1,97 @@
import { asyncEach } from '@vates/async-each'
import { Disposable } from 'promise-toolbox'

import { getVmBackupDir } from '../../_getVmBackupDir.mjs'

import { Abstract } from './_Abstract.mjs'

export const AbstractRemote = class AbstractRemoteVmBackupRunner extends Abstract {
  constructor({
    config,
    job,
    healthCheckSr,
    remoteAdapters,
    schedule,
    settings,
    sourceRemoteAdapter,
    throttleStream,
    vmUuid,
  }) {
    super()
    this.config = config
    this.job = job
    this.remoteAdapters = remoteAdapters
    this.scheduleId = schedule.id
    this.timestamp = undefined

    this._healthCheckSr = healthCheckSr
    this._sourceRemoteAdapter = sourceRemoteAdapter
    this._throttleStream = throttleStream
    this._vmUuid = vmUuid

    const allSettings = job.settings
    const writers = new Set()
    this._writers = writers

    const RemoteWriter = this._getRemoteWriter()
    Object.entries(remoteAdapters).forEach(([remoteId, adapter]) => {
      const targetSettings = {
        ...settings,
        ...allSettings[remoteId],
      }
      writers.add(
        new RemoteWriter({
          adapter,
          config,
          healthCheckSr,
          job,
          scheduleId: schedule.id,
          vmUuid,
          remoteId,
          settings: targetSettings,
        })
      )
    })
  }

  async _computeTransferList(predicate) {
    const vmBackups = await this._sourceRemoteAdapter.listVmBackups(this._vmUuid, predicate)
    const localMetadata = new Map()
    Object.values(vmBackups).forEach(metadata => {
      const timestamp = metadata.timestamp
      localMetadata.set(timestamp, metadata)
    })
    const nbRemotes = Object.keys(this.remoteAdapters).length
    const remoteMetadatas = {}
    await asyncEach(Object.values(this.remoteAdapters), async remoteAdapter => {
      const remoteMetadata = await remoteAdapter.listVmBackups(this._vmUuid, predicate)
      remoteMetadata.forEach(metadata => {
        const timestamp = metadata.timestamp
        remoteMetadatas[timestamp] = (remoteMetadatas[timestamp] ?? 0) + 1
      })
    })

    let chain = []
    const timestamps = [...localMetadata.keys()]
    timestamps.sort()
    for (const timestamp of timestamps) {
      if (remoteMetadatas[timestamp] !== nbRemotes) {
        // this backup is not present on all the remotes
        // it should be retransferred if not found later
        chain.push(localMetadata.get(timestamp))
      } else {
        // the backup is present locally and on every remote: the chain has already been transferred
        chain = []
      }
    }
    return chain
  }

  async run() {
    const handler = this._sourceRemoteAdapter._handler
    await Disposable.use(await handler.lock(getVmBackupDir(this._vmUuid)), async () => {
      await this._run()
      await this._healthCheck()
    })
  }
}
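A minimal sketch of the selection logic in `_computeTransferList` above, with hypothetical timestamps (backups 1 and 2 already mirrored to both remotes, 3 missing from one); the names are illustrative, not from the diff:

const nbRemotes = 2
const remoteCounts = { 1: 2, 2: 2, 3: 1 } // timestamp → number of remotes that already have it
let chain = []
for (const t of [1, 2, 3]) {
  if (remoteCounts[t] !== nbRemotes) chain.push(t)
  else chain = [] // present everywhere: everything up to it is already safe
}
// chain → [3]: only the incomplete tail of the backup chain is retransferred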
@@ -1,18 +1,16 @@
'use strict'
import assert from 'node:assert'
import groupBy from 'lodash/groupBy.js'
import ignoreErrors from 'promise-toolbox/ignoreErrors'
import { asyncMap } from '@xen-orchestra/async-map'
import { decorateMethodsWith } from '@vates/decorate-with'
import { defer } from 'golike-defer'
import { formatDateTime } from '@xen-orchestra/xapi'

const assert = require('assert')
const groupBy = require('lodash/groupBy.js')
const ignoreErrors = require('promise-toolbox/ignoreErrors')
const { asyncMap } = require('@xen-orchestra/async-map')
const { decorateMethodsWith } = require('@vates/decorate-with')
const { defer } = require('golike-defer')
const { formatDateTime } = require('@xen-orchestra/xapi')
import { getOldEntries } from '../../_getOldEntries.mjs'
import { Task } from '../../Task.mjs'
import { Abstract } from './_Abstract.mjs'

const { getOldEntries } = require('../../_getOldEntries.js')
const { Task } = require('../../Task.js')
const { Abstract } = require('./_Abstract.js')

class AbstractXapiVmBackupRunner extends Abstract {
export const AbstractXapi = class AbstractXapiVmBackupRunner extends Abstract {
  constructor({
    config,
    getSnapshotNameLabel,
@@ -40,11 +38,11 @@ class AbstractXapiVmBackupRunner extends Abstract {
    this.timestamp = undefined

    // VM currently backed up
    this.vm = vm
    const { tags } = this.vm
    const tags = (this._tags = vm.tags)

    // VM (snapshot) that is really exported
    this.exportedVm = undefined
    this._exportedVm = undefined
    this._vm = vm

    this._fullVdisRequired = undefined
    this._getSnapshotNameLabel = getSnapshotNameLabel
@@ -66,7 +64,6 @@ class AbstractXapiVmBackupRunner extends Abstract {
      settings.offlineSnapshot = true
    }
    this._settings = settings

    // Create writers
    {
      const writers = new Set()
@@ -75,13 +72,24 @@ class AbstractXapiVmBackupRunner extends Abstract {
      const [BackupWriter, ReplicationWriter] = this._getWriters()

      const allSettings = job.settings
      Object.keys(remoteAdapters).forEach(remoteId => {
      Object.entries(remoteAdapters).forEach(([remoteId, adapter]) => {
        const targetSettings = {
          ...settings,
          ...allSettings[remoteId],
        }
        if (targetSettings.exportRetention !== 0) {
          writers.add(new BackupWriter({ backup: this, remoteId, settings: targetSettings }))
          writers.add(
            new BackupWriter({
              adapter,
              config,
              healthCheckSr,
              job,
              scheduleId: schedule.id,
              vmUuid: vm.uuid,
              remoteId,
              settings: targetSettings,
            })
          )
        }
      })
      srs.forEach(sr => {
@@ -90,7 +98,17 @@ class AbstractXapiVmBackupRunner extends Abstract {
          ...allSettings[sr.uuid],
        }
        if (targetSettings.copyRetention !== 0) {
          writers.add(new ReplicationWriter({ backup: this, sr, settings: targetSettings }))
          writers.add(
            new ReplicationWriter({
              config,
              healthCheckSr,
              job,
              scheduleId: schedule.id,
              vmUuid: vm.uuid,
              sr,
              settings: targetSettings,
            })
          )
        }
      })
    }
@@ -99,7 +117,7 @@ class AbstractXapiVmBackupRunner extends Abstract {
  // ensure the VM itself does not have any backup metadata which would be
  // copied on manual snapshots and interfere with the backup jobs
  async _cleanMetadata() {
    const { vm } = this
    const vm = this._vm
    if ('xo:backup:job' in vm.other_config) {
      await vm.update_other_config({
        'xo:backup:datetime': null,
@@ -113,7 +131,7 @@ class AbstractXapiVmBackupRunner extends Abstract {
  }

  async _snapshot() {
    const { vm } = this
    const vm = this._vm
    const xapi = this._xapi

    const settings = this._settings
@@ -138,19 +156,19 @@ class AbstractXapiVmBackupRunner extends Abstract {
          'xo:backup:vm': vm.uuid,
        })

        this.exportedVm = await xapi.getRecord('VM', snapshotRef)
        this._exportedVm = await xapi.getRecord('VM', snapshotRef)

        return this.exportedVm.uuid
        return this._exportedVm.uuid
      })
    } else {
      this.exportedVm = vm
      this._exportedVm = vm
      this.timestamp = Date.now()
    }
  }

  async _fetchJobSnapshots() {
    const jobId = this._jobId
    const vmRef = this.vm.$ref
    const vmRef = this._vm.$ref
    const xapi = this._xapi

    const snapshotsRef = await xapi.getField('VM', vmRef, 'snapshots')
@@ -177,7 +195,7 @@ class AbstractXapiVmBackupRunner extends Abstract {
    const settings = {
      ...baseSettings,
      ...allSettings[scheduleId],
      ...allSettings[this.vm.uuid],
      ...allSettings[this._vm.uuid],
    }
    return asyncMap(getOldEntries(settings.snapshotRetention, snapshots), ({ $ref }) => {
      if ($ref !== baseVmRef) {
@@ -224,7 +242,7 @@ class AbstractXapiVmBackupRunner extends Abstract {
    await this._cleanMetadata()
    await this._removeUnusedSnapshots()

    const { vm } = this
    const vm = this._vm
    const isRunning = vm.power_state === 'Running'
    const startAfter = isRunning && (settings.offlineBackup ? 'backup' : settings.offlineSnapshot && 'snapshot')
    if (startAfter) {
@@ -251,8 +269,7 @@ class AbstractXapiVmBackupRunner extends Abstract {
    await this._healthCheck()
  }
}
exports.AbstractXapi = AbstractXapiVmBackupRunner

decorateMethodsWith(AbstractXapiVmBackupRunner, {
decorateMethodsWith(AbstractXapi, {
  run: defer,
})
@@ -1,12 +0,0 @@
'use strict'

const { mapValues } = require('lodash')
const { forkStreamUnpipe } = require('../_forkStreamUnpipe')

exports.forkDeltaExport = function forkDeltaExport(deltaExport) {
  return Object.create(deltaExport, {
    streams: {
      value: mapValues(deltaExport.streams, forkStreamUnpipe),
    },
  })
}
@@ -0,0 +1,11 @@
import mapValues from 'lodash/mapValues.js'

import { forkStreamUnpipe } from '../_forkStreamUnpipe.mjs'

export function forkDeltaExport(deltaExport) {
  return Object.create(deltaExport, {
    streams: {
      value: mapValues(deltaExport.streams, forkStreamUnpipe),
    },
  })
}
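The `Object.create` trick above keeps the original export as the prototype and only shadows the `streams` property with forked copies, so each writer can consume its own streams without draining the source. A hypothetical two-writer usage (names illustrative, not from the diff):

const fork1 = forkDeltaExport(deltaExport)
const fork2 = forkDeltaExport(deltaExport)
// fork1.vdis === deltaExport.vdis — metadata is inherited through the prototype
// fork1.streams !== deltaExport.streams — each fork gets independently forked streams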
@@ -1,13 +1,11 @@
'use strict'
import { formatFilenameDate } from '../../_filenameDate.mjs'
import { getOldEntries } from '../../_getOldEntries.mjs'
import { Task } from '../../Task.mjs'

const { formatFilenameDate } = require('../../_filenameDate.js')
const { getOldEntries } = require('../../_getOldEntries.js')
const { Task } = require('../../Task.js')
import { MixinRemoteWriter } from './_MixinRemoteWriter.mjs'
import { AbstractFullWriter } from './_AbstractFullWriter.mjs'

const { MixinRemoteWriter } = require('./_MixinRemoteWriter.js')
const { AbstractFullWriter } = require('./_AbstractFullWriter.js')

exports.FullRemoteWriter = class FullRemoteWriter extends MixinRemoteWriter(AbstractFullWriter) {
export class FullRemoteWriter extends MixinRemoteWriter(AbstractFullWriter) {
  constructor(props) {
    super(props)

@@ -26,15 +24,17 @@ exports.FullRemoteWriter = class FullRemoteWriter extends MixinRemoteWriter(Abst
    )
  }

  async _run({ timestamp, sizeContainer, stream }) {
    const backup = this._backup
  async _run({ timestamp, sizeContainer, stream, vm, vmSnapshot }) {
    const settings = this._settings

    const { job, scheduleId, vm } = backup
    const job = this._job
    const scheduleId = this._scheduleId

    const adapter = this._adapter

    // TODO: clean VM backup directory
    let metadata = await this._isAlreadyTransferred(timestamp)
    if (metadata !== undefined) {
      // @todo: should skip the backup while being careful not to stall the forked stream
      Task.info('This backup has already been transferred')
    }

    const oldBackups = getOldEntries(
      settings.exportRetention - 1,
@@ -47,14 +47,14 @@ exports.FullRemoteWriter = class FullRemoteWriter extends MixinRemoteWriter(Abst
    const dataBasename = basename + '.xva'
    const dataFilename = this._vmBackupDir + '/' + dataBasename

    const metadata = {
    metadata = {
      jobId: job.id,
      mode: job.mode,
      scheduleId,
      timestamp,
      version: '2.0.0',
      vm,
      vmSnapshot: this._backup.exportedVm,
      vmSnapshot,
      xva: './' + dataBasename,
    }
@@ -1,18 +1,16 @@
'use strict'
import ignoreErrors from 'promise-toolbox/ignoreErrors'
import { asyncMap, asyncMapSettled } from '@xen-orchestra/async-map'
import { formatDateTime } from '@xen-orchestra/xapi'

const ignoreErrors = require('promise-toolbox/ignoreErrors')
const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
const { formatDateTime } = require('@xen-orchestra/xapi')
import { formatFilenameDate } from '../../_filenameDate.mjs'
import { getOldEntries } from '../../_getOldEntries.mjs'
import { Task } from '../../Task.mjs'

const { formatFilenameDate } = require('../../_filenameDate.js')
const { getOldEntries } = require('../../_getOldEntries.js')
const { Task } = require('../../Task.js')
import { AbstractFullWriter } from './_AbstractFullWriter.mjs'
import { MixinXapiWriter } from './_MixinXapiWriter.mjs'
import { listReplicatedVms } from './_listReplicatedVms.mjs'

const { AbstractFullWriter } = require('./_AbstractFullWriter.js')
const { MixinXapiWriter } = require('./_MixinXapiWriter.js')
const { listReplicatedVms } = require('./_listReplicatedVms.js')

exports.FullXapiWriter = class FullXapiWriter extends MixinXapiWriter(AbstractFullWriter) {
export class FullXapiWriter extends MixinXapiWriter(AbstractFullWriter) {
  constructor(props) {
    super(props)

@@ -32,10 +30,11 @@ exports.FullXapiWriter = class FullXapiWriter extends MixinXapiWriter(AbstractFu
    )
  }

  async _run({ timestamp, sizeContainer, stream }) {
  async _run({ timestamp, sizeContainer, stream, vm }) {
    const sr = this._sr
    const settings = this._settings
    const { job, scheduleId, vm } = this._backup
    const job = this._job
    const scheduleId = this._scheduleId

    const { uuid: srUuid, $xapi: xapi } = sr
@@ -1,35 +1,32 @@
'use strict'
import assert from 'node:assert'
import mapValues from 'lodash/mapValues.js'
import ignoreErrors from 'promise-toolbox/ignoreErrors'
import { asyncEach } from '@vates/async-each'
import { asyncMap } from '@xen-orchestra/async-map'
import { chainVhd, checkVhdChain, openVhd, VhdAbstract } from 'vhd-lib'
import { createLogger } from '@xen-orchestra/log'
import { decorateClass } from '@vates/decorate-with'
import { defer } from 'golike-defer'
import { dirname } from 'node:path'

const assert = require('assert')
const map = require('lodash/map.js')
const mapValues = require('lodash/mapValues.js')
const ignoreErrors = require('promise-toolbox/ignoreErrors')
const { asyncMap } = require('@xen-orchestra/async-map')
const { chainVhd, checkVhdChain, openVhd, VhdAbstract } = require('vhd-lib')
const { createLogger } = require('@xen-orchestra/log')
const { decorateClass } = require('@vates/decorate-with')
const { defer } = require('golike-defer')
const { dirname } = require('path')
import { formatFilenameDate } from '../../_filenameDate.mjs'
import { getOldEntries } from '../../_getOldEntries.mjs'
import { Task } from '../../Task.mjs'

const { formatFilenameDate } = require('../../_filenameDate.js')
const { getOldEntries } = require('../../_getOldEntries.js')
const { Task } = require('../../Task.js')

const { MixinRemoteWriter } = require('./_MixinRemoteWriter.js')
const { AbstractIncrementalWriter } = require('./_AbstractIncrementalWriter.js')
const { checkVhd } = require('./_checkVhd.js')
const { packUuid } = require('./_packUuid.js')
const { Disposable } = require('promise-toolbox')
import { MixinRemoteWriter } from './_MixinRemoteWriter.mjs'
import { AbstractIncrementalWriter } from './_AbstractIncrementalWriter.mjs'
import { checkVhd } from './_checkVhd.mjs'
import { packUuid } from './_packUuid.mjs'
import { Disposable } from 'promise-toolbox'

const { warn } = createLogger('xo:backups:DeltaBackupWriter')

class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrementalWriter) {
export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrementalWriter) {
  async checkBaseVdis(baseUuidToSrcVdi) {
    const { handler } = this._adapter
    const backup = this._backup
    const adapter = this._adapter

    const vdisDir = `${this._vmBackupDir}/vdis/${backup.job.id}`
    const vdisDir = `${this._vmBackupDir}/vdis/${this._job.id}`

    await asyncMap(baseUuidToSrcVdi, async ([baseUuid, srcVdi]) => {
      let found = false
@@ -91,11 +88,12 @@ class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrementalWrite
  async _prepare() {
    const adapter = this._adapter
    const settings = this._settings
    const { scheduleId, vm } = this._backup
    const scheduleId = this._scheduleId
    const vmUuid = this._vmUuid

    const oldEntries = getOldEntries(
      settings.exportRetention - 1,
      await adapter.listVmBackups(vm.uuid, _ => _.mode === 'delta' && _.scheduleId === scheduleId)
      await adapter.listVmBackups(vmUuid, _ => _.mode === 'delta' && _.scheduleId === scheduleId)
    )
    this._oldEntries = oldEntries
@@ -134,16 +132,19 @@ class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrementalWrite
    }
  }

  async _transfer($defer, { timestamp, deltaExport }) {
  async _transfer($defer, { differentialVhds, timestamp, deltaExport, vm, vmSnapshot }) {
    const adapter = this._adapter
    const backup = this._backup

    const { job, scheduleId, vm } = backup

    const job = this._job
    const scheduleId = this._scheduleId
    const settings = this._settings
    const jobId = job.id
    const handler = adapter.handler

    // TODO: clean VM backup directory
    let metadataContent = await this._isAlreadyTransferred(timestamp)
    if (metadataContent !== undefined) {
      // @todo: should skip the backup while being careful not to stall the forked stream
      Task.info('This backup has already been transferred')
    }

    const basename = formatFilenameDate(timestamp)
    const vhds = mapValues(
@@ -158,7 +159,7 @@ class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrementalWrite
      }/${adapter.getVhdFileName(basename)}`
    )

    const metadataContent = {
    metadataContent = {
      jobId,
      mode: job.mode,
      scheduleId,
@@ -169,16 +170,16 @@ class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrementalWrite
      vifs: deltaExport.vifs,
      vhds,
      vm,
      vmSnapshot: this._backup.exportedVm,
      vmSnapshot,
    }

    const { size } = await Task.run({ name: 'transfer' }, async () => {
      let transferSize = 0
      await Promise.all(
        map(deltaExport.vdis, async (vdi, id) => {
      await asyncEach(
        Object.entries(deltaExport.vdis),
        async ([id, vdi]) => {
          const path = `${this._vmBackupDir}/${vhds[id]}`

          const isDelta = vdi.other_config['xo:base_delta'] !== undefined
          const isDelta = differentialVhds[`${id}.vhd`]
          let parentPath
          if (isDelta) {
            const vdiDir = dirname(path)
@@ -191,7 +192,11 @@ class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrementalWrite
              .sort()
              .pop()

            assert.notStrictEqual(parentPath, undefined, `missing parent of ${id}`)
            assert.notStrictEqual(
              parentPath,
              undefined,
              `missing parent of ${id} in ${dirname(path)}, looking for ${vdi.other_config['xo:base_delta']}`
            )

            parentPath = parentPath.slice(1) // remove leading slash

@@ -204,7 +209,7 @@ class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrementalWrite
            // merges and chainings
            checksum: false,
            validator: tmpPath => checkVhd(handler, tmpPath),
            writeBlockConcurrency: this._backup.config.writeBlockConcurrency,
            writeBlockConcurrency: this._config.writeBlockConcurrency,
          })

          if (isDelta) {
@@ -217,8 +222,12 @@ class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrementalWrite
            await vhd.readBlockAllocationTable() // required by writeFooter()
            await vhd.writeFooter()
          })
        })
        },
        {
          concurrency: settings.diskPerVmConcurrency,
        }
      )

      return { size: transferSize }
    })
    metadataContent.size = size
@@ -227,6 +236,6 @@ class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrementalWrite
    // TODO: run cleanup?
  }
}
exports.IncrementalRemoteWriter = decorateClass(IncrementalRemoteWriter, {
decorateClass(IncrementalRemoteWriter, {
  _transfer: defer,
})
@@ -1,22 +1,20 @@
'use strict'
import { asyncMap, asyncMapSettled } from '@xen-orchestra/async-map'
import ignoreErrors from 'promise-toolbox/ignoreErrors'
import { formatDateTime } from '@xen-orchestra/xapi'

const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
const ignoreErrors = require('promise-toolbox/ignoreErrors')
const { formatDateTime } = require('@xen-orchestra/xapi')
import { formatFilenameDate } from '../../_filenameDate.mjs'
import { getOldEntries } from '../../_getOldEntries.mjs'
import { importIncrementalVm, TAG_COPY_SRC } from '../../_incrementalVm.mjs'
import { Task } from '../../Task.mjs'

const { formatFilenameDate } = require('../../_filenameDate.js')
const { getOldEntries } = require('../../_getOldEntries.js')
const { importIncrementalVm, TAG_COPY_SRC } = require('../../_incrementalVm.js')
const { Task } = require('../../Task.js')
import { AbstractIncrementalWriter } from './_AbstractIncrementalWriter.mjs'
import { MixinXapiWriter } from './_MixinXapiWriter.mjs'
import { listReplicatedVms } from './_listReplicatedVms.mjs'

const { AbstractIncrementalWriter } = require('./_AbstractIncrementalWriter.js')
const { MixinXapiWriter } = require('./_MixinXapiWriter.js')
const { listReplicatedVms } = require('./_listReplicatedVms.js')

exports.IncrementalXapiWriter = class IncrementalXapiWriter extends MixinXapiWriter(AbstractIncrementalWriter) {
export class IncrementalXapiWriter extends MixinXapiWriter(AbstractIncrementalWriter) {
  async checkBaseVdis(baseUuidToSrcVdi, baseVm) {
    const sr = this._sr
    const replicatedVm = listReplicatedVms(sr.$xapi, this._backup.job.id, sr.uuid, this._backup.vm.uuid).find(
    const replicatedVm = listReplicatedVms(sr.$xapi, this._job.id, sr.uuid, this._vmUuid).find(
      vm => vm.other_config[TAG_COPY_SRC] === baseVm.uuid
    )
    if (replicatedVm === undefined) {
@@ -49,9 +47,10 @@ exports.IncrementalXapiWriter = class IncrementalXapiWriter extends MixinXapiWri
        type: 'SR',
      },
    })
    const hasHealthCheckSr = this._healthCheckSr !== undefined
    this.transfer = task.wrapFn(this.transfer)
    this.cleanup = task.wrapFn(this.cleanup)
    this.healthCheck = task.wrapFn(this.healthCheck, true)
    this.cleanup = task.wrapFn(this.cleanup, !hasHealthCheckSr)
    this.healthCheck = task.wrapFn(this.healthCheck, hasHealthCheckSr)

    return task.run(() => this._prepare())
  }
@@ -59,12 +58,13 @@ exports.IncrementalXapiWriter = class IncrementalXapiWriter extends MixinXapiWri
  async _prepare() {
    const settings = this._settings
    const { uuid: srUuid, $xapi: xapi } = this._sr
    const { scheduleId, vm } = this._backup
    const vmUuid = this._vmUuid
    const scheduleId = this._scheduleId

    // delete previous interrupted copies
    ignoreErrors.call(asyncMapSettled(listReplicatedVms(xapi, scheduleId, undefined, vm.uuid), vm => vm.$destroy))
    ignoreErrors.call(asyncMapSettled(listReplicatedVms(xapi, scheduleId, undefined, vmUuid), vm => vm.$destroy))

    this._oldEntries = getOldEntries(settings.copyRetention - 1, listReplicatedVms(xapi, scheduleId, srUuid, vm.uuid))
    this._oldEntries = getOldEntries(settings.copyRetention - 1, listReplicatedVms(xapi, scheduleId, srUuid, vmUuid))

    if (settings.deleteFirst) {
      await this._deleteOldEntries()
@@ -81,10 +81,11 @@ exports.IncrementalXapiWriter = class IncrementalXapiWriter extends MixinXapiWri
    return asyncMapSettled(this._oldEntries, vm => vm.$destroy())
  }

  async _transfer({ timestamp, deltaExport, sizeContainers }) {
  async _transfer({ timestamp, deltaExport, sizeContainers, vm }) {
    const { _warmMigration } = this._settings
    const sr = this._sr
    const { job, scheduleId, vm } = this._backup
    const job = this._job
    const scheduleId = this._scheduleId

    const { uuid: srUuid, $xapi: xapi } = sr

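With a health-check SR configured, `healthCheck()` rather than `cleanup()` becomes the call that closes the replication task. A self-contained sketch of that gating, using a hypothetical `makeWrap` helper in place of `task.wrapFn` (whose exact semantics are not shown in this diff):

```js
// hypothetical stand-in for task.wrapFn(fn, endsTask): the flagged call closes the task
const makeWrap = task => (fn, endsTask = false) =>
  async (...args) => {
    try {
      return await fn(...args)
    } finally {
      if (endsTask) task.closed = true
    }
  }

const task = { closed: false }
const wrap = makeWrap(task)

const hasHealthCheckSr = false
const cleanup = wrap(async () => {}, !hasHealthCheckSr) // closes the task only when no health check follows
const healthCheck = wrap(async () => {}, hasHealthCheckSr)

await cleanup()
console.log(task.closed) // true: cleanup was the last step
```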
@@ -1,14 +0,0 @@
'use strict'

const { AbstractWriter } = require('./_AbstractWriter.js')

exports.AbstractFullWriter = class AbstractFullWriter extends AbstractWriter {
  async run({ timestamp, sizeContainer, stream }) {
    try {
      return await this._run({ timestamp, sizeContainer, stream })
    } finally {
      // ensure stream is properly closed
      stream.destroy()
    }
  }
}
@@ -0,0 +1,12 @@
import { AbstractWriter } from './_AbstractWriter.mjs'

export class AbstractFullWriter extends AbstractWriter {
  async run({ timestamp, sizeContainer, stream, vm, vmSnapshot }) {
    try {
      return await this._run({ timestamp, sizeContainer, stream, vm, vmSnapshot })
    } finally {
      // ensure stream is properly closed
      stream.destroy()
    }
  }
}
@@ -1,8 +1,6 @@
'use strict'
import { AbstractWriter } from './_AbstractWriter.mjs'

const { AbstractWriter } = require('./_AbstractWriter.js')

exports.AbstractIncrementalWriter = class AbstractIncrementalWriter extends AbstractWriter {
export class AbstractIncrementalWriter extends AbstractWriter {
  checkBaseVdis(baseUuidToSrcVdi, baseVm) {
    throw new Error('Not implemented')
  }
@@ -15,9 +13,9 @@ exports.AbstractIncrementalWriter = class AbstractIncrementalWriter extends Abst
    throw new Error('Not implemented')
  }

  async transfer({ timestamp, deltaExport, sizeContainers }) {
  async transfer({ deltaExport, ...other }) {
    try {
      return await this._transfer({ timestamp, deltaExport, sizeContainers })
      return await this._transfer({ deltaExport, ...other })
    } finally {
      // ensure all streams are properly closed
      for (const stream of Object.values(deltaExport.streams)) {
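The base `transfer()` now only names `deltaExport`, which it still needs in the `finally` block to close the export streams; everything else (`timestamp`, `sizeContainers`, and the new `vm`) flows to `_transfer()` through a rest/spread pair, so subclasses can grow parameters without touching the base class. A minimal illustration:

```js
// only the parameter the wrapper itself uses is named; the rest is forwarded as-is
async function transfer({ deltaExport, ...other }) {
  return { deltaExport, ...other }
}

const result = await transfer({ deltaExport: {}, timestamp: 1, vm: { uuid: 'abc' } })
console.log(result.vm.uuid) // 'abc', forwarded without being declared anywhere
```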
@@ -1,14 +0,0 @@
'use strict'

exports.AbstractWriter = class AbstractWriter {
  constructor({ backup, settings }) {
    this._backup = backup
    this._settings = settings
  }

  beforeBackup() {}

  afterBackup() {}

  healthCheck(sr) {}
}
@xen-orchestra/backups/_runners/_writers/_AbstractWriter.mjs (new file, 29 lines)
@@ -0,0 +1,29 @@
import { formatFilenameDate } from '../../_filenameDate.mjs'
import { getVmBackupDir } from '../../_getVmBackupDir.mjs'

export class AbstractWriter {
  constructor({ config, healthCheckSr, job, vmUuid, scheduleId, settings }) {
    this._config = config
    this._healthCheckSr = healthCheckSr
    this._job = job
    this._scheduleId = scheduleId
    this._settings = settings
    this._vmUuid = vmUuid
  }

  beforeBackup() {}

  afterBackup() {}

  healthCheck(sr) {}

  _isAlreadyTransferred(timestamp) {
    const vmUuid = this._vmUuid
    const adapter = this._adapter
    const backupDir = getVmBackupDir(vmUuid)
    try {
      const actualMetadata = JSON.parse(adapter._handler.readFile(`${backupDir}/${formatFilenameDate(timestamp)}.json`))
      return actualMetadata
    } catch (error) {}
  }
}
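One caveat in `_isAlreadyTransferred` above: `adapter._handler.readFile()` is called without `await`, and the handlers in `@xen-orchestra/fs` are promise-based, so `JSON.parse` would receive a promise object, throw, and the empty `catch` would silently return `undefined`. A hedged sketch of the presumable intent, assuming an async handler:

```js
// sketch only: assumes handler.readFile resolves with the file contents
async function isAlreadyTransferred(handler, backupDir, filename) {
  try {
    return JSON.parse(await handler.readFile(`${backupDir}/${filename}`))
  } catch (error) {
    // a missing or unreadable metadata file simply means "not transferred yet"
  }
}
```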
@@ -1,29 +1,27 @@
'use strict'
import { createLogger } from '@xen-orchestra/log'
import { join } from 'node:path'
import assert from 'node:assert'

const { createLogger } = require('@xen-orchestra/log')
const { join } = require('path')

const assert = require('assert')
const { formatFilenameDate } = require('../../_filenameDate.js')
const { getVmBackupDir } = require('../../_getVmBackupDir.js')
const { HealthCheckVmBackup } = require('../../HealthCheckVmBackup.js')
const { ImportVmBackup } = require('../../ImportVmBackup.js')
const { Task } = require('../../Task.js')
const MergeWorker = require('../../merge-worker/index.js')
import { formatFilenameDate } from '../../_filenameDate.mjs'
import { getVmBackupDir } from '../../_getVmBackupDir.mjs'
import { HealthCheckVmBackup } from '../../HealthCheckVmBackup.mjs'
import { ImportVmBackup } from '../../ImportVmBackup.mjs'
import { Task } from '../../Task.mjs'
import * as MergeWorker from '../../merge-worker/index.mjs'

const { info, warn } = createLogger('xo:backups:MixinBackupWriter')

exports.MixinRemoteWriter = (BaseClass = Object) =>
export const MixinRemoteWriter = (BaseClass = Object) =>
  class MixinRemoteWriter extends BaseClass {
    #lock

    constructor({ remoteId, ...rest }) {
    constructor({ remoteId, adapter, ...rest }) {
      super(rest)

      this._adapter = rest.backup.remoteAdapters[remoteId]
      this._adapter = adapter
      this._remoteId = remoteId

      this._vmBackupDir = getVmBackupDir(this._backup.vm.uuid)
      this._vmBackupDir = getVmBackupDir(rest.vmUuid)
    }

    async _cleanVm(options) {
@@ -38,7 +36,7 @@ exports.MixinRemoteWriter = (BaseClass = Object) =>
            Task.warning(message, data)
          },
          lock: false,
          mergeBlockConcurrency: this._backup.config.mergeBlockConcurrency,
          mergeBlockConcurrency: this._config.mergeBlockConcurrency,
        })
      })
    } catch (error) {
@@ -55,7 +53,7 @@ exports.MixinRemoteWriter = (BaseClass = Object) =>
    }

    async afterBackup() {
      const { disableMergeWorker } = this._backup.config
      const { disableMergeWorker } = this._config
      // merge worker only compatible with local remotes
      const { handler } = this._adapter
      const willMergeInWorker = !disableMergeWorker && typeof handler.getRealPath === 'function'
@@ -70,13 +68,15 @@ exports.MixinRemoteWriter = (BaseClass = Object) =>
        // add a random suffix to avoid collision in case multiple tasks are created at the same second
        Math.random().toString(36).slice(2)

        await handler.outputFile(taskFile, this._backup.vm.uuid)
        await handler.outputFile(taskFile, this._vmUuid)
        const remotePath = handler.getRealPath()
        await MergeWorker.run(remotePath)
      }
    }

    healthCheck(sr) {
    healthCheck() {
      const sr = this._healthCheckSr
      assert.notStrictEqual(sr, undefined, 'SR should be defined before making a health check')
      assert.notStrictEqual(
        this._metadataFileName,
        undefined,
@@ -109,4 +109,16 @@ exports.MixinRemoteWriter = (BaseClass = Object) =>
        }
      )
    }

    _isAlreadyTransferred(timestamp) {
      const vmUuid = this._vmUuid
      const adapter = this._adapter
      const backupDir = getVmBackupDir(vmUuid)
      try {
        const actualMetadata = JSON.parse(
          adapter._handler.readFile(`${backupDir}/${formatFilenameDate(timestamp)}.json`)
        )
        return actualMetadata
      } catch (error) {}
    }
  }
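`MixinRemoteWriter` now receives its collaborators explicitly (`adapter` directly, and `config`, `vmUuid`, etc. through `rest`) instead of reaching into a shared `backup` object, so a writer can be built without a full backup context. An illustrative, entirely hypothetical instantiation under the new signature:

```js
const adapter = {} // stand-in for a RemoteAdapter instance

const writer = new IncrementalRemoteWriter({
  adapter, // previously looked up via backup.remoteAdapters[remoteId]
  config: { mergeBlockConcurrency: 2, writeBlockConcurrency: 16 },
  healthCheckSr: undefined, // optional: healthCheck() asserts it is defined when used
  job: { id: 'job-1', mode: 'delta' },
  remoteId: 'remote-1',
  scheduleId: 'schedule-1',
  settings: { deleteFirst: false },
  vmUuid: 'f07ab729-c0e8-721c-45ec-f11276377030',
})
```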
@@ -1,44 +0,0 @@
'use strict'

const { extractOpaqueRef } = require('@xen-orchestra/xapi')

const { Task } = require('../../Task')
const assert = require('node:assert/strict')
const { HealthCheckVmBackup } = require('../../HealthCheckVmBackup')

exports.MixinXapiWriter = (BaseClass = Object) =>
  class MixinXapiWriter extends BaseClass {
    constructor({ sr, ...rest }) {
      super(rest)

      this._sr = sr
    }

    healthCheck(sr) {
      assert.notEqual(this._targetVmRef, undefined, 'A VM should have been transferred to be health checked')
      // copy VM
      return Task.run(
        {
          name: 'health check',
        },
        async () => {
          const { $xapi: xapi } = sr
          let clonedVm
          try {
            const baseVm = xapi.getObject(this._targetVmRef) ?? (await xapi.waitObject(this._targetVmRef))
            const clonedRef = await xapi
              .callAsync('VM.clone', this._targetVmRef, `Health Check - ${baseVm.name_label}`)
              .then(extractOpaqueRef)
            clonedVm = xapi.getObject(clonedRef) ?? (await xapi.waitObject(clonedRef))

            await new HealthCheckVmBackup({
              restoredVm: clonedVm,
              xapi,
            }).run()
          } finally {
            clonedVm && (await xapi.VM_destroy(clonedVm.$ref))
          }
        }
      )
    }
  }
@@ -0,0 +1,72 @@
import { extractOpaqueRef } from '@xen-orchestra/xapi'
import assert from 'node:assert/strict'

import { HealthCheckVmBackup } from '../../HealthCheckVmBackup.mjs'
import { Task } from '../../Task.mjs'

export const MixinXapiWriter = (BaseClass = Object) =>
  class MixinXapiWriter extends BaseClass {
    constructor({ sr, ...rest }) {
      super(rest)

      this._sr = sr
    }

    // check if the base VM has all its disks on the health check SR
    async #isAlreadyOnHealthCheckSr(baseVm) {
      const xapi = baseVm.$xapi
      const vdiRefs = await xapi.VM_getDisks(baseVm.$ref)
      for (const vdiRef of vdiRefs) {
        const vdi = xapi.getObject(vdiRef)
        if (vdi.$SR.uuid !== this._healthCheckSr.uuid) {
          return false
        }
      }
      return true
    }

    healthCheck() {
      const sr = this._healthCheckSr
      assert.notStrictEqual(sr, undefined, 'SR should be defined before making a health check')
      assert.notEqual(this._targetVmRef, undefined, 'A VM should have been transferred to be health checked')
      // copy VM
      return Task.run(
        {
          name: 'health check',
        },
        async () => {
          const { $xapi: xapi } = sr
          let healthCheckVmRef
          try {
            const baseVm = xapi.getObject(this._targetVmRef) ?? (await xapi.waitObject(this._targetVmRef))

            if (await this.#isAlreadyOnHealthCheckSr(baseVm)) {
              healthCheckVmRef = await Task.run(
                { name: 'cloning-vm' },
                async () =>
                  await xapi
                    .callAsync('VM.clone', this._targetVmRef, `Health Check - ${baseVm.name_label}`)
                    .then(extractOpaqueRef)
              )
            } else {
              healthCheckVmRef = await Task.run(
                { name: 'copying-vm' },
                async () =>
                  await xapi
                    .callAsync('VM.copy', this._targetVmRef, `Health Check - ${baseVm.name_label}`, sr.$ref)
                    .then(extractOpaqueRef)
              )
            }
            const healthCheckVm = xapi.getObject(healthCheckVmRef) ?? (await xapi.waitObject(healthCheckVmRef))

            await new HealthCheckVmBackup({
              restoredVm: healthCheckVm,
              xapi,
            }).run()
          } finally {
            healthCheckVmRef && (await xapi.VM_destroy(healthCheckVmRef))
          }
        }
      )
    }
  }
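The rewritten health check first asks whether every disk of the replicated VM already lives on the health-check SR: when it does, `VM.clone` is enough (a fast, thin copy on the same SR); otherwise `VM.copy` must materialise the disks onto that SR. Reduced to a sketch:

```js
// sketch: pick the cheapest XAPI call that yields a test VM on the health-check SR
function createHealthCheckVm(xapi, vmRef, label, sr, alreadyOnHealthCheckSr) {
  return alreadyOnHealthCheckSr
    ? xapi.callAsync('VM.clone', vmRef, label) // thin and fast, disks stay on the same SR
    : xapi.callAsync('VM.copy', vmRef, label, sr.$ref) // full copy onto the target SR
}
```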
@@ -1,8 +0,0 @@
'use strict'

const openVhd = require('vhd-lib').openVhd
const Disposable = require('promise-toolbox/Disposable')

exports.checkVhd = async function checkVhd(handler, path) {
  await Disposable.use(openVhd(handler, path), () => {})
}
@xen-orchestra/backups/_runners/_writers/_checkVhd.mjs (new file, 6 lines)
@@ -0,0 +1,6 @@
import { openVhd } from 'vhd-lib'
import Disposable from 'promise-toolbox/Disposable'

export async function checkVhd(handler, path) {
  await Disposable.use(openVhd(handler, path), () => {})
}
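`checkVhd` relies on `Disposable.use` from `promise-toolbox`: `openVhd` parses the VHD header and footer and yields a disposable handle, the no-op callback runs, and the handle is released even on failure, so a corrupt file rejects before being committed. Usage as a standalone check (the remote URL and path here are illustrative):

```js
import { getSyncedHandler } from '@xen-orchestra/fs'
import Disposable from 'promise-toolbox/Disposable'
import { checkVhd } from './_checkVhd.mjs'

await Disposable.use(getSyncedHandler({ url: 'file:///mnt/backup-remote' }), async handler => {
  // rejects if the file is not a well-formed VHD
  await checkVhd(handler, 'xo-vm-backups/<vm-uuid>/vdis/<job-id>/<vdi-uuid>/20230101T000000Z.vhd')
})
```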
@@ -1,5 +1,3 @@
'use strict'

const getReplicatedVmDatetime = vm => {
  const { 'xo:backup:datetime': datetime = vm.name_label.slice(-17, -1) } = vm.other_config
  return datetime
@@ -7,7 +5,7 @@ const getReplicatedVmDatetime = vm => {

const compareReplicatedVmDatetime = (a, b) => (getReplicatedVmDatetime(a) < getReplicatedVmDatetime(b) ? -1 : 1)

exports.listReplicatedVms = function listReplicatedVms(xapi, scheduleOrJobId, srUuid, vmUuid) {
export function listReplicatedVms(xapi, scheduleOrJobId, srUuid, vmUuid) {
  const { all } = xapi.objects
  const vms = {}
  for (const key in all) {
@@ -1,7 +1,5 @@
'use strict'

const PARSE_UUID_RE = /-/g

exports.packUuid = function packUuid(uuid) {
export function packUuid(uuid) {
  return Buffer.from(uuid.replace(PARSE_UUID_RE, ''), 'hex')
}
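`packUuid` strips the dashes and decodes the 32 remaining hex digits, turning a textual UUID into its 16-byte binary form. For example:

```js
import { packUuid } from './_packUuid.mjs'

const buf = packUuid('f07ab729-c0e8-721c-45ec-f11276377030')
console.log(buf.length) // 16
console.log(buf.toString('hex')) // 'f07ab729c0e8721c45ecf11276377030'
```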
@@ -1,6 +1,4 @@
'use strict'

exports.watchStreamSize = function watchStreamSize(stream, container = { size: 0 }) {
export function watchStreamSize(stream, container = { size: 0 }) {
  stream.on('data', data => {
    container.size += data.length
  })
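`watchStreamSize` attaches a `data` listener that adds up byte counts in a shared container; this is how the `sizeContainers` passed to `transfer()` are produced without the writer having to consume the stream itself. A small example, assuming the function returns its container as the default parameter suggests (the hunk is cut off before any `return`):

```js
import { PassThrough } from 'node:stream'
import { watchStreamSize } from './_watchStreamSize.mjs'

const stream = new PassThrough()
const container = watchStreamSize(stream) // assumed: returns { size: 0 } and mutates it

stream.on('end', () => console.log(container.size)) // 1024
stream.end(Buffer.alloc(1024))
```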
@@ -171,13 +171,16 @@ job:
  # For replication jobs, indicates which SRs to use
  srs: IdPattern

  # Here for historical reasons
  type: 'backup'
  type: 'backup' | 'mirrorBackup'

  # Indicates which VMs to backup/replicate
  # Indicates which VMs to backup/replicate for a XAPI-to-remote backup job
  vms: IdPattern

  # Indicates which remote to read from for a mirror backup job
  sourceRemote: IdPattern

  # Indicates which XAPI to use to connect to a specific VM or SR
  # For a remote-to-remote backup job, this is only needed if there is a health check
  recordToXapi:
    [ObjectId]: XapiId

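With `type` widened to `'backup' | 'mirrorBackup'`, a mirror job names a `sourceRemote` to read from instead of a `vms` pattern. A hypothetical job object for a remote-to-remote mirror (every value here is illustrative, and the `remotes`/`settings` fields are assumed from the rest of this schema):

```js
const mirrorJob = {
  type: 'mirrorBackup',
  mode: 'delta',
  sourceRemote: { id: 'remote-source' }, // which remote to read from
  remotes: { id: 'remote-target' }, // where to write the mirrored backups
  // recordToXapi is only needed if a health check must restore onto a XAPI
  settings: { '': { healthCheckSr: undefined } },
}
```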
@@ -228,7 +231,7 @@ Settings are described in [`@xen-orchestra/backups/Backup.js](https://github.com
- `prepare({ isFull })`
- `transfer({ timestamp, deltaExport, sizeContainers })`
- `cleanup()`
- `healthCheck(sr)`
- `healthCheck()` // not executed if there is no health check SR or if the tag doesn't match
- **Full**
- `run({ timestamp, sizeContainer, stream })`
- `afterBackup()`

@@ -1,6 +1,4 @@
'use strict'

exports.extractIdsFromSimplePattern = function extractIdsFromSimplePattern(pattern) {
export function extractIdsFromSimplePattern(pattern) {
  if (pattern === undefined) {
    return []
  }
@@ -1,7 +1,5 @@
'use strict'

const mapValues = require('lodash/mapValues.js')
const { dirname } = require('path')
import mapValues from 'lodash/mapValues.js'
import { dirname } from 'node:path'

function formatVmBackup(backup) {
  return {
@@ -31,6 +29,6 @@ function formatVmBackup(backup) {
}

// format all backups as returned by RemoteAdapter#listAllVmBackups()
exports.formatVmBackups = function formatVmBackups(backupsByVM) {
export function formatVmBackups(backupsByVM) {
  return mapValues(backupsByVM, backups => backups.map(formatVmBackup))
}
@@ -2,19 +2,17 @@
// eslint-disable-next-line eslint-comments/disable-enable-pair
/* eslint-disable n/shebang */

'use strict'
import { catchGlobalErrors } from '@xen-orchestra/log/configure'
import { createLogger } from '@xen-orchestra/log'
import { getSyncedHandler } from '@xen-orchestra/fs'
import { join } from 'node:path'
import Disposable from 'promise-toolbox/Disposable'
import min from 'lodash/min.js'

const { catchGlobalErrors } = require('@xen-orchestra/log/configure')
const { createLogger } = require('@xen-orchestra/log')
const { getSyncedHandler } = require('@xen-orchestra/fs')
const { join } = require('path')
const Disposable = require('promise-toolbox/Disposable')
const min = require('lodash/min')
import { getVmBackupDir } from '../_getVmBackupDir.mjs'
import { RemoteAdapter } from '../RemoteAdapter.mjs'

const { getVmBackupDir } = require('../_getVmBackupDir.js')
const { RemoteAdapter } = require('../RemoteAdapter.js')

const { CLEAN_VM_QUEUE } = require('./index.js')
import { CLEAN_VM_QUEUE } from './index.mjs'

// -------------------------------------------------------------------

@@ -1,13 +1,12 @@
'use strict'
import { join } from 'node:path'
import { spawn } from 'child_process'
import { check } from 'proper-lockfile'

const { join, resolve } = require('path')
const { spawn } = require('child_process')
const { check } = require('proper-lockfile')
export const CLEAN_VM_QUEUE = '/xo-vm-backups/.queue/clean-vm/'

const CLEAN_VM_QUEUE = (exports.CLEAN_VM_QUEUE = '/xo-vm-backups/.queue/clean-vm/')
const CLI_PATH = new URL('cli.mjs', import.meta.url).pathname

const CLI_PATH = resolve(__dirname, 'cli.js')
exports.run = async function runMergeWorker(remotePath) {
export const run = async function runMergeWorker(remotePath) {
  try {
    // TODO: find a way to acquire the lock and then pass it down to the worker
    if (await check(join(remotePath, CLEAN_VM_QUEUE))) {
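The ESM conversion also replaces `__dirname`, which does not exist in `.mjs` modules, with a `file:` URL resolved against `import.meta.url`. Note that `.pathname` can misbehave on Windows drive letters and percent-encoded characters; `fileURLToPath` is the more portable spelling, shown here as a suggested alternative rather than what the diff does:

```js
import { fileURLToPath } from 'node:url'

// what the diff uses: fine for POSIX paths
const cliPathPosix = new URL('cli.mjs', import.meta.url).pathname

// more portable: handles Windows drive letters and percent-encoding
const cliPath = fileURLToPath(new URL('cli.mjs', import.meta.url))
```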
Some files were not shown because too many files have changed in this diff.