Compare commits


1 Commit

Author SHA1 Message Date
Florent Beauchamp
d47c7c6064 feat(s3): add completion control to s3 backup 2022-05-09 10:01:09 +02:00
313 changed files with 11737 additions and 16198 deletions

View File

@@ -4,6 +4,7 @@ about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''
---
**Is your feature request related to a problem? Please describe.**

.gitignore (vendored, 2 changes)
View File

@@ -10,6 +10,8 @@
/packages/*/dist/
/packages/*/node_modules/
/packages/vhd-cli/src/commands/index.js
/packages/xen-api/examples/node_modules/
/packages/xen-api/plot.dat

View File

@@ -14,7 +14,7 @@ Returns a promise which rejects as soon as a call to `iteratee` throws or a promi
`opts` is an object that can contain the following options:
- `concurrency`: a number which indicates the maximum number of parallel calls to `iteratee`, defaults to `10`. The value `0` means no concurrency limit.
- `concurrency`: a number which indicates the maximum number of parallel calls to `iteratee`, defaults to `1`
- `signal`: an abort signal to stop the iteration
- `stopOnError`: whether to stop iteration on the first error, or wait for all calls to finish and throw an `AggregateError`, defaults to `true`

View File

@@ -32,7 +32,7 @@ Returns a promise which rejects as soon as a call to `iteratee` throws or a promi
`opts` is an object that can contain the following options:
- `concurrency`: a number which indicates the maximum number of parallel calls to `iteratee`, defaults to `10`. The value `0` means no concurrency limit.
- `concurrency`: a number which indicates the maximum number of parallel calls to `iteratee`, defaults to `1`
- `signal`: an abort signal to stop the iteration
- `stopOnError`: whether to stop iteration on the first error, or wait for all calls to finish and throw an `AggregateError`, defaults to `true`
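For orientation, a minimal usage sketch of these documented options; the `files` and `processFile` names are hypothetical:
```js
import { asyncEach } from '@vates/async-each'

const ac = new AbortController()

// handle up to 10 files in parallel, collect every error instead of
// stopping at the first one, and allow the loop to be aborted
await asyncEach(files, file => processFile(file), {
  concurrency: 10,
  signal: ac.signal,
  stopOnError: false,
})
```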

View File

@@ -9,16 +9,7 @@ class AggregateError extends Error {
}
}
/**
* @template Item
* @param {Iterable<Item>} iterable
* @param {(item: Item, index: number, iterable: Iterable<Item>) => Promise<void>} iteratee
* @returns {Promise<void>}
*/
exports.asyncEach = function asyncEach(iterable, iteratee, { concurrency = 10, signal, stopOnError = true } = {}) {
if (concurrency === 0) {
concurrency = Infinity
}
exports.asyncEach = function asyncEach(iterable, iteratee, { concurrency = 1, signal, stopOnError = true } = {}) {
return new Promise((resolve, reject) => {
const it = (iterable[Symbol.iterator] || iterable[Symbol.asyncIterator]).call(iterable)
const errors = []

View File

@@ -36,7 +36,7 @@ describe('asyncEach', () => {
it('works', async () => {
const iteratee = jest.fn(async () => {})
await asyncEach.call(thisArg, iterable, iteratee, { concurrency: 1 })
await asyncEach.call(thisArg, iterable, iteratee)
expect(iteratee.mock.instances).toEqual(Array.from(values, () => thisArg))
expect(iteratee.mock.calls).toEqual(Array.from(values, (value, index) => [value, index, iterable]))
@@ -66,7 +66,7 @@ describe('asyncEach', () => {
}
})
expect(await rejectionOf(asyncEach(iterable, iteratee, { concurrency: 1, stopOnError: true }))).toBe(error)
expect(await rejectionOf(asyncEach(iterable, iteratee, { stopOnError: true }))).toBe(error)
expect(iteratee).toHaveBeenCalledTimes(2)
})
@@ -91,9 +91,7 @@ describe('asyncEach', () => {
}
})
await expect(asyncEach(iterable, iteratee, { concurrency: 1, signal: ac.signal })).rejects.toThrow(
'asyncEach aborted'
)
await expect(asyncEach(iterable, iteratee, { signal: ac.signal })).rejects.toThrow('asyncEach aborted')
expect(iteratee).toHaveBeenCalledTimes(2)
})
})

View File

@@ -24,7 +24,7 @@
"url": "https://vates.fr"
},
"license": "ISC",
"version": "1.0.0",
"version": "0.1.0",
"engines": {
"node": ">=8.10"
},

View File

@@ -9,7 +9,7 @@ exports.EventListenersManager = class EventListenersManager {
}
add(type, listener) {
let listeners = this._listeners.get(type)
let listeners = this._listeners[type]
if (listeners === undefined) {
listeners = new Set()
this._listeners.set(type, listeners)
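The pair of lines above is the crux of this change: `_listeners` is a `Map` (note the `.set()` call just below), and bracket access on a `Map` reads a plain object property rather than an entry, so it always returns `undefined` for stored types. A short illustration:
```js
const listeners = new Map()
listeners.set('foo', new Set())
listeners.get('foo') // => Set(0) {}
listeners['foo'] // => undefined: bracket access does not read Map entries
```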

View File

@@ -1,67 +0,0 @@
'use strict'
const t = require('tap')
const { EventEmitter } = require('events')
const { EventListenersManager } = require('./')
const noop = Function.prototype
// function spy (impl = Function.prototype) {
// function spy() {
// spy.calls.push([Array.from(arguments), this])
// }
// spy.calls = []
// return spy
// }
function assertListeners(t, event, listeners) {
t.strictSame(t.context.ee.listeners(event), listeners)
}
t.beforeEach(function (t) {
t.context.ee = new EventEmitter()
t.context.em = new EventListenersManager(t.context.ee)
})
t.test('.add adds a listener', function (t) {
t.context.em.add('foo', noop)
assertListeners(t, 'foo', [noop])
t.end()
})
t.test('.add does not add a duplicate listener', function (t) {
t.context.em.add('foo', noop).add('foo', noop)
assertListeners(t, 'foo', [noop])
t.end()
})
t.test('.remove removes a listener', function (t) {
t.context.em.add('foo', noop).remove('foo', noop)
assertListeners(t, 'foo', [])
t.end()
})
t.test('.removeAll removes all listeners of a given type', function (t) {
t.context.em.add('foo', noop).add('bar', noop).removeAll('foo')
assertListeners(t, 'foo', [])
assertListeners(t, 'bar', [noop])
t.end()
})
t.test('.removeAll removes all listeners', function (t) {
t.context.em.add('foo', noop).add('bar', noop).removeAll()
assertListeners(t, 'foo', [])
assertListeners(t, 'bar', [])
t.end()
})

View File

@@ -35,12 +35,8 @@
"url": "https://vates.fr"
},
"license": "ISC",
"version": "1.0.1",
"version": "1.0.0",
"scripts": {
"postversion": "npm publish --access public",
"test": "tap --branches=72"
},
"devDependencies": {
"tap": "^16.2.0"
"postversion": "npm publish --access public"
}
}

View File

@@ -1 +0,0 @@
../../scripts/npmignore

View File

@@ -1,71 +0,0 @@
'use strict'
const LRU = require('lru-cache')
const Fuse = require('fuse-native')
const { VhdSynthetic } = require('vhd-lib')
const { Disposable, fromCallback } = require('promise-toolbox')
const { createLogger } = require('@xen-orchestra/log')
const { warn } = createLogger('vates:fuse-vhd')
// build a stat object, following https://github.com/fuse-friends/fuse-native/blob/master/test/fixtures/stat.js
const stat = st => ({
mtime: st.mtime || new Date(),
atime: st.atime || new Date(),
ctime: st.ctime || new Date(),
size: st.size !== undefined ? st.size : 0,
mode: st.mode === 'dir' ? 16877 : st.mode === 'file' ? 33188 : st.mode === 'link' ? 41453 : st.mode,
uid: st.uid !== undefined ? st.uid : process.getuid(),
gid: st.gid !== undefined ? st.gid : process.getgid(),
})
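// (for reference: 16877 = 0o40755, a rwxr-xr-x directory; 33188 = 0o100644,
// a rw-r--r-- regular file; 41453 = 0o120755, a symbolic link)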
exports.mount = Disposable.factory(async function* mount(handler, diskPath, mountDir) {
const vhd = yield VhdSynthetic.fromVhdChain(handler, diskPath)
const cache = new LRU({
max: 16, // each cached block is 2MB in size
})
await vhd.readBlockAllocationTable()
const fuse = new Fuse(mountDir, {
async readdir(path, cb) {
if (path === '/') {
return cb(null, ['vhd0'])
}
cb(Fuse.ENOENT)
},
async getattr(path, cb) {
if (path === '/') {
return cb(
null,
stat({
mode: 'dir',
size: 4096,
})
)
}
if (path === '/vhd0') {
return cb(
null,
stat({
mode: 'file',
size: vhd.footer.currentSize,
})
)
}
cb(Fuse.ENOENT)
},
read(path, fd, buf, len, pos, cb) {
if (path === '/vhd0') {
return vhd
.readRawData(pos, len, cache, buf)
.then(cb)
}
throw new Error(`read file ${path} does not exist`)
},
})
return new Disposable(
() => fromCallback(() => fuse.unmount()),
fromCallback(() => fuse.mount())
)
})
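This disposable is consumed later in this diff by the RemoteAdapter's `getDisk`; a hedged sketch of that pattern, where `handler`, the disk path, `mountDir` and `doSomethingWith` are placeholders:
```js
import { Disposable } from 'promise-toolbox'
import { mount } from '@vates/fuse-vhd'

await Disposable.use(async function* () {
  // mount the VHD chain; it is unmounted automatically on disposal
  yield mount(handler, 'vm/disk.vhd', mountDir)
  // while mounted, the virtual disk is exposed as `${mountDir}/vhd0`
  await doSomethingWith(`${mountDir}/vhd0`)
})
```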

View File

@@ -1,30 +0,0 @@
{
"name": "@vates/fuse-vhd",
"version": "1.0.0",
"license": "ISC",
"private": false,
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/fuse-vhd",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "@vates/fuse-vhd",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"author": {
"name": "Vates SAS",
"url": "https://vates.fr"
},
"engines": {
"node": ">=10.0"
},
"dependencies": {
"@xen-orchestra/log": "^0.3.0",
"fuse-native": "^2.2.6",
"lru-cache": "^7.14.0",
"promise-toolbox": "^0.21.0",
"vhd-lib": "^4.1.0"
},
"scripts": {
"postversion": "npm publish --access public"
}
}

View File

@@ -1 +0,0 @@
../../scripts/npmignore

View File

@@ -1,41 +0,0 @@
export const INIT_PASSWD = 'NBDMAGIC' // "NBDMAGIC" ensures we're connected to an NBD server
export const OPTS_MAGIC = 'IHAVEOPT' // "IHAVEOPT" starts an option block
export const NBD_OPT_REPLY_MAGIC = 0x3e889045565a9 // magic received during negotiation
export const NBD_OPT_EXPORT_NAME = 1
export const NBD_OPT_ABORT = 2
export const NBD_OPT_LIST = 3
export const NBD_OPT_STARTTLS = 5
export const NBD_OPT_INFO = 6
export const NBD_OPT_GO = 7
export const NBD_FLAG_HAS_FLAGS = 1 << 0
export const NBD_FLAG_READ_ONLY = 1 << 1
export const NBD_FLAG_SEND_FLUSH = 1 << 2
export const NBD_FLAG_SEND_FUA = 1 << 3
export const NBD_FLAG_ROTATIONAL = 1 << 4
export const NBD_FLAG_SEND_TRIM = 1 << 5
export const NBD_FLAG_FIXED_NEWSTYLE = 1 << 0
export const NBD_CMD_FLAG_FUA = 1 << 0
export const NBD_CMD_FLAG_NO_HOLE = 1 << 1
export const NBD_CMD_FLAG_DF = 1 << 2
export const NBD_CMD_FLAG_REQ_ONE = 1 << 3
export const NBD_CMD_FLAG_FAST_ZERO = 1 << 4
export const NBD_CMD_READ = 0
export const NBD_CMD_WRITE = 1
export const NBD_CMD_DISC = 2
export const NBD_CMD_FLUSH = 3
export const NBD_CMD_TRIM = 4
export const NBD_CMD_CACHE = 5
export const NBD_CMD_WRITE_ZEROES = 6
export const NBD_CMD_BLOCK_STATUS = 7
export const NBD_CMD_RESIZE = 8
export const NBD_REQUEST_MAGIC = 0x25609513 // magic number to create a new NBD request to send to the server
export const NBD_REPLY_MAGIC = 0x67446698 // magic number received from the server when reading the response to an NBD request
export const NBD_DEFAULT_PORT = 10809
export const NBD_DEFAULT_BLOCK_SIZE = 64 * 1024
export const MAX_BUFFER_LENGTH = 10 * 1024 * 1024
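These transmission flags are bit masks and are meant to be tested with a bitwise AND; a small sketch using the constants above:
```js
import { NBD_FLAG_HAS_FLAGS, NBD_FLAG_READ_ONLY } from './constants.mjs'

const transmissionFlags = 0b11 // e.g. what a read-only export advertises
if (transmissionFlags & NBD_FLAG_HAS_FLAGS) {
  const readOnly = (transmissionFlags & NBD_FLAG_READ_ONLY) !== 0
  console.log({ readOnly }) // => { readOnly: true }
}
```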

View File

@@ -1,272 +0,0 @@
import assert from 'node:assert'
import { Socket } from 'node:net'
import { connect } from 'node:tls'
import {
INIT_PASSWD,
MAX_BUFFER_LENGTH,
NBD_CMD_READ,
NBD_DEFAULT_BLOCK_SIZE,
NBD_DEFAULT_PORT,
NBD_FLAG_FIXED_NEWSTYLE,
NBD_FLAG_HAS_FLAGS,
NBD_OPT_EXPORT_NAME,
NBD_OPT_REPLY_MAGIC,
NBD_OPT_STARTTLS,
NBD_REPLY_MAGIC,
NBD_REQUEST_MAGIC,
OPTS_MAGIC,
} from './constants.mjs'
// documentation is here: https://github.com/NetworkBlockDevice/nbd/blob/master/doc/proto.md
export default class NbdClient {
_serverAddress
_serverCert
_serverPort
_serverSocket
_useSecureConnection = false
_exportname
_nbDiskBlocks = 0
_receptionBuffer = Buffer.alloc(0)
_sendingBuffer = Buffer.alloc(0)
// ensure the reads are resolved in the right order
_rawReadResolve = []
_rawReadLength = []
// AFAIK, there is no guarantee the server answers in the same order as the queries
_nextCommandQueryId = BigInt(0)
_commandQueries = {} // map of queries waiting for an answer
constructor({ address, port = NBD_DEFAULT_PORT, exportname, cert, secure = true }) {
this._serverAddress = address
this._serverPort = port
this._exportname = exportname
this._serverCert = cert
this._useSecureConnection = secure
}
get nbBlocks() {
return this._nbDiskBlocks
}
_handleData(data) {
if (data !== undefined) {
this._receptionBuffer = Buffer.concat([this._receptionBuffer, Buffer.from(data)])
}
if (this._receptionBuffer.length > MAX_BUFFER_LENGTH) {
throw new Error(
`Buffer grown too much with a total size of ${this._receptionBuffer.length} bytes (last chunk is ${data.length})`
)
}
// if we're waiting for a specific byte length (during the handshake for example, or for block data)
while (this._rawReadResolve.length > 0 && this._receptionBuffer.length >= this._rawReadLength[0]) {
const resolve = this._rawReadResolve.shift()
const waitingForLength = this._rawReadLength.shift()
resolve(this._takeFromBuffer(waitingForLength))
}
if (this._rawReadResolve.length === 0 && this._receptionBuffer.length > 4) {
if (this._receptionBuffer.readInt32BE(0) === NBD_REPLY_MAGIC) {
this._readBlockResponse()
}
// keep the received bytes in the buffer for subsequent use
}
}
async _addListenners() {
const serverSocket = this._serverSocket
serverSocket.on('data', data => this._handleData(data))
serverSocket.on('close', function () {
console.log('Connection closed')
})
serverSocket.on('error', function (err) {
throw err
})
}
async _tlsConnect() {
return new Promise(resolve => {
this._serverSocket = connect(
{
socket: this._serverSocket,
rejectUnauthorized: false,
cert: this._serverCert,
},
resolve
)
this._addListenners()
})
}
async _unsecureConnect() {
this._serverSocket = new Socket()
this._addListenners()
return new Promise((resolve, reject) => {
this._serverSocket.connect(this._serverPort, this._serverAddress, () => {
resolve()
})
})
}
async connect() {
await this._unsecureConnect()
await this._handshake()
}
async disconnect() {
await this._serverSocket.destroy()
}
async _sendOption(option, buffer = Buffer.alloc(0)) {
await this._writeToSocket(OPTS_MAGIC)
await this._writeToSocketInt32(option)
await this._writeToSocketInt32(buffer.length)
await this._writeToSocket(buffer)
assert(await this._readFromSocketInt64(), NBD_OPT_REPLY_MAGIC) // magic number everywhere
assert(await this._readFromSocketInt32(), option) // the option passed
assert(await this._readFromSocketInt32(), 1) // ACK
const length = await this._readFromSocketInt32()
assert(length === 0) // length
}
async _handshake() {
assert(await this._readFromSocket(8), INIT_PASSWD)
assert(await this._readFromSocket(8), OPTS_MAGIC)
const flagsBuffer = await this._readFromSocket(2)
const flags = flagsBuffer.readInt16BE(0)
assert(flags & NBD_FLAG_FIXED_NEWSTYLE) // only FIXED_NEWSTYLE is supported from the server options
await this._writeToSocketInt32(NBD_FLAG_FIXED_NEWSTYLE) // the client also supports NBD_FLAG_C_FIXED_NEWSTYLE
if (this._useSecureConnection) {
// upgrade socket to TLS
await this._sendOption(NBD_OPT_STARTTLS)
await this._tlsConnect()
}
// sending the export name is required and it also implicitly closes the negotiation phase
await this._writeToSocket(Buffer.from(OPTS_MAGIC))
await this._writeToSocketInt32(NBD_OPT_EXPORT_NAME)
await this._writeToSocketInt32(this._exportname.length)
await this._writeToSocket(Buffer.from(this._exportname))
// 8 (export size) + 2 (transmission flags) + 124 (zero padding)
const answer = await this._readFromSocket(134)
const exportSize = answer.readBigUInt64BE(0)
const transmissionFlags = answer.readInt16BE(8)
assert(transmissionFlags & NBD_FLAG_HAS_FLAGS, 'NBD_FLAG_HAS_FLAGS') // must always be 1 by the norm
// the xapi server always sends NBD_FLAG_READ_ONLY (3) as a flag
this._nbDiskBlocks = Number(exportSize / BigInt(NBD_DEFAULT_BLOCK_SIZE))
this._exportSize = exportSize
}
_takeFromBuffer(length) {
const res = Buffer.from(this._receptionBuffer.slice(0, length))
this._receptionBuffer = this._receptionBuffer.slice(length)
return res
}
_readFromSocket(length) {
if (this._receptionBuffer.length >= length) {
return this._takeFromBuffer(length)
}
return new Promise(resolve => {
this._rawReadResolve.push(resolve)
this._rawReadLength.push(length)
})
}
_writeToSocket(buffer) {
return new Promise(resolve => {
this._serverSocket.write(buffer, resolve)
})
}
async _readFromSocketInt32() {
const buffer = await this._readFromSocket(4)
return buffer.readInt32BE(0)
}
async _readFromSocketInt64() {
const buffer = await this._readFromSocket(8)
return buffer.readBigUInt64BE(0)
}
_writeToSocketUInt32(int) {
const buffer = Buffer.alloc(4)
buffer.writeUInt32BE(int)
return this._writeToSocket(buffer)
}
_writeToSocketInt32(int) {
const buffer = Buffer.alloc(4)
buffer.writeInt32BE(int)
return this._writeToSocket(buffer)
}
_writeToSocketInt16(int) {
const buffer = Buffer.alloc(2)
buffer.writeInt16BE(int)
return this._writeToSocket(buffer)
}
_writeToSocketInt64(int) {
const buffer = Buffer.alloc(8)
buffer.writeBigUInt64BE(BigInt(int))
return this._writeToSocket(buffer)
}
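// an NBD simple reply is: 4-byte magic (NBD_REPLY_MAGIC), 4-byte error code,
// 8-byte handle (the query id), then the data payload for a read command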
async _readBlockResponse() {
const magic = await this._readFromSocketInt32()
if (magic !== NBD_REPLY_MAGIC) {
throw new Error(`magic number for block answer is wrong: ${magic}`)
}
// error
const error = await this._readFromSocketInt32()
if (error !== 0) {
throw new Error(`GOT ERROR CODE : ${error}`)
}
const blockQueryId = await this._readFromSocketInt64()
const query = this._commandQueries[blockQueryId]
if (!query) {
throw new Error(` no query associated with id ${blockQueryId} ${Object.keys(this._commandQueries)}`)
}
delete this._commandQueries[blockQueryId]
const data = await this._readFromSocket(query.size)
assert.strictEqual(data.length, query.size)
query.resolve(data)
this._handleData()
}
async readBlock(index, size = NBD_DEFAULT_BLOCK_SIZE) {
const queryId = this._nextCommandQueryId
this._nextCommandQueryId++
const buffer = Buffer.alloc(28)
buffer.writeInt32BE(NBD_REQUEST_MAGIC, 0)
buffer.writeInt16BE(0, 4) // no command flags for a simple block read
buffer.writeInt16BE(NBD_CMD_READ, 6)
buffer.writeBigUInt64BE(queryId, 8)
// byte offset in the raw disk
const offset = BigInt(index) * BigInt(size)
buffer.writeBigUInt64BE(offset, 16)
// ensure we do not read after the end of the export (which would immediately disconnect us)
const maxSize = Math.min(Number(this._exportSize - offset), size)
// size wanted
buffer.writeInt32BE(maxSize, 24)
return new Promise(resolve => {
this._commandQueries[queryId] = {
size: maxSize,
resolve,
}
// write the command at once to avoid concurrency issues
this._writeToSocket(buffer)
})
}
}
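A minimal connection sketch against a reachable NBD export; the address and export name are placeholders:
```js
import NbdClient from './index.mjs'

const client = new NbdClient({
  address: '192.0.2.1', // placeholder
  exportname: 'my-export', // placeholder
  secure: false,
})
await client.connect()
const block = await client.readBlock(0) // first 64 KiB of the export
console.log(block.length)
await client.disconnect()
```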

View File

@@ -1,75 +0,0 @@
import assert from 'assert'
import NbdClient from './index.mjs'
import { spawn } from 'node:child_process'
import fs from 'node:fs/promises'
import { test } from 'tap'
import tmp from 'tmp'
import { pFromCallback } from 'promise-toolbox'
import { asyncEach } from '@vates/async-each'
const FILE_SIZE = 2 * 1024 * 1024
async function createTempFile(size) {
const tmpPath = await pFromCallback(cb => tmp.file(cb))
const data = Buffer.alloc(size, 0)
for (let i = 0; i < size; i += 4) {
data.writeUInt32BE(i, i)
}
await fs.writeFile(tmpPath, data)
return tmpPath
}
test('it works with unsecured network', async tap => {
const path = await createTempFile(FILE_SIZE)
const nbdServer = spawn(
'nbdkit',
[
'file',
path,
'--newstyle', //
'--exit-with-parent',
'--read-only',
'--export-name=MY_SECRET_EXPORT',
],
{
stdio: ['inherit', 'inherit', 'inherit'],
}
)
const client = new NbdClient({
address: 'localhost',
exportname: 'MY_SECRET_EXPORT',
secure: false,
})
await client.connect()
const CHUNK_SIZE = 32 * 1024 // non default size
const indexes = []
for (let i = 0; i < FILE_SIZE / CHUNK_SIZE; i++) {
indexes.push(i)
}
// read multiple blocks in parallel
await asyncEach(
indexes,
async i => {
const block = await client.readBlock(i, CHUNK_SIZE)
let blockOk = true
let firstFail
for (let j = 0; j < CHUNK_SIZE; j += 4) {
const wanted = i * CHUNK_SIZE + j
const found = block.readUInt32BE(j)
blockOk = blockOk && found === wanted
if (!blockOk && firstFail === undefined) {
firstFail = j
}
}
tap.ok(blockOk, `check block ${i} content`)
},
{ concurrency: 8 }
)
await client.disconnect()
nbdServer.kill()
await fs.unlink(path)
})

View File

@@ -1,30 +0,0 @@
{
"private": true,
"name": "@vates/nbd-client",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/nbd-client",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "@vates/nbd-client",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"author": {
"name": "Vates SAS",
"url": "https://vates.fr"
},
"license": "AGPL-3.0-or-later",
"version": "0.0.1",
"engines": {
"node": ">=14.0"
},
"dependencies": {
"@vates/async-each": "^1.0.0",
"@xen-orchestra/async-map": "^0.1.2",
"promise-toolbox": "^0.21.0",
"xen-api": "^1.2.2"
},
"devDependencies": {
"tap": "^16.3.0",
"tmp": "^0.2.1"
}
}

View File

@@ -1,152 +0,0 @@
import NbdClient from '../index.mjs'
import { Xapi } from 'xen-api'
import readline from 'node:readline'
import { stdin as input, stdout as output } from 'node:process'
import { asyncMap } from '@xen-orchestra/async-map'
import { downloadVhd, getFullBlocks, getChangedNbdBlocks } from './utils.mjs'
const xapi = new Xapi({
auth: {
user: 'root',
password: 'vateslab',
},
url: '172.16.210.11',
allowUnauthorized: true,
})
await xapi.connect()
const networks = await xapi.call('network.get_all_records')
const nbdNetworks = Object.values(networks).filter(
network => network.purpose.includes('nbd') || network.purpose.includes('insecure_nbd')
)
let secure = false
if (!nbdNetworks.length) {
console.log(`you don't have any NBD-enabled network`)
console.log(`please add a purpose of nbd (to use TLS) or insecure_nbd to one of the host networks`)
process.exit()
}
const network = nbdNetworks[0]
secure = network.purpose.includes('nbd')
console.log(`we will use network **${network.name_label}** ${secure ? 'with' : 'without'} TLS`)
const rl = readline.createInterface({ input, output })
const question = text => {
return new Promise(resolve => {
rl.question(text, resolve)
})
}
let vmuuid, vmRef
do {
vmuuid = await question('VM uuid ? ')
try {
vmRef = xapi.getObject(vmuuid).$ref
} catch (e) {
// console.log(e)
console.log('maybe the objects were not loaded, trying again')
await new Promise(resolve => setTimeout(resolve, 1000))
}
} while (!vmRef)
const vdiRefs = (
await asyncMap(await xapi.call('VM.get_VBDs', vmRef), async vbd => {
const vdi = await xapi.call('VBD.get_VDI', vbd)
return vdi
})
).filter(vdiRef => vdiRef !== 'OpaqueRef:NULL')
const vdiRef = vdiRefs[0]
const vdi = xapi.getObject(vdiRef)
console.log('Will work on vdi [', vdi.name_label, ']')
const cbt_enabled = vdi.cbt_enabled
console.log('Change block tracking is [', cbt_enabled ? 'enabled' : 'disabled', ']')
if (!cbt_enabled) {
const shouldEnable = await question('would you like to enable it ? Y/n ')
if (shouldEnable === 'Y') {
await xapi.call('VDI.enable_cbt', vdiRef)
console.log('CBT is now enabled for this VDI')
console.log('You must make a snapshot, write some data and relaunch this script to backup changes')
} else {
console.warn('did nothing')
}
process.exit()
}
console.log('will search for suitable snapshots')
const snapshots = vdi.snapshots.map(snapshotRef => xapi.getObject(snapshotRef)).filter(({ cbt_enabled }) => cbt_enabled)
if (snapshots.length < 2) {
throw new Error(`not enough snapshots with CBT enabled, found ${snapshots.length} and 2 are needed`)
}
console.log('found snapshots, will compare the last two snapshots with cbt_enabled')
const snapshotRef = xapi.getObject(snapshots[snapshots.length - 1].uuid).$ref
const snapshotTarget = xapi.getObject(snapshots[snapshots.length - 2].uuid).$ref
console.log('older snapshot is ', xapi.getObject(snapshotRef).snapshot_time)
console.log('newer one is ', xapi.getObject(snapshotTarget).snapshot_time)
console.log('## will get bitmap of changed blocks')
const cbt = Buffer.from(await xapi.call('VDI.list_changed_blocks', snapshotRef, snapshotTarget), 'base64')
console.log('got changes')
console.log('will connect to NBD server')
const nbd = (await xapi.call('VDI.get_nbd_info', snapshotTarget))[0]
if (!nbd) {
console.error('Nbd is not enabled on the host')
console.error('you should add `insecure_nbd` as the `purpose` of a network of this host')
process.exit()
}
nbd.secure = true
// console.log(nbd)
const client = new NbdClient(nbd)
await client.connect()
// @todo: should also handle the last blocks that could be incomplete
const stats = {}
for (const nbBlocksRead of [32, 16, 8, 4, 2, 1]) {
const blockSize = nbBlocksRead * 64 * 1024
stats[blockSize] = {}
const MASK = 0x80
const test = (map, bit) => ((map[bit >> 3] << (bit & 7)) & MASK) !== 0
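// test() checks bit `bit` of the CBT bitmap: byte bit >> 3, most significant
// bit first (hence the 0x80 mask); a set bit means the corresponding 64 KiB
// block changed between the two snapshots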
const changed = []
for (let i = 0; i < (cbt.length * 8) / nbBlocksRead; i++) {
let blockChanged = false
for (let j = 0; j < nbBlocksRead; j++) {
blockChanged = blockChanged || test(cbt, i * nbBlocksRead + j)
}
if (blockChanged) {
changed.push(i)
}
}
console.log(changed.length, 'blocks changed')
for (const concurrency of [32, 16, 8, 4, 2]) {
const { speed } = await getChangedNbdBlocks(client, changed, concurrency, blockSize)
stats[blockSize][concurrency] = speed
}
}
console.log('speed summary')
console.table(stats)
console.log('## will check full download of the base vdi ')
await getFullBlocks({ nbdClient: client, concurrency: 16, nbBlocksRead: 8 }) // 512 KiB reads, a good sweet spot
console.log('## will check vhd delta export size and speed')
console.log('## will check full vhd export size and speed')
await downloadVhd({
xapi,
query: {
format: 'vhd',
vdi: snapshotTarget,
},
})

View File

@@ -1,97 +0,0 @@
import NbdClient from '../index.mjs'
import { Xapi } from 'xen-api'
import { asyncMap } from '@xen-orchestra/async-map'
import { downloadVhd, getFullBlocks } from './utils.mjs'
import fs from 'fs/promises'
const xapi = new Xapi({
auth: {
user: 'root',
password: 'vateslab',
},
url: '172.16.210.11',
allowUnauthorized: true,
})
await xapi.connect()
const vmuuid = '123e4f2b-498e-d0af-15ae-f835a1e9f59f'
let vmRef
do {
try {
vmRef = xapi.getObject(vmuuid).$ref
} catch (e) {
console.log('maybe the objects were not loaded, trying again')
await new Promise(resolve => setTimeout(resolve, 1000))
}
} while (!vmRef)
const vdiRefs = (
await asyncMap(await xapi.call('VM.get_VBDs', vmRef), async vbd => {
const vdi = await xapi.call('VBD.get_VDI', vbd)
return vdi
})
).filter(vdiRef => vdiRef !== 'OpaqueRef:NULL')
const vdiRef = vdiRefs[0]
const vdi = xapi.getObject(vdiRef)
console.log('Will work on vdi [', vdi.name_label, ']')
console.log('will search for suitable snapshots')
const snapshots = vdi.snapshots.map(snapshotRef => xapi.getObject(snapshotRef))
console.log('found snapshots will use the last one for tests')
const snapshotRef = xapi.getObject(snapshots[snapshots.length - 1].uuid).$ref
console.log('will connect to NBD server')
const nbd = (await xapi.call('VDI.get_nbd_info', snapshotRef))[0]
if (!nbd) {
console.error('Nbd is not enabled on the host')
console.error('you should add `insecure_nbd` as the `purpose` of a network of this host')
process.exit()
}
const nbdClient = new NbdClient(nbd)
await nbdClient.connect()
let fd = await fs.open('/tmp/nbd.raw', 'w')
await getFullBlocks({
nbdClient,
concurrency: 8,
nbBlocksRead: 16 /* 1MB block */,
fd,
})
console.log(' done nbd ')
await fd.close()
fd = await fs.open('/tmp/export.raw', 'w')
await downloadVhd({
xapi,
query: {
format: 'raw',
vdi: snapshotRef,
},
fd,
})
fd.close()
fd = await fs.open('/tmp/export.vhd', 'w')
await downloadVhd({
xapi,
query: {
format: 'vhd',
vdi: snapshotRef,
},
fd,
})
fd.close()

View File

@@ -1,117 +0,0 @@
import NbdClient from '../index.mjs'
import { Xapi } from 'xen-api'
import readline from 'node:readline'
import { stdin as input, stdout as output } from 'node:process'
import { asyncMap } from '@xen-orchestra/async-map'
import { downloadVhd, getFullBlocks } from './utils.mjs'
const xapi = new Xapi({
auth: {
user: 'root',
password: 'vateslab',
},
url: '172.16.210.11',
allowUnauthorized: true,
})
await xapi.connect()
const networks = await xapi.call('network.get_all_records')
console.log({ networks })
const nbdNetworks = Object.values(networks).filter(
network => network.purpose.includes('nbd') || network.purpose.includes('insecure_nbd')
)
let secure = false
if (!nbdNetworks.length) {
console.log(`you don't have any NBD-enabled network`)
console.log(`please add a purpose of nbd (to use TLS) or insecure_nbd to one of the host networks`)
process.exit()
}
const network = nbdNetworks[0]
secure = network.purpose.includes('nbd')
console.log(`we will use network **${network.name_label}** ${secure ? 'with' : 'without'} TLS`)
const rl = readline.createInterface({ input, output })
const question = text => {
return new Promise(resolve => {
rl.question(text, resolve)
})
}
let vmuuid, vmRef
do {
vmuuid = '123e4f2b-498e-d0af-15ae-f835a1e9f59f' // await question('VM uuid ? ')
try {
vmRef = xapi.getObject(vmuuid).$ref
} catch (e) {
console.log(e)
console.log('maybe the objects were not loaded, trying again')
}
} while (!vmRef)
const vdiRefs = (
await asyncMap(await xapi.call('VM.get_VBDs', vmRef), async vbd => {
const vdi = await xapi.call('VBD.get_VDI', vbd)
return vdi
})
).filter(vdiRef => vdiRef !== 'OpaqueRef:NULL')
const vdiRef = vdiRefs[0]
const vdi = xapi.getObject(vdiRef)
console.log('Will work on vdi [', vdi.name_label, ']')
console.log('will search for suitable snapshots')
const snapshots = vdi.snapshots.map(snapshotRef => xapi.getObject(snapshotRef))
console.log('found snapshots will use the last one for tests')
const snapshotRef = xapi.getObject(snapshots[snapshots.length - 1].uuid).$ref
console.log('will connect to NBD server')
const nbd = (await xapi.call('VDI.get_nbd_info', snapshotRef))[0]
if (!nbd) {
console.error('Nbd is not enabled on the host')
console.error('you should add `insecure_nbd` as the `purpose` of a network of this host')
process.exit()
}
nbd.secure = secure
const nbdClient = new NbdClient(nbd)
await nbdClient.connect()
const maxDuration =
parseInt(await question('Maximum duration per test in second ? (-1 for unlimited, default 30) '), 10) || 30
console.log('Will start downloading blocks during ', maxDuration, 'seconds')
console.log('## will check the vhd download speed')
const stats = {}
for (const nbBlocksRead of [32, 16, 8, 4, 2, 1]) {
stats[nbBlocksRead * 64 * 1024] = {}
for (const concurrency of [32, 16, 8, 4, 2]) {
const { speed } = await getFullBlocks({ nbdClient, concurrency, nbBlocksRead })
stats[nbBlocksRead * 64 * 1024][concurrency] = speed
}
}
console.log('speed summary')
console.table(stats)
console.log('## will check full vhd export size and speed')
await downloadVhd({
xapi,
query: {
format: 'vhd',
vdi: snapshotRef,
},
})
console.log('## will check full raw export size and speed')
await downloadVhd({
xapi,
query: {
format: 'raw',
vdi: snapshotRef,
},
})
process.exit()

View File

@@ -1,116 +0,0 @@
import { asyncEach } from '@vates/async-each'
import { CancelToken } from 'promise-toolbox'
import zlib from 'node:zlib'
export async function getChangedNbdBlocks(nbdClient, changed, concurrency, blockSize) {
let nbModified = 0
let size = 0
let compressedSize = 0
const start = new Date()
console.log('### with concurrency ', concurrency, ' blockSize ', blockSize / 1024 / 1024, 'MB')
const interval = setInterval(() => {
console.log(`${nbModified} blocks handled in ${new Date() - start} ms`)
}, 5000)
await asyncEach(
changed,
async blockIndex => {
if (new Date() - start > 30000) {
return
}
const data = await nbdClient.readBlock(blockIndex, blockSize)
await new Promise(resolve => {
zlib.gzip(data, { level: zlib.constants.Z_BEST_SPEED }, (_, compressed) => {
compressedSize += compressed.length
resolve()
})
})
size += data?.length ?? 0
nbModified++
},
{
concurrency,
}
)
clearInterval(interval)
console.log('duration :', new Date() - start)
console.log('read : ', size, 'octets, compressed: ', compressedSize, 'ratio ', size / compressedSize)
console.log('speed : ', Math.round(((size / 1024 / 1024) * 1000) / (new Date() - start)), 'MB/s')
return { speed: Math.round(((size / 1024 / 1024) * 1000) / (new Date() - start)) }
}
export async function getFullBlocks({ nbdClient, concurrency = 1, nbBlocksRead = 1, fd, maxDuration = -1 } = {}) {
const blockSize = nbBlocksRead * 64 * 1024
let nbModified = 0
let size = 0
console.log('### with concurrency ', concurrency)
const start = new Date()
console.log(' max nb blocks ', nbdClient.nbBlocks / nbBlocksRead)
function* blockIterator() {
for (let i = 0; i < nbdClient.nbBlocks / nbBlocksRead; i++) {
yield i
}
}
const interval = setInterval(() => {
console.log(`${nbModified} blocks handled in ${new Date() - start} ms`)
}, 5000)
await asyncEach(
blockIterator(),
async blockIndex => {
if (maxDuration > 0 && new Date() - start > maxDuration * 1000) {
return
}
const data = await nbdClient.readBlock(blockIndex, blockSize)
size += data?.length ?? 0
nbModified++
if (fd) {
await fd.write(data, 0, data.length, blockIndex * blockSize)
}
},
{
concurrency,
}
)
clearInterval(interval)
if (new Date() - start < 10000) {
console.warn(
`data set too small or performance too high, the result won't be useful. Please relaunch with a bigger snapshot or a higher maximum data size`
)
}
console.log('duration :', new Date() - start)
console.log('nb blocks : ', nbModified)
console.log('read : ', size, 'octets')
const speed = Math.round(((size / 1024 / 1024) * 1000 * 100) / (new Date() - start)) / 100
console.log('speed : ', speed, 'MB/s')
return { speed }
}
export async function downloadVhd({ xapi, query, fd, maxDuration = -1 } = {}) {
const startStream = new Date()
let sizeStream = 0
let nbChunk = 0
const interval = setInterval(() => {
console.log(`${nbChunk} chunks, ${sizeStream} octets handled in ${new Date() - startStream} ms`)
}, 5000)
const stream = await xapi.getResource(CancelToken.none, '/export_raw_vdi/', {
query,
})
for await (const chunk of stream) {
sizeStream += chunk.length
if (fd) {
await fd.write(chunk)
}
nbChunk++
if (maxDuration > 0 && new Date() - startStream > maxDuration * 1000) {
break
}
}
clearInterval(interval)
console.log('Stream duration :', new Date() - startStream)
console.log('Stream read : ', sizeStream, 'octets')
const speed = Math.round(((sizeStream / 1024 / 1024) * 1000 * 100) / (new Date() - startStream)) / 100
console.log('speed : ', speed, 'MB/s')
}
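As a worked example of the speed formula above: 512 MiB streamed in 4000 ms gives `Math.round((512 * 1000 * 100) / 4000) / 100`, i.e. `12800 / 100` = 128 MB/s; the multiply/divide by 100 pair keeps two decimal places.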

View File

@@ -1,9 +1,6 @@
### `readChunk(stream, [size])`
- returns the next available chunk of data
- like `stream.read()`, a number of bytes can be specified
- returns with less data than expected if the stream has ended
- returns `null` if the stream has ended and no data has been read
- returns `null` if the stream has ended
```js
import { readChunk } from '@vates/read-chunk'
@@ -14,13 +11,3 @@ import { readChunk } from '@vates/read-chunk'
}
})()
```
### `readChunkStrict(stream, [size])`
Similar behavior to `readChunk` but throws if the stream ended before the requested data could be read.
```js
import { readChunkStrict } from '@vates/read-chunk'
const chunk = await readChunkStrict(stream, 1024)
```

View File

@@ -16,12 +16,9 @@ Installation of the [npm package](https://npmjs.org/package/@vates/read-chunk):
## Usage
### `readChunk(stream, [size])`
- returns the next available chunk of data
- like `stream.read()`, a number of bytes can be specified
- returns with less data than expected if the stream has ended
- returns `null` if the stream has ended and no data has been read
- returns `null` if the stream has ended
```js
import { readChunk } from '@vates/read-chunk'
@@ -33,16 +30,6 @@ import { readChunk } from '@vates/read-chunk'
})()
```
### `readChunkStrict(stream, [size])`
Similar behavior to `readChunk` but throws if the stream ended before the requested data could be read.
```js
import { readChunkStrict } from '@vates/read-chunk'
const chunk = await readChunkStrict(stream, 1024)
```
## Contributions
Contributions are _very_ welcomed, either on the documentation or on

View File

@@ -30,22 +30,3 @@ const readChunk = (stream, size) =>
onReadable()
})
exports.readChunk = readChunk
exports.readChunkStrict = async function readChunkStrict(stream, size) {
const chunk = await readChunk(stream, size)
if (chunk === null) {
throw new Error('stream has ended without data')
}
if (size !== undefined && chunk.length !== size) {
const error = new Error('stream has ended with not enough data')
Object.defineProperties(error, {
chunk: {
value: chunk,
},
})
throw error
}
return chunk
}
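The `chunk` property attached above lets callers inspect the partial data; a hedged usage sketch, assuming a `stream` variable:
```js
const { readChunkStrict } = require('@vates/read-chunk')

try {
  await readChunkStrict(stream, 1024)
} catch (error) {
  if (error.message === 'stream has ended with not enough data') {
    console.log('got only', error.chunk.length, 'bytes')
  }
}
```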

View File

@@ -4,7 +4,7 @@
const { Readable } = require('stream')
const { readChunk, readChunkStrict } = require('./')
const { readChunk } = require('./')
const makeStream = it => Readable.from(it, { objectMode: false })
makeStream.obj = Readable.from
@@ -43,27 +43,3 @@ describe('readChunk', () => {
})
})
})
const rejectionOf = promise =>
promise.then(
value => {
throw value
},
error => error
)
describe('readChunkStrict', function () {
it('throws if stream is empty', async () => {
const error = await rejectionOf(readChunkStrict(makeStream([])))
expect(error).toBeInstanceOf(Error)
expect(error.message).toBe('stream has ended without data')
expect(error.chunk).toEqual(undefined)
})
it('throws if stream ends with not enough data', async () => {
const error = await rejectionOf(readChunkStrict(makeStream(['foo', 'bar']), 10))
expect(error).toBeInstanceOf(Error)
expect(error.message).toBe('stream has ended with not enough data')
expect(error.chunk).toEqual(Buffer.from('foobar'))
})
})

View File

@@ -19,7 +19,7 @@
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"version": "1.0.0",
"version": "0.1.2",
"engines": {
"node": ">=8.10"
},

View File

@@ -26,13 +26,7 @@ module.exports = async function main(args) {
await asyncMap(_, async vmDir => {
vmDir = resolve(vmDir)
try {
await adapter.cleanVm(vmDir, {
fixMetadata: fix,
remove,
merge,
logInfo: (...args) => console.log(...args),
logWarn: (...args) => console.warn(...args),
})
await adapter.cleanVm(vmDir, { fixMetadata: fix, remove, merge, onLog: (...args) => console.warn(...args) })
} catch (error) {
console.error('adapter.cleanVm', vmDir, error)
}

View File

@@ -7,8 +7,8 @@
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"dependencies": {
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/backups": "^0.28.0",
"@xen-orchestra/fs": "^3.1.0",
"@xen-orchestra/backups": "^0.22.0",
"@xen-orchestra/fs": "^1.0.1",
"filenamify": "^4.1.0",
"getopts": "^2.2.5",
"lodash": "^4.17.15",
@@ -27,7 +27,7 @@
"scripts": {
"postversion": "npm publish --access public"
},
"version": "0.7.8",
"version": "0.7.1",
"license": "AGPL-3.0-or-later",
"author": {
"name": "Vates SAS",

View File

@@ -6,7 +6,7 @@ const ignoreErrors = require('promise-toolbox/ignoreErrors')
const { compileTemplate } = require('@xen-orchestra/template')
const { limitConcurrency } = require('limit-concurrency-decorator')
const { extractIdsFromSimplePattern } = require('./extractIdsFromSimplePattern.js')
const { extractIdsFromSimplePattern } = require('./_extractIdsFromSimplePattern.js')
const { PoolMetadataBackup } = require('./_PoolMetadataBackup.js')
const { Task } = require('./Task.js')
const { VmBackup } = require('./_VmBackup.js')
@@ -24,34 +24,6 @@ const getAdaptersByRemote = adapters => {
const runTask = (...args) => Task.run(...args).catch(noop) // errors are handled by logs
const DEFAULT_SETTINGS = {
reportWhen: 'failure',
}
const DEFAULT_VM_SETTINGS = {
bypassVdiChainsCheck: false,
checkpointSnapshot: false,
concurrency: 2,
copyRetention: 0,
deleteFirst: false,
exportRetention: 0,
fullInterval: 0,
healthCheckSr: undefined,
healthCheckVmsWithTags: [],
maxMergedDeltasPerRun: 2,
offlineBackup: false,
offlineSnapshot: false,
snapshotRetention: 0,
timeout: 0,
unconditionalSnapshot: false,
vmTimeout: 0,
}
const DEFAULT_METADATA_SETTINGS = {
retentionPoolMetadata: 0,
retentionXoMetadata: 0,
}
exports.Backup = class Backup {
constructor({ config, getAdapter, getConnectedRecord, job, schedule }) {
this._config = config
@@ -70,22 +42,17 @@ exports.Backup = class Backup {
'{job.name}': job.name,
'{vm.name_label}': vm => vm.name_label,
})
}
const { type } = job
const baseSettings = { ...DEFAULT_SETTINGS }
run() {
const type = this._job.type
if (type === 'backup') {
Object.assign(baseSettings, DEFAULT_VM_SETTINGS, config.defaultSettings, config.vm?.defaultSettings)
this.run = this._runVmBackup
return this._runVmBackup()
} else if (type === 'metadataBackup') {
Object.assign(baseSettings, DEFAULT_METADATA_SETTINGS, config.defaultSettings, config.metadata?.defaultSettings)
this.run = this._runMetadataBackup
return this._runMetadataBackup()
} else {
throw new Error(`No runner for the backup type ${type}`)
}
Object.assign(baseSettings, job.settings[''])
this._baseSettings = baseSettings
this._settings = { ...baseSettings, ...job.settings[schedule.id] }
}
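The net effect of this refactoring is a single settings-precedence chain computed once in the constructor; a hypothetical illustration of the merge order (lowest to highest priority), using the names from this hunk:
```js
// per-VM settings as ultimately resolved for a 'backup' job
const resolved = {
  ...DEFAULT_SETTINGS,
  ...DEFAULT_VM_SETTINGS, // or DEFAULT_METADATA_SETTINGS for metadata jobs
  ...config.defaultSettings,
  ...config.vm?.defaultSettings,
  ...job.settings[''], // job-wide settings
  ...job.settings[schedule.id], // per-schedule settings
  ...job.settings[vm.uuid], // per-VM settings, applied later in the run phase
}
```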
async _runMetadataBackup() {
@@ -97,6 +64,13 @@ exports.Backup = class Backup {
}
const config = this._config
const settings = {
...config.defaultSettings,
...config.metadata.defaultSettings,
...job.settings[''],
...job.settings[schedule.id],
}
const poolIds = extractIdsFromSimplePattern(job.pools)
const isEmptyPools = poolIds.length === 0
const isXoMetadata = job.xoMetadata !== undefined
@@ -104,8 +78,6 @@ exports.Backup = class Backup {
throw new Error('no metadata mode found')
}
const settings = this._settings
const { retentionPoolMetadata, retentionXoMetadata } = settings
if (
@@ -217,7 +189,14 @@ exports.Backup = class Backup {
const schedule = this._schedule
const config = this._config
const settings = this._settings
const { settings } = job
const scheduleSettings = {
...config.defaultSettings,
...config.vm.defaultSettings,
...settings[''],
...settings[schedule.id],
}
await Disposable.use(
Disposable.all(
extractIdsFromSimplePattern(job.srs).map(id =>
@@ -245,15 +224,14 @@ exports.Backup = class Backup {
})
)
),
() => (settings.healthCheckSr !== undefined ? this._getRecord('SR', settings.healthCheckSr) : undefined),
async (srs, remoteAdapters, healthCheckSr) => {
async (srs, remoteAdapters) => {
// remove adapters that failed (already handled)
remoteAdapters = remoteAdapters.filter(_ => _ !== undefined)
// remove srs that failed (already handled)
srs = srs.filter(_ => _ !== undefined)
if (remoteAdapters.length === 0 && srs.length === 0 && settings.snapshotRetention === 0) {
if (remoteAdapters.length === 0 && srs.length === 0 && scheduleSettings.snapshotRetention === 0) {
return
}
@@ -263,27 +241,23 @@ exports.Backup = class Backup {
remoteAdapters = getAdaptersByRemote(remoteAdapters)
const allSettings = this._job.settings
const baseSettings = this._baseSettings
const handleVm = vmUuid =>
runTask({ name: 'backup VM', data: { type: 'VM', id: vmUuid } }, () =>
Disposable.use(this._getRecord('VM', vmUuid), vm =>
new VmBackup({
baseSettings,
config,
getSnapshotNameLabel,
healthCheckSr,
job,
// remotes,
remoteAdapters,
schedule,
settings: { ...settings, ...allSettings[vm.uuid] },
settings: { ...scheduleSettings, ...settings[vmUuid] },
srs,
vm,
}).run()
)
)
const { concurrency } = settings
const { concurrency } = scheduleSettings
await asyncMapSettled(vmIds, concurrency === 0 ? handleVm : limitConcurrency(concurrency)(handleVm))
}
)

View File

@@ -1,64 +0,0 @@
'use strict'
const { Task } = require('./Task')
exports.HealthCheckVmBackup = class HealthCheckVmBackup {
#xapi
#restoredVm
constructor({ restoredVm, xapi }) {
this.#restoredVm = restoredVm
this.#xapi = xapi
}
async run() {
return Task.run(
{
name: 'vmstart',
},
async () => {
let restoredVm = this.#restoredVm
const xapi = this.#xapi
const restoredId = restoredVm.uuid
// remove vifs
await Promise.all(restoredVm.$VIFs.map(vif => xapi.callAsync('VIF.destroy', vif.$ref)))
const start = new Date()
// start Vm
await xapi.callAsync(
'VM.start',
restoredVm.$ref,
false, // Start paused?
false // Skip pre-boot checks?
)
const started = new Date()
const timeout = 10 * 60 * 1000
const startDuration = started - start
let remainingTimeout = timeout - startDuration
if (remainingTimeout < 0) {
throw new Error(`VM ${restoredId} not started after ${timeout / 1000} seconds`)
}
// wait for the 'Running' event to be reflected in the local xapi object cache
restoredVm = await xapi.waitObjectState(restoredVm.$ref, vm => vm.power_state === 'Running', {
timeout: remainingTimeout,
})
const running = new Date()
remainingTimeout -= running - started
if (remainingTimeout < 0) {
throw new Error(`local xapi did not get the Running state for VM ${restoredId} after ${timeout / 1000} seconds`)
}
// wait for the guest tool version to be defined
await xapi.waitObjectState(restoredVm.guest_metrics, gm => gm?.PV_drivers_version?.major !== undefined, {
timeout: remainingTimeout,
})
}
)
}
}

View File

@@ -1,7 +1,6 @@
'use strict'
const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
const { synchronized } = require('decorator-synchronized')
const Disposable = require('promise-toolbox/Disposable')
const fromCallback = require('promise-toolbox/fromCallback')
const fromEvent = require('promise-toolbox/fromEvent')
@@ -10,27 +9,22 @@ const groupBy = require('lodash/groupBy.js')
const pickBy = require('lodash/pickBy.js')
const { dirname, join, normalize, resolve } = require('path')
const { createLogger } = require('@xen-orchestra/log')
const { createVhdDirectoryFromStream, openVhd, VhdAbstract, VhdDirectory, VhdSynthetic } = require('vhd-lib')
const { Constants, createVhdDirectoryFromStream, openVhd, VhdAbstract, VhdDirectory, VhdSynthetic } = require('vhd-lib')
const { deduped } = require('@vates/disposable/deduped.js')
const { decorateMethodsWith } = require('@vates/decorate-with')
const { compose } = require('@vates/compose')
const { execFile } = require('child_process')
const { readdir, lstat } = require('fs-extra')
const { readdir, stat } = require('fs-extra')
const { v4: uuidv4 } = require('uuid')
const { ZipFile } = require('yazl')
const zlib = require('zlib')
const { BACKUP_DIR } = require('./_getVmBackupDir.js')
const { cleanVm } = require('./_cleanVm.js')
const { formatFilenameDate } = require('./_filenameDate.js')
const { getTmpDir } = require('./_getTmpDir.js')
const { isMetadataFile } = require('./_backupType.js')
const { isValidXva } = require('./_isValidXva.js')
const { listPartitions, LVM_PARTITION_TYPE } = require('./_listPartitions.js')
const { lvs, pvs } = require('./_lvm.js')
// @todo: this import is marked extraneous, should be fixed when the lib is published
const { mount } = require('@vates/fuse-vhd')
const { asyncEach } = require('@vates/async-each')
const DIR_XO_CONFIG_BACKUPS = 'xo-config-backups'
exports.DIR_XO_CONFIG_BACKUPS = DIR_XO_CONFIG_BACKUPS
@@ -38,7 +32,7 @@ exports.DIR_XO_CONFIG_BACKUPS = DIR_XO_CONFIG_BACKUPS
const DIR_XO_POOL_METADATA_BACKUPS = 'xo-pool-metadata-backups'
exports.DIR_XO_POOL_METADATA_BACKUPS = DIR_XO_POOL_METADATA_BACKUPS
const { debug, warn } = createLogger('xo:backups:RemoteAdapter')
const { warn } = createLogger('xo:backups:RemoteAdapter')
const compareTimestamp = (a, b) => a.timestamp - b.timestamp
@@ -48,13 +42,16 @@ const resolveRelativeFromFile = (file, path) => resolve('/', dirname(file), path
const resolveSubpath = (root, path) => resolve(root, `.${resolve('/', path)}`)
const RE_VHDI = /^vhdi(\d+)$/
async function addDirectory(files, realPath, metadataPath) {
const stats = await lstat(realPath)
if (stats.isDirectory()) {
await asyncMap(await readdir(realPath), file =>
addDirectory(files, realPath + '/' + file, metadataPath + '/' + file)
)
} else if (stats.isFile()) {
try {
const subFiles = await readdir(realPath)
await asyncMap(subFiles, file => addDirectory(files, realPath + '/' + file, metadataPath + '/' + file))
} catch (error) {
if (error == null || error.code !== 'ENOTDIR') {
throw error
}
files.push({
realPath,
metadataPath,
@@ -76,14 +73,11 @@ const debounceResourceFactory = factory =>
}
class RemoteAdapter {
constructor(handler, { debounceResource = res => res, dirMode, vhdDirectoryCompression, useGetDiskLegacy=false } = {}) {
constructor(handler, { debounceResource = res => res, dirMode, vhdDirectoryCompression } = {}) {
this._debounceResource = debounceResource
this._dirMode = dirMode
this._handler = handler
this._vhdDirectoryCompression = vhdDirectoryCompression
this._readCacheListVmBackups = synchronized.withKey()(this._readCacheListVmBackups)
this._useGetDiskLegacy = useGetDiskLegacy
}
get handler() {
@@ -131,9 +125,7 @@ class RemoteAdapter {
}
async *_getPartition(devicePath, partition) {
// the norecovery option is necessary because if the partition is dirty,
// mount will try to fix it, which is impossible because the device is read-only
const options = ['loop', 'ro', 'norecovery']
const options = ['loop', 'ro']
if (partition !== undefined) {
const { size, start } = partition
@@ -230,30 +222,11 @@ class RemoteAdapter {
return promise
}
#removeVmBackupsFromCache(backups) {
for (const [dir, filenames] of Object.entries(
groupBy(
backups.map(_ => _._filename),
dirname
)
)) {
// detached async action, will not reject
this._updateCache(dir + '/cache.json.gz', backups => {
for (const filename of filenames) {
debug('removing cache entry', { entry: filename })
delete backups[filename]
}
})
}
}
async deleteDeltaVmBackups(backups) {
const handler = this._handler
// this will delete the json, unused VHDs will be detected by `cleanVm`
await asyncMapSettled(backups, ({ _filename }) => handler.unlink(_filename))
this.#removeVmBackupsFromCache(backups)
}
async deleteMetadataBackup(backupId) {
@@ -281,8 +254,6 @@ class RemoteAdapter {
await asyncMapSettled(backups, ({ _filename, xva }) =>
Promise.all([handler.unlink(_filename), handler.unlink(resolveRelativeFromFile(_filename, xva))])
)
this.#removeVmBackupsFromCache(backups)
}
deleteVmBackup(file) {
@@ -290,8 +261,7 @@ class RemoteAdapter {
}
async deleteVmBackups(files) {
const metadatas = await asyncMap(files, file => this.readVmBackupMetadata(file))
const { delta, full, ...others } = groupBy(metadatas, 'mode')
const { delta, full, ...others } = groupBy(await asyncMap(files, file => this.readVmBackupMetadata(file)), 'mode')
const unsupportedModes = Object.keys(others)
if (unsupportedModes.length !== 0) {
@@ -303,13 +273,11 @@ class RemoteAdapter {
full !== undefined && this.deleteFullVmBackups(full),
])
await asyncMap(new Set(files.map(file => dirname(file))), dir =>
// - don't merge in main process, unused VHDs will be merged in the next backup run
// - don't error in case this fails:
// - if lock is already being held, a backup is running and cleanVm will be ran at the end
// - otherwise, there is nothing more we can do, orphan file will be cleaned in the future
this.cleanVm(dir, { remove: true, logWarn: warn }).catch(noop)
)
const dirs = new Set(files.map(file => dirname(file)))
for (const dir of dirs) {
// don't merge in main process, unused VHDs will be merged in the next backup run
await this.cleanVm(dir, { remove: true, onLog: warn })
}
}
#getCompressionType() {
@@ -317,17 +285,14 @@ class RemoteAdapter {
}
#useVhdDirectory() {
return this.handler.useVhdDirectory()
return this.handler.type === 's3'
}
#useAlias() {
return this.#useVhdDirectory()
}
async *#getDiskLegacy(diskId) {
const RE_VHDI = /^vhdi(\d+)$/
async *getDisk(diskId) {
const handler = this._handler
const diskPath = handler._getFilePath('/' + diskId)
@@ -357,20 +322,6 @@ class RemoteAdapter {
}
}
async *getDisk(diskId) {
if(this._useGetDiskLegacy){
yield * this.#getDiskLegacy(diskId)
return
}
const handler = this._handler
// this is a disposable
const mountDir = yield getTmpDir()
// this is also a disposable
yield mount(handler, diskId, mountDir)
// this will yield disk path to caller
yield `${mountDir}/vhd0`
}
// partitionId values:
//
// - undefined: raw disk
@@ -421,25 +372,18 @@ class RemoteAdapter {
listPartitionFiles(diskId, partitionId, path) {
return Disposable.use(this.getPartition(diskId, partitionId), async rootPath => {
path = resolveSubpath(rootPath, path)
const entriesMap = {}
await asyncEach(
await readdir(path),
async name => {
try {
const stats = await lstat(`${path}/${name}`)
if (stats.isDirectory()) {
entriesMap[name + '/'] = {}
} else if (stats.isFile()) {
entriesMap[name] = {}
}
} catch (error) {
if (error == null || error.code !== 'ENOENT') {
throw error
}
await asyncMap(await readdir(path), async name => {
try {
const stats = await stat(`${path}/${name}`)
entriesMap[stats.isDirectory() ? `${name}/` : name] = {}
} catch (error) {
if (error == null || error.code !== 'ENOENT') {
throw error
}
},
{ concurrency: 1 }
)
}
})
return entriesMap
})
@@ -504,114 +448,34 @@ class RemoteAdapter {
return backupsByPool
}
#getVmBackupsCache(vmUuid) {
return `${BACKUP_DIR}/${vmUuid}/cache.json.gz`
}
async #readCache(path) {
try {
return JSON.parse(await fromCallback(zlib.gunzip, await this.handler.readFile(path)))
} catch (error) {
if (error.code !== 'ENOENT') {
warn('#readCache', { error, path })
}
}
}
_updateCache = synchronized.withKey()(this._updateCache)
// eslint-disable-next-line no-dupe-class-members
async _updateCache(path, fn) {
const cache = await this.#readCache(path)
if (cache !== undefined) {
fn(cache)
await this.#writeCache(path, cache)
}
}
async #writeCache(path, data) {
try {
await this.handler.writeFile(path, await fromCallback(zlib.gzip, JSON.stringify(data)), { flags: 'w' })
} catch (error) {
warn('#writeCache', { error, path })
}
}
async invalidateVmBackupListCache(vmUuid) {
await this.handler.unlink(this.#getVmBackupsCache(vmUuid))
}
async #getCachabledDataListVmBackups(dir) {
debug('generating cache', { path: dir })
async listVmBackups(vmUuid, predicate) {
const handler = this._handler
const backups = {}
const backups = []
try {
const files = await handler.list(dir, {
const files = await handler.list(`${BACKUP_DIR}/${vmUuid}`, {
filter: isMetadataFile,
prependDir: true,
})
await asyncMap(files, async file => {
try {
const metadata = await this.readVmBackupMetadata(file)
// inject an id usable by importVmBackupNg()
metadata.id = metadata._filename
backups[file] = metadata
if (predicate === undefined || predicate(metadata)) {
// inject an id usable by importVmBackupNg()
metadata.id = metadata._filename
backups.push(metadata)
}
} catch (error) {
warn(`can't read vm backup metadata`, { error, file, dir })
warn(`listVmBackups ${file}`, { error })
}
})
return backups
} catch (error) {
let code
if (error == null || ((code = error.code) !== 'ENOENT' && code !== 'ENOTDIR')) {
throw error
}
}
}
// use _ to mark this method as private by convention
// since we decorate it with synchronized.withKey in the constructor
// and # functions are not writable.
//
// read the list of backup of a Vm from cache
// if cache is missing or broken => regenerate it and return
async _readCacheListVmBackups(vmUuid) {
const path = this.#getVmBackupsCache(vmUuid)
const cache = await this.#readCache(path)
if (cache !== undefined) {
debug('found VM backups cache, using it', { path })
return cache
}
// nothing cached, or cache unreadable => regenerate it
const backups = await this.#getCachabledDataListVmBackups(`${BACKUP_DIR}/${vmUuid}`)
if (backups === undefined) {
return
}
// detached async action, will not reject
this.#writeCache(path, backups)
return backups
}
async listVmBackups(vmUuid, predicate) {
const backups = []
const cached = await this._readCacheListVmBackups(vmUuid)
if (cached === undefined) {
return []
}
Object.values(cached).forEach(metadata => {
if (predicate === undefined || predicate(metadata)) {
backups.push(metadata)
}
})
return backups.sort(compareTimestamp)
}
@@ -637,41 +501,18 @@ class RemoteAdapter {
return backups.sort(compareTimestamp)
}
async writeVmBackupMetadata(vmUuid, metadata) {
const path = `/${BACKUP_DIR}/${vmUuid}/${formatFilenameDate(metadata.timestamp)}.json`
await this.handler.outputFile(path, JSON.stringify(metadata), {
dirMode: this._dirMode,
})
// will not throw
this._updateCache(this.#getVmBackupsCache(vmUuid), backups => {
debug('adding cache entry', { entry: path })
backups[path] = {
...metadata,
// these values are required in the cache
_filename: path,
id: path,
}
})
return path
}
async writeVhd(path, input, { checksum = true, validator = noop, writeBlockConcurrency, nbdClient } = {}) {
async writeVhd(path, input, { checksum = true, validator = noop } = {}) {
const handler = this._handler
if (this.#useVhdDirectory()) {
const dataPath = `${dirname(path)}/data/${uuidv4()}.vhd`
await createVhdDirectoryFromStream(handler, dataPath, input, {
concurrency: writeBlockConcurrency,
concurrency: 16,
compression: this.#getCompressionType(),
async validator() {
await input.task
return validator.apply(this, arguments)
},
nbdClient,
})
await VhdAbstract.createAlias(handler, path, dataPath)
} else {
@@ -690,27 +531,46 @@ class RemoteAdapter {
})
}
// open the hierarchy of ancestors until we find a full one
async _createSyntheticStream(handler, path) {
const disposableSynthetic = await VhdSynthetic.fromVhdChain(handler, path)
async _createSyntheticStream(handler, paths) {
let disposableVhds = []
// if it's a path: open the whole hierarchy of parents
if (typeof paths === 'string') {
let vhd
let vhdPath = paths
do {
const disposable = await openVhd(handler, vhdPath)
vhd = disposable.value
disposableVhds.push(disposable)
vhdPath = resolveRelativeFromFile(vhdPath, vhd.header.parentUnicodeName)
} while (vhd.footer.diskType !== Constants.DISK_TYPES.DYNAMIC)
} else {
// only open the list of path given
disposableVhds = paths.map(path => openVhd(handler, path))
}
// I don't want the vhds to be disposed on return
// but only when the stream is done (or failed)
const disposables = await Disposable.all(disposableVhds)
const vhds = disposables.value
let disposed = false
const disposeOnce = async () => {
if (!disposed) {
disposed = true
try {
await disposableSynthetic.dispose()
await disposables.dispose()
} catch (error) {
warn('openVhd: failed to dispose VHDs', { error })
warn('_createSyntheticStream: failed to dispose VHDs', { error })
}
}
}
const synthetic = disposableSynthetic.value
const synthetic = new VhdSynthetic(vhds)
await synthetic.readHeaderAndFooter()
await synthetic.readBlockAllocationTable()
const stream = await synthetic.stream()
stream.on('end', disposeOnce)
stream.on('close', disposeOnce)
stream.on('error', disposeOnce)
@@ -743,10 +603,7 @@ class RemoteAdapter {
}
async readVmBackupMetadata(path) {
// _filename is a private field used to compute the backup id
//
// it's enumerable to make it cacheable
return { ...JSON.parse(await this._handler.readFile(path)), _filename: path }
return Object.defineProperty(JSON.parse(await this._handler.readFile(path)), '_filename', { value: path })
}
}

View File

@@ -3,10 +3,8 @@
const CancelToken = require('promise-toolbox/CancelToken')
const Zone = require('node-zone')
const logAfterEnd = log => {
const error = new Error('task has already ended')
error.log = log
throw error
const logAfterEnd = () => {
throw new Error('task has already ended')
}
const noop = Function.prototype

View File

@@ -45,18 +45,7 @@ const forkDeltaExport = deltaExport =>
})
class VmBackup {
constructor({
config,
getSnapshotNameLabel,
healthCheckSr,
job,
remoteAdapters,
remotes,
schedule,
settings,
srs,
vm,
}) {
constructor({ config, getSnapshotNameLabel, job, remoteAdapters, remotes, schedule, settings, srs, vm }) {
if (vm.other_config['xo:backup:job'] === job.id && 'start' in vm.blocked_operations) {
// don't match replicated VMs created by this very job otherwise they
// will be replicated again and again
@@ -66,6 +55,7 @@ class VmBackup {
this.config = config
this.job = job
this.remoteAdapters = remoteAdapters
this.remotes = remotes
this.scheduleId = schedule.id
this.timestamp = undefined
@@ -79,7 +69,6 @@ class VmBackup {
this._fullVdisRequired = undefined
this._getSnapshotNameLabel = getSnapshotNameLabel
this._isDelta = job.mode === 'delta'
this._healthCheckSr = healthCheckSr
this._jobId = job.id
this._jobSnapshots = undefined
this._xapi = vm.$xapi
@@ -106,6 +95,7 @@ class VmBackup {
: [FullBackupWriter, FullReplicationWriter]
const allSettings = job.settings
Object.keys(remoteAdapters).forEach(remoteId => {
const targetSettings = {
...settings,
@@ -128,49 +118,35 @@ class VmBackup {
}
// calls fn for each writer, warns of any errors, and throws only if there are no writers left
async _callWriters(fn, step, parallel = true) {
async _callWriters(fn, warnMessage, parallel = true) {
const writers = this._writers
const n = writers.size
if (n === 0) {
return
}
async function callWriter(writer) {
const { name } = writer.constructor
try {
debug('writer step starting', { step, writer: name })
await fn(writer)
debug('writer step succeeded', { duration: step, writer: name })
} catch (error) {
writers.delete(writer)
warn('writer step failed', { error, step, writer: name })
// these two steps are the only ones that are not already in their own sub-tasks
if (step === 'writer.checkBaseVdis()' || step === 'writer.beforeBackup()') {
Task.warning(
`the writer ${name} has failed the step ${step} with error ${error.message}. It won't be used anymore in this job execution.`
)
}
throw error
}
}
if (n === 1) {
const [writer] = writers
return callWriter(writer)
try {
await fn(writer)
} catch (error) {
writers.delete(writer)
throw error
}
return
}
const errors = []
await (parallel ? asyncMap : asyncEach)(writers, async function (writer) {
try {
await callWriter(writer)
await fn(writer)
} catch (error) {
errors.push(error)
this.delete(writer)
warn(warnMessage, { error, writer: writer.constructor.name })
}
})
if (writers.size === 0) {
throw new AggregateError(errors, 'all targets have failed, step: ' + step)
throw new AggregateError(errors, 'all targets have failed, step: ' + warnMessage)
}
}
@@ -197,10 +173,7 @@ class VmBackup {
const settings = this._settings
const doSnapshot =
settings.unconditionalSnapshot ||
this._isDelta ||
(!settings.offlineBackup && vm.power_state === 'Running') ||
settings.snapshotRetention !== 0
this._isDelta || (!settings.offlineBackup && vm.power_state === 'Running') || settings.snapshotRetention !== 0
if (doSnapshot) {
await Task.run({ name: 'snapshot' }, async () => {
if (!settings.bypassVdiChainsCheck) {
@@ -210,7 +183,6 @@ class VmBackup {
const snapshotRef = await vm[settings.checkpointSnapshot ? '$checkpoint' : '$snapshot']({
ignoreNobakVdis: true,
name_label: this._getSnapshotNameLabel(vm),
unplugVusbs: true,
})
this.timestamp = Date.now()
@@ -332,17 +304,22 @@ class VmBackup {
}
async _removeUnusedSnapshots() {
const allSettings = this.job.settings
const baseSettings = this._baseSettings
const jobSettings = this.job.settings
const baseVmRef = this._baseVm?.$ref
const { config } = this
const baseSettings = {
...config.defaultSettings,
...config.metadata.defaultSettings,
...jobSettings[''],
}
const snapshotsPerSchedule = groupBy(this._jobSnapshots, _ => _.other_config['xo:backup:schedule'])
const xapi = this._xapi
await asyncMap(Object.entries(snapshotsPerSchedule), ([scheduleId, snapshots]) => {
const settings = {
...baseSettings,
...allSettings[scheduleId],
...allSettings[this.vm.uuid],
...jobSettings[scheduleId],
...jobSettings[this.vm.uuid],
}
return asyncMap(getOldEntries(settings.snapshotRetention, snapshots), ({ $ref }) => {
if ($ref !== baseVmRef) {
@@ -421,24 +398,6 @@ class VmBackup {
this._fullVdisRequired = fullVdisRequired
}
async _healthCheck() {
const settings = this._settings
if (this._healthCheckSr === undefined) {
return
}
// check if current VM has tags
const { tags } = this.vm
const intersect = settings.healthCheckVmsWithTags.some(t => tags.includes(t))
if (settings.healthCheckVmsWithTags.length !== 0 && !intersect) {
return
}
await this._callWriters(writer => writer.healthCheck(this._healthCheckSr), 'writer.healthCheck()')
}
async run($defer) {
const settings = this._settings
assert(
@@ -448,9 +407,7 @@ class VmBackup {
await this._callWriters(async writer => {
await writer.beforeBackup()
$defer(async () => {
await writer.afterBackup()
})
$defer(() => writer.afterBackup())
}, 'writer.beforeBackup()')
await this._fetchJobSnapshots()
@@ -486,7 +443,6 @@ class VmBackup {
await this._fetchJobSnapshots()
await this._removeUnusedSnapshots()
}
await this._healthCheck()
}
}
exports.VmBackup = VmBackup

View File

@@ -5,9 +5,9 @@
const rimraf = require('rimraf')
const tmp = require('tmp')
const fs = require('fs-extra')
const uuid = require('uuid')
const { getHandler } = require('@xen-orchestra/fs')
const { pFromCallback } = require('promise-toolbox')
const crypto = require('crypto')
const { RemoteAdapter } = require('./RemoteAdapter')
const { VHDFOOTER, VHDHEADER } = require('./tests.fixtures.js')
const { VhdFile, Constants, VhdDirectory, VhdAbstract } = require('vhd-lib')
@@ -34,8 +34,7 @@ afterEach(async () => {
await handler.forget()
})
const uniqueId = () => uuid.v1()
const uniqueIdBuffer = () => uuid.v1({}, Buffer.alloc(16))
const uniqueId = () => crypto.randomBytes(16).toString('hex')
async function generateVhd(path, opts = {}) {
let vhd
@@ -54,9 +53,10 @@ async function generateVhd(path, opts = {}) {
}
vhd.header = { ...VHDHEADER, ...opts.header }
vhd.footer = { ...VHDFOOTER, ...opts.footer, uuid: uniqueIdBuffer() }
vhd.footer = { ...VHDFOOTER, ...opts.footer }
vhd.footer.uuid = Buffer.from(crypto.randomBytes(16))
if (vhd.header.parentUuid) {
if (vhd.header.parentUnicodeName) {
vhd.footer.diskType = Constants.DISK_TYPES.DIFFERENCING
} else {
vhd.footer.diskType = Constants.DISK_TYPES.DYNAMIC
@@ -78,53 +78,48 @@ test('It remove broken vhd', async () => {
await handler.writeFile(`${basePath}/notReallyAVhd.vhd`, 'I AM NOT A VHD')
expect((await handler.list(basePath)).length).toEqual(1)
let loggued = ''
const logInfo = message => {
const onLog = message => {
loggued += message
}
await adapter.cleanVm('/', { remove: false, logInfo, logWarn: logInfo, lock: false })
expect(loggued).toEqual(`VHD check error`)
await adapter.cleanVm('/', { remove: false, onLog })
expect(loggued).toEqual(`error while checking the VHD with path /${basePath}/notReallyAVhd.vhd`)
// not removed
expect((await handler.list(basePath)).length).toEqual(1)
// really remove it
await adapter.cleanVm('/', { remove: true, logInfo, logWarn: () => {}, lock: false })
await adapter.cleanVm('/', { remove: true, onLog })
expect((await handler.list(basePath)).length).toEqual(0)
})
test('it remove vhd with missing or multiple ancestors', async () => {
// one with a broken parent, should be deleted
// one with a broken parent
await generateVhd(`${basePath}/abandonned.vhd`, {
header: {
parentUnicodeName: 'gone.vhd',
parentUuid: uniqueIdBuffer(),
parentUid: Buffer.from(crypto.randomBytes(16)),
},
})
// one orphan, which is a full vhd, no parent : should stay
// one orphan, which is a full vhd, no parent
const orphan = await generateVhd(`${basePath}/orphan.vhd`)
// a child to the orphan in the metadata : should stay
// a child to the orphan
await generateVhd(`${basePath}/child.vhd`, {
header: {
parentUnicodeName: 'orphan.vhd',
parentUuid: orphan.footer.uuid,
parentUid: orphan.footer.uuid,
},
})
await handler.writeFile(
`metadata.json`,
JSON.stringify({
mode: 'delta',
vhds: [`${basePath}/child.vhd`, `${basePath}/abandonned.vhd`],
}),
{ flags: 'w' }
)
// clean
let loggued = ''
const logInfo = message => {
const onLog = message => {
loggued += message + '\n'
}
await adapter.cleanVm('/', { remove: true, logInfo, logWarn: logInfo, lock: false })
await adapter.cleanVm('/', { remove: true, onLog })
const deletedOrphanVhd = loggued.match(/deleting orphan VHD/g) || []
expect(deletedOrphanVhd.length).toEqual(1) // only one vhd should have been deleted
const deletedAbandonnedVhd = loggued.match(/abandonned.vhd is missing/g) || []
expect(deletedAbandonnedVhd.length).toEqual(1) // and it must be abandonned.vhd
// we don't test the files on disk, since they will all be marked as unused and deleted without a metadata.json file
})
@@ -152,17 +147,19 @@ test('it remove backup meta data referencing a missing vhd in delta backup', asy
await generateVhd(`${basePath}/child.vhd`, {
header: {
parentUnicodeName: 'orphan.vhd',
parentUuid: orphan.footer.uuid,
parentUid: orphan.footer.uuid,
},
})
let loggued = ''
const logInfo = message => {
const onLog = message => {
loggued += message + '\n'
}
await adapter.cleanVm('/', { remove: true, logInfo, logWarn: logInfo, lock: false })
let matched = loggued.match(/deleting unused VHD/g) || []
await adapter.cleanVm('/', { remove: true, onLog })
let matched = loggued.match(/deleting unused VHD /g) || []
expect(matched.length).toEqual(1) // only one vhd should have been deleted
matched = loggued.match(/abandonned.vhd is unused/g) || []
expect(matched.length).toEqual(1) // and it must be abandonned.vhd
// a missing vhd causes clean to remove all vhds
await handler.writeFile(
@@ -179,8 +176,8 @@ test('it remove backup meta data referencing a missing vhd in delta backup', asy
{ flags: 'w' }
)
loggued = ''
await adapter.cleanVm('/', { remove: true, logInfo, logWarn: () => {}, lock: false })
matched = loggued.match(/deleting unused VHD/g) || []
await adapter.cleanVm('/', { remove: true, onLog })
matched = loggued.match(/deleting unused VHD /g) || []
expect(matched.length).toEqual(2) // all vhds (orphan and child) should have been deleted
})
@@ -204,28 +201,30 @@ test('it merges delta of non destroyed chain', async () => {
const child = await generateVhd(`${basePath}/child.vhd`, {
header: {
parentUnicodeName: 'orphan.vhd',
parentUuid: orphan.footer.uuid,
parentUid: orphan.footer.uuid,
},
})
// a grand child
await generateVhd(`${basePath}/grandchild.vhd`, {
header: {
parentUnicodeName: 'child.vhd',
parentUuid: child.footer.uuid,
parentUid: child.footer.uuid,
},
})
let loggued = []
const logInfo = message => {
const onLog = message => {
loggued.push(message)
}
await adapter.cleanVm('/', { remove: true, logInfo, logWarn: logInfo, lock: false })
expect(loggued[0]).toEqual(`incorrect backup size in metadata`)
await adapter.cleanVm('/', { remove: true, onLog })
expect(loggued[0]).toEqual(`the parent /${basePath}/orphan.vhd of the child /${basePath}/child.vhd is unused`)
expect(loggued[1]).toEqual(`incorrect size in metadata: 12000 instead of 209920`)
loggued = []
await adapter.cleanVm('/', { remove: true, merge: true, logInfo, logWarn: () => {}, lock: false })
const [merging] = loggued
expect(merging).toEqual(`merging VHD chain`)
await adapter.cleanVm('/', { remove: true, merge: true, onLog })
const [unused, merging] = loggued
expect(unused).toEqual(`the parent /${basePath}/orphan.vhd of the child /${basePath}/child.vhd is unused`)
expect(merging).toEqual(`merging /${basePath}/child.vhd into /${basePath}/orphan.vhd`)
const metadata = JSON.parse(await handler.readFile(`metadata.json`))
// size should be the size of children + grand children after the merge
@@ -255,7 +254,7 @@ test('it finish unterminated merge ', async () => {
const child = await generateVhd(`${basePath}/child.vhd`, {
header: {
parentUnicodeName: 'orphan.vhd',
parentUuid: orphan.footer.uuid,
parentUid: orphan.footer.uuid,
},
})
// a merge in progress file
@@ -271,7 +270,7 @@ test('it finish unterminated merge ', async () => {
})
)
await adapter.cleanVm('/', { remove: true, merge: true, logWarn: () => {}, lock: false })
await adapter.cleanVm('/', { remove: true, merge: true })
// merging is already tested in vhd-lib, don't retest it here (and these vhds are as empty as my stomach at 12:12)
// only check deletion
@@ -311,7 +310,7 @@ describe('tests multiple combination ', () => {
mode: vhdMode,
header: {
parentUnicodeName: 'gone.vhd',
parentUuid: uniqueIdBuffer(),
parentUid: crypto.randomBytes(16),
},
})
@@ -325,7 +324,7 @@ describe('tests multiple combination ', () => {
mode: vhdMode,
header: {
parentUnicodeName: 'ancestor.vhd' + (useAlias ? '.alias.vhd' : ''),
parentUuid: ancestor.footer.uuid,
parentUid: ancestor.footer.uuid,
},
})
// a grand child vhd in metadata
@@ -334,7 +333,7 @@ describe('tests multiple combination ', () => {
mode: vhdMode,
header: {
parentUnicodeName: 'child.vhd' + (useAlias ? '.alias.vhd' : ''),
parentUuid: child.footer.uuid,
parentUid: child.footer.uuid,
},
})
@@ -349,7 +348,7 @@ describe('tests multiple combination ', () => {
mode: vhdMode,
header: {
parentUnicodeName: 'cleanAncestor.vhd' + (useAlias ? '.alias.vhd' : ''),
parentUuid: cleanAncestor.footer.uuid,
parentUid: cleanAncestor.footer.uuid,
},
})
@@ -378,7 +377,7 @@ describe('tests multiple combination ', () => {
})
)
await adapter.cleanVm('/', { remove: true, merge: true, logWarn: () => {}, lock: false })
await adapter.cleanVm('/', { remove: true, merge: true })
const metadata = JSON.parse(await handler.readFile(`metadata.json`))
// size should be the size of children + grand children + clean after the merge
@@ -414,7 +413,7 @@ describe('tests multiple combination ', () => {
test('it cleans orphan merge states ', async () => {
await handler.writeFile(`${basePath}/.orphan.vhd.merge.json`, '')
await adapter.cleanVm('/', { remove: true, logWarn: () => {}, lock: false })
await adapter.cleanVm('/', { remove: true })
expect(await handler.list(basePath)).toEqual([])
})
@@ -429,11 +428,7 @@ test('check Aliases should work alone', async () => {
await generateVhd(`vhds/data/missingalias.vhd`)
await checkAliases(['vhds/missingData.alias.vhd', 'vhds/ok.alias.vhd'], 'vhds/data', {
remove: true,
handler,
logWarn: () => {},
})
await checkAliases(['vhds/missingData.alias.vhd', 'vhds/ok.alias.vhd'], 'vhds/data', { remove: true, handler })
// only ok has survived
const alias = (await handler.list('vhds')).filter(f => f.endsWith('.vhd'))

View File

@@ -1,27 +1,22 @@
'use strict'
const assert = require('assert')
const sum = require('lodash/sum')
const UUID = require('uuid')
const { asyncMap } = require('@xen-orchestra/async-map')
const { Constants, openVhd, VhdAbstract, VhdFile } = require('vhd-lib')
const { Constants, mergeVhd, openVhd, VhdAbstract, VhdFile } = require('vhd-lib')
const { isVhdAlias, resolveVhdAlias } = require('vhd-lib/aliases')
const { dirname, resolve } = require('path')
const { DISK_TYPES } = Constants
const { isMetadataFile, isVhdFile, isXvaFile, isXvaSumFile } = require('./_backupType.js')
const { limitConcurrency } = require('limit-concurrency-decorator')
const { mergeVhdChain } = require('vhd-lib/merge')
const { Task } = require('./Task.js')
const { Disposable } = require('promise-toolbox')
const handlerPath = require('@xen-orchestra/fs/path')
// checking the size of a vhd directory is costly
// 1 HTTP query per 1000 blocks
// we only check the size if all the VHDs are VhdFiles
function shouldComputeVhdsSize(handler, vhds) {
if (handler.isEncrypted) {
return false
}
function shouldComputeVhdsSize(vhds) {
return vhds.every(vhd => vhd instanceof VhdFile)
}
@@ -29,49 +24,86 @@ const computeVhdsSize = (handler, vhdPaths) =>
Disposable.use(
vhdPaths.map(vhdPath => openVhd(handler, vhdPath)),
async vhds => {
if (shouldComputeVhdsSize(handler, vhds)) {
if (shouldComputeVhdsSize(vhds)) {
const sizes = await asyncMap(vhds, vhd => vhd.getSize())
return sum(sizes)
}
}
)
// chain is [ ancestor, child_1, ..., child_n ]
async function _mergeVhdChain(handler, chain, { logInfo, remove, merge, mergeBlockConcurrency }) {
// chain is an array of VHDs from child to parent
//
// the whole chain will be merged into parent, parent will be renamed to child
// and all the others will be deleted
async function mergeVhdChain(chain, { handler, onLog, remove, merge }) {
assert(chain.length >= 2)
let child = chain[0]
const parent = chain[chain.length - 1]
const children = chain.slice(0, -1).reverse()
chain
.slice(1)
.reverse()
.forEach(parent => {
onLog(`the parent ${parent} of the child ${child} is unused`)
})
if (merge) {
logInfo(`merging VHD chain`, { chain })
// `mergeVhd` does not work with a stream, either
// - make it accept a stream
// - or create synthetic VHD which is not a stream
if (children.length !== 1) {
// TODO: implement merging multiple children
children.length = 1
child = children[0]
}
onLog(`merging ${child} into ${parent}`)
let done, total
const handle = setInterval(() => {
if (done !== undefined) {
logInfo('merge in progress', {
done,
parent: chain[0],
progress: Math.round((100 * done) / total),
total,
})
onLog(`merging ${child}: ${done}/${total}`)
}
}, 10e3)
try {
return await mergeVhdChain(handler, chain, {
logInfo,
mergeBlockConcurrency,
const mergedSize = await mergeVhd(
handler,
parent,
handler,
child,
// children.length === 1
// ? child
// : await createSyntheticStream(handler, children),
{
onProgress({ done: d, total: t }) {
done = d
total = t
},
removeUnused: remove,
})
} finally {
clearInterval(handle)
}
}
)
clearInterval(handle)
await Promise.all([
VhdAbstract.rename(handler, parent, child),
asyncMap(children.slice(0, -1), child => {
onLog(`the VHD ${child} is unused`)
if (remove) {
onLog(`deleting unused VHD ${child}`)
return VhdAbstract.unlink(handler, child)
}
}),
])
return mergedSize
}
}
const noop = Function.prototype
const INTERRUPTED_VHDS_REG = /^\.(.+)\.merge.json$/
const listVhds = async (handler, vmDir, logWarn) => {
const listVhds = async (handler, vmDir) => {
const vhds = new Set()
const aliases = {}
const interruptedVhds = new Map()
@@ -91,23 +123,12 @@ const listVhds = async (handler, vmDir, logWarn) => {
filter: file => isVhdFile(file) || INTERRUPTED_VHDS_REG.test(file),
})
aliases[vdiDir] = list.filter(vhd => isVhdAlias(vhd)).map(file => `${vdiDir}/${file}`)
await asyncMap(list, async file => {
list.forEach(file => {
const res = INTERRUPTED_VHDS_REG.exec(file)
if (res === null) {
vhds.add(`${vdiDir}/${file}`)
} else {
try {
const mergeState = JSON.parse(await handler.readFile(`${vdiDir}/${file}`))
interruptedVhds.set(`${vdiDir}/${res[1]}`, {
statePath: `${vdiDir}/${file}`,
chain: mergeState.chain,
})
} catch (error) {
// fall back to a non-resuming merge
vhds.add(`${vdiDir}/${file}`)
logWarn('failed to read existing merge state', { path: file, error })
}
interruptedVhds.set(`${vdiDir}/${res[1]}`, `${vdiDir}/${file}`)
}
})
}
@@ -117,21 +138,16 @@ const listVhds = async (handler, vmDir, logWarn) => {
return { vhds, interruptedVhds, aliases }
}
async function checkAliases(
aliasPaths,
targetDataRepository,
{ handler, logInfo = noop, logWarn = console.warn, remove = false }
) {
async function checkAliases(aliasPaths, targetDataRepository, { handler, onLog = noop, remove = false }) {
const aliasFound = []
for (const alias of aliasPaths) {
const target = await resolveVhdAlias(handler, alias)
for (const path of aliasPaths) {
const target = await resolveVhdAlias(handler, path)
if (!isVhdFile(target)) {
logWarn('alias references non VHD target', { alias, target })
onLog(`Alias ${path} references a non vhd target: ${target}`)
if (remove) {
logInfo('removing alias and non VHD target', { alias, target })
await handler.unlink(target)
await handler.unlink(alias)
await handler.unlink(path)
}
continue
}
@@ -144,13 +160,13 @@ async function checkAliases(
// error during dispose should not trigger a deletion
}
} catch (error) {
logWarn('missing or broken alias target', { alias, target, error })
onLog(`target ${target} of alias ${path} is missing or broken`, { error })
if (remove) {
try {
await VhdAbstract.unlink(handler, alias)
} catch (error) {
if (error.code !== 'ENOENT') {
logWarn('error deleting alias target', { alias, target, error })
await VhdAbstract.unlink(handler, path)
} catch (e) {
if (e.code !== 'ENOENT') {
onLog(`Error while deleting target ${target} of alias ${path}`, { error: e })
}
}
}
@@ -160,48 +176,37 @@ async function checkAliases(
aliasFound.push(resolve('/', target))
}
const vhds = await handler.list(targetDataRepository, {
const entries = await handler.list(targetDataRepository, {
ignoreMissing: true,
prependDir: true,
})
await asyncMap(vhds, async path => {
if (!aliasFound.includes(path)) {
logWarn('no alias references VHD', { path })
entries.forEach(async entry => {
if (!aliasFound.includes(entry)) {
onLog(`the VHD ${entry} is not referenced by an alias`)
if (remove) {
logInfo('deleting unused VHD', { path })
await VhdAbstract.unlink(handler, path)
await VhdAbstract.unlink(handler, entry)
}
}
})
}
exports.checkAliases = checkAliases
const defaultMergeLimiter = limitConcurrency(1)
exports.cleanVm = async function cleanVm(
vmDir,
{
fixMetadata,
remove,
merge,
mergeBlockConcurrency,
mergeLimiter = defaultMergeLimiter,
logInfo = noop,
logWarn = console.warn,
}
{ fixMetadata, remove, merge, mergeLimiter = defaultMergeLimiter, onLog = noop }
) {
const limitedMergeVhdChain = mergeLimiter(_mergeVhdChain)
const limitedMergeVhdChain = mergeLimiter(mergeVhdChain)
const handler = this._handler
const vhdsToJSons = new Set()
const vhdById = new Map()
const vhdParents = { __proto__: null }
const vhdChildren = { __proto__: null }
const { vhds, interruptedVhds, aliases } = await listVhds(handler, vmDir, logWarn)
const { vhds, interruptedVhds, aliases } = await listVhds(handler, vmDir)
// remove broken VHDs
await asyncMap(vhds, async path => {
@@ -219,31 +224,12 @@ exports.cleanVm = async function cleanVm(
}
vhdChildren[parent] = path
}
// Detect VHDs with the same UUIDs
//
// Due to a bug introduced in a1bcd35e2
const duplicate = vhdById.get(UUID.stringify(vhd.footer.uuid))
let vhdKept = vhd
if (duplicate !== undefined) {
logWarn('uuid is duplicated', { uuid: UUID.stringify(vhd.footer.uuid) })
if (duplicate.containsAllDataOf(vhd)) {
logWarn(`should delete ${path}`)
vhdKept = duplicate
vhds.delete(path)
} else if (vhd.containsAllDataOf(duplicate)) {
logWarn(`should delete ${duplicate._path}`)
vhds.delete(duplicate._path)
} else {
logWarn('same ids but different content')
}
}
vhdById.set(UUID.stringify(vhdKept.footer.uuid), vhdKept)
})
} catch (error) {
vhds.delete(path)
logWarn('VHD check error', { path, error })
onLog(`error while checking the VHD with path ${path}`, { error })
if (error?.code === 'ERR_ASSERTION' && remove) {
logInfo('deleting broken VHD', { path })
onLog(`deleting broken ${path}`)
return VhdAbstract.unlink(handler, path)
}
}
@@ -252,15 +238,15 @@ exports.cleanVm = async function cleanVm(
// remove interrupted merge states for missing VHDs
for (const interruptedVhd of interruptedVhds.keys()) {
if (!vhds.has(interruptedVhd)) {
const { statePath } = interruptedVhds.get(interruptedVhd)
const statePath = interruptedVhds.get(interruptedVhd)
interruptedVhds.delete(interruptedVhd)
logWarn('orphan merge state', {
onLog('orphan merge state', {
mergeStatePath: statePath,
missingVhdPath: interruptedVhd,
})
if (remove) {
logInfo('deleting orphan merge state', { statePath })
onLog(`deleting orphan merge state ${statePath}`)
await handler.unlink(statePath)
}
}
@@ -269,7 +255,7 @@ exports.cleanVm = async function cleanVm(
// check if aliases are correct
// check if all VHDs in the data subfolder have a corresponding alias
await asyncMap(Object.keys(aliases), async dir => {
await checkAliases(aliases[dir], `${dir}/data`, { handler, logInfo, logWarn, remove })
await checkAliases(aliases[dir], `${dir}/data`, { handler, onLog, remove })
})
// remove VHDs with missing ancestors
@@ -291,9 +277,9 @@ exports.cleanVm = async function cleanVm(
if (!vhds.has(parent)) {
vhds.delete(vhdPath)
logWarn('parent VHD is missing', { parent, child: vhdPath })
onLog(`the parent ${parent} of the VHD ${vhdPath} is missing`)
if (remove) {
logInfo('deleting orphan VHD', { path: vhdPath })
onLog(`deleting orphan VHD ${vhdPath}`)
deletions.push(VhdAbstract.unlink(handler, vhdPath))
}
}
@@ -311,7 +297,6 @@ exports.cleanVm = async function cleanVm(
}
const jsons = new Set()
let mustInvalidateCache = false
const xvas = new Set()
const xvaSums = []
const entries = await handler.list(vmDir, {
@@ -331,7 +316,7 @@ exports.cleanVm = async function cleanVm(
// the check is not good enough to delete the file, the best we can do is report
// it
if (!(await this.isValidXva(path))) {
logWarn('XVA might be broken', { path })
onLog(`the XVA with path ${path} is potentially broken`)
}
})
@@ -345,7 +330,7 @@ exports.cleanVm = async function cleanVm(
try {
metadata = JSON.parse(await handler.readFile(json))
} catch (error) {
logWarn('failed to read backup metadata', { path: json, error })
onLog(`failed to read metadata file ${json}`, { error })
jsons.delete(json)
return
}
@@ -356,11 +341,10 @@ exports.cleanVm = async function cleanVm(
if (xvas.has(linkedXva)) {
unusedXvas.delete(linkedXva)
} else {
logWarn('the XVA linked to the backup is missing', { backup: json, xva: linkedXva })
onLog(`the XVA linked to the metadata ${json} is missing`)
if (remove) {
logInfo('deleting incomplete backup', { path: json })
onLog(`deleting incomplete backup ${json}`)
jsons.delete(json)
mustInvalidateCache = true
await handler.unlink(json)
}
}
@@ -380,10 +364,9 @@ exports.cleanVm = async function cleanVm(
vhdsToJSons[path] = json
})
} else {
logWarn('some VHDs linked to the backup are missing', { backup: json, missingVhds })
onLog(`Some VHDs linked to the metadata ${json} are missing`, { missingVhds })
if (remove) {
logInfo('deleting incomplete backup', { path: json })
mustInvalidateCache = true
onLog(`deleting incomplete backup ${json}`)
jsons.delete(json)
await handler.unlink(json)
}
@@ -395,7 +378,7 @@ exports.cleanVm = async function cleanVm(
const unusedVhdsDeletion = []
const toMerge = []
{
// VHD chains (as list from oldest to most recent) to merge indexed by most recent
// VHD chains (as list from child to ancestor) to merge indexed by last
// ancestor
const vhdChainsToMerge = { __proto__: null }
@@ -419,14 +402,14 @@ exports.cleanVm = async function cleanVm(
if (child !== undefined) {
const chain = getUsedChildChainOrDelete(child)
if (chain !== undefined) {
chain.unshift(vhd)
chain.push(vhd)
return chain
}
}
logWarn('unused VHD', { path: vhd })
onLog(`the VHD ${vhd} is unused`)
if (remove) {
logInfo('deleting unused VHD', { path: vhd })
onLog(`deleting unused VHD ${vhd}`)
unusedVhdsDeletion.push(VhdAbstract.unlink(handler, vhd))
}
}
@@ -437,13 +420,7 @@ exports.cleanVm = async function cleanVm(
// merge interrupted VHDs
for (const parent of interruptedVhds.keys()) {
// before #6349 the chain wasn't in the mergeState
const { chain, statePath } = interruptedVhds.get(parent)
if (chain === undefined) {
vhdChainsToMerge[parent] = [parent, vhdChildren[parent]]
} else {
vhdChainsToMerge[parent] = chain.map(vhdPath => handlerPath.resolveFromFile(statePath, vhdPath))
}
vhdChainsToMerge[parent] = [vhdChildren[parent], parent]
}
Object.values(vhdChainsToMerge).forEach(chain => {
@@ -456,15 +433,9 @@ exports.cleanVm = async function cleanVm(
const metadataWithMergedVhd = {}
const doMerge = async () => {
await asyncMap(toMerge, async chain => {
const merged = await limitedMergeVhdChain(handler, chain, {
logInfo,
logWarn,
remove,
merge,
mergeBlockConcurrency,
})
const merged = await limitedMergeVhdChain(chain, { handler, onLog, remove, merge })
if (merged !== undefined) {
const metadataPath = vhdsToJSons[chain[chain.length - 1]] // the whole chain should have the same metadata file
const metadataPath = vhdsToJSons[chain[0]] // the whole chain should have the same metadata file
metadataWithMergedVhd[metadataPath] = true
}
})
@@ -474,18 +445,18 @@ exports.cleanVm = async function cleanVm(
...unusedVhdsDeletion,
toMerge.length !== 0 && (merge ? Task.run({ name: 'merge' }, doMerge) : doMerge()),
asyncMap(unusedXvas, path => {
logWarn('unused XVA', { path })
onLog(`the XVA ${path} is unused`)
if (remove) {
logInfo('deleting unused XVA', { path })
onLog(`deleting unused XVA ${path}`)
return handler.unlink(path)
}
}),
asyncMap(xvaSums, path => {
// no need to handle checksums for XVAs deleted by the script, they will be handled by `unlink()`
if (!xvas.has(path.slice(0, -'.checksum'.length))) {
logInfo('unused XVA checksum', { path })
onLog(`the XVA checksum ${path} is unused`)
if (remove) {
logInfo('deleting unused XVA checksum', { path })
onLog(`deleting unused XVA checksum ${path}`)
return handler.unlink(path)
}
}
@@ -507,11 +478,7 @@ exports.cleanVm = async function cleanVm(
if (mode === 'full') {
// a full backup: check size
const linkedXva = resolve('/', vmDir, xva)
try {
fileSystemSize = await handler.getSize(linkedXva)
} catch (error) {
// can fail with encrypted remote
}
fileSystemSize = await handler.getSize(linkedXva)
} else if (mode === 'delta') {
const linkedVhds = Object.keys(vhds).map(key => resolve('/', vmDir, vhds[key]))
fileSystemSize = await computeVhdsSize(handler, linkedVhds)
@@ -523,15 +490,11 @@ exports.cleanVm = async function cleanVm(
// don't warn if the size has changed after a merge
if (!merged && fileSystemSize !== size) {
logWarn('incorrect backup size in metadata', {
path: metadataPath,
actual: size ?? 'none',
expected: fileSystemSize,
})
onLog(`incorrect size in metadata: ${size ?? 'none'} instead of ${fileSystemSize}`)
}
}
} catch (error) {
logWarn('failed to get backup size', { backup: metadataPath, error })
onLog(`failed to get size of ${metadataPath}`, { error })
return
}
@@ -541,16 +504,11 @@ exports.cleanVm = async function cleanVm(
try {
await handler.writeFile(metadataPath, JSON.stringify(metadata), { flags: 'w' })
} catch (error) {
logWarn('failed to update backup size in metadata', { path: metadataPath, error })
onLog(`failed to update size in backup metadata ${metadataPath} after merge`, { error })
}
}
})
// purge cache if a metadata file has been deleted
if (mustInvalidateCache) {
await handler.unlink(vmDir + '/cache.json.gz')
}
return {
// boolean whether some VHDs were merged (or should be merged)
merge: toMerge.length !== 0,

View File

@@ -1,12 +1,12 @@
'use strict'
const compareVersions = require('compare-versions')
const find = require('lodash/find.js')
const groupBy = require('lodash/groupBy.js')
const ignoreErrors = require('promise-toolbox/ignoreErrors')
const omit = require('lodash/omit.js')
const { asyncMap } = require('@xen-orchestra/async-map')
const { CancelToken } = require('promise-toolbox')
const { compareVersions } = require('compare-versions')
const { createVhdStreamWithLength } = require('vhd-lib')
const { defer } = require('golike-defer')

View File

@@ -3,8 +3,6 @@
const eos = require('end-of-stream')
const { PassThrough } = require('stream')
const { debug } = require('@xen-orchestra/log').createLogger('xo:backups:forkStreamUnpipe')
// create a new readable stream from an existing one which may be piped later
//
// in case of error in the new readable stream, it will simply be unpiped
@@ -13,23 +11,18 @@ exports.forkStreamUnpipe = function forkStreamUnpipe(stream) {
const { forks = 0 } = stream
stream.forks = forks + 1
debug('forking', { forks: stream.forks })
const proxy = new PassThrough()
stream.pipe(proxy)
eos(stream, error => {
if (error !== undefined) {
debug('error on original stream, destroying fork', { error })
proxy.destroy(error)
}
})
eos(proxy, error => {
debug('end of stream, unpiping', { error, forks: --stream.forks })
eos(proxy, _ => {
stream.forks--
stream.unpipe(proxy)
if (stream.forks === 0) {
debug('no more forks, destroying original stream')
stream.destroy(new Error('no more consumers for this stream'))
}
})

View File

@@ -49,11 +49,6 @@ const isValidTar = async (handler, size, fd) => {
// TODO: find a heuristic for compressed files
async function isValidXva(path) {
const handler = this._handler
// size is longer when encrypted + reading part of an encrypted file is not implemented
if (handler.isEncrypted) {
return true
}
try {
const fd = await handler.openFile(path, 'r')
try {
@@ -71,6 +66,7 @@ async function isValidXva(path) {
}
} catch (error) {
// never throw, log and report as valid to avoid side effects
console.error('isValidXva', path, error)
return true
}
}

View File

@@ -6,22 +6,15 @@
- [Task logs](#task-logs)
- [During backup](#during-backup)
- [During restoration](#during-restoration)
- [API](#api)
- [Run description object](#run-description-object)
- [`IdPattern`](#idpattern)
- [Settings](#settings)
- [Writer API](#writer-api)
## File structure on remote
### with vhd files
```
<remote>
└─ xo-vm-backups
├─ index.json // TODO
└─ <VM UUID>
├─ cache.json.gz
├─ index.json // TODO
├─ vdis
│ └─ <job UUID>
│ └─ <VDI UUID>
@@ -32,31 +25,6 @@
└─ <YYYYMMDD>T<HHmmss>.xva.checksum
```
### with vhd directories
When `useVhdDirectory` is enabled on the remote, the directory containing the VHDs has a slightly different architecture:
```
<vdis>/<job UUID>/<VDI UUID>
├─ <YYYYMMDD>T<HHmmss>.alias.vhd // contains the relative path to a VHD directory
├─ <YYYYMMDD>T<HHmmss>.alias.vhd
└─ data
├─ <uuid>.vhd // VHD directory format is described in vhd-lib/Vhd/VhdDirectory.js
└─ <uuid>.vhd
```
## Cache for a VM
In a VM directory, the file `xo-vm-backups/<VM UUID>/cache.json.gz`, if it exists, contains the metadata of all the backups of this VM.
This cache is gzip-compressed and contains a JSON object with the metadata of all the backups of this VM, indexed by their absolute path (i.e. `/xo-vm-backups/<VM UUID>/<timestamp>.json`).
This file is generated on demand when listing the backups, and updated directly on backup creation/deletion.
If any inconsistency is detected, the file is deleted so it will be fully regenerated when required.
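As a sketch, reading this cache could look like the following (assuming Node's `zlib` and the `handler.readFile()` used elsewhere in this package; the function name is illustrative):
```js
const zlib = require('zlib')

// a minimal sketch, not the actual implementation
async function readVmBackupsCache(handler, vmUuid) {
  try {
    const gzipped = await handler.readFile(`xo-vm-backups/${vmUuid}/cache.json.gz`)
    // a JSON object indexed by `/xo-vm-backups/<VM UUID>/<timestamp>.json`
    return JSON.parse(zlib.gunzipSync(gzipped))
  } catch (error) {
    // missing or broken cache: the caller regenerates it from the metadata files
    return undefined
  }
}
```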
## Attributes
### Of created snapshots
@@ -96,30 +64,24 @@ job.start(data: { mode: Mode, reportWhen: ReportWhen })
├─ task.warning(message: string)
├─ task.start(data: { type: 'VM', id: string })
│ ├─ task.warning(message: string)
| ├─ task.start(message: 'clean-vm')
│ │ └─ task.end
│ ├─ task.start(message: 'snapshot')
│ │ └─ task.end
│ ├─ task.start(message: 'export', data: { type: 'SR' | 'remote', id: string, isFull: boolean })
│ ├─ task.start(message: 'export', data: { type: 'SR' | 'remote', id: string })
│ │ ├─ task.warning(message: string)
│ │ ├─ task.start(message: 'transfer')
│ │ │ ├─ task.warning(message: string)
│ │ │ └─ task.end(result: { size: number })
│ │ │
│ │ │ // in case there is a healthcheck scheduled for this vm in this job
│ │ ├─ task.start(message: 'health check')
│ │ │ ├─ task.start(message: 'transfer')
│ │ │ │ └─ task.end(result: { size: number })
│ │ │ ├─ task.start(message: 'vmstart')
│ │ │ │ └─ task.end
│ │ │ └─ task.end
│ │ │
│ │ │ // in case of full backup, DR and CR
│ │ ├─ task.start(message: 'clean')
│ │ │ ├─ task.warning(message: string)
│ │ │ └─ task.end
│ │ └─ task.end
| ├─ task.start(message: 'clean-vm')
│ │
│ │ │ // in case of delta backup
│ │ ├─ task.start(message: 'merge')
│ │ │ ├─ task.warning(message: string)
│ │ │ └─ task.end(result: { size: number })
│ │ │
│ │ └─ task.end
│ └─ task.end
└─ job.end
@@ -133,102 +95,3 @@ task.start(message: 'restore', data: { jobId: string, srId: string, time: number
│ └─ task.end(result: { id: string, size: number })
└─ task.end
```
## API
### Run description object
This is a JavaScript object containing all the information necessary to run a backup job.
```coffee
# Information about the job itself
job:
# Unique identifier
id: string
# Human readable identifier
name: string
# Whether this job is doing Full Backup / Disaster Recovery or
# Delta Backup / Continuous Replication
mode: 'full' | 'delta'
# For backup jobs, indicates which remotes to use
remotes: IdPattern
settings:
# Used for the whole job
'': Settings
# Used for a specific schedule
[ScheduleId]: Settings
# Used for a specific VM
[VmId]: Settings
# For replication jobs, indicates which SRs to use
srs: IdPattern
# Here for historical reasons
type: 'backup'
# Indicates which VMs to backup/replicate
vms: IdPattern
# Indicates which XAPI to use to connect to a specific VM or SR
recordToXapi:
[ObjectId]: XapiId
# Information necessary to connect to each remote
remotes:
[RemoteId]:
url: string
# Indicates which schedule is used for this run
schedule:
id: ScheduleId
# Information necessary to connect to each XAPI
xapis:
[XapiId]:
allowUnauthorized: boolean
credentials:
password: string
username: string
url: string
```
### `IdPattern`
For a single object:
```
{ id: string }
```
For multiple objects:
```
{ id: { __or: string[] } }
```
> This syntax is compatible with [`value-matcher`](https://github.com/vatesfr/xen-orchestra/tree/master/packages/value-matcher).
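For illustration, such a pattern can be turned into a predicate (a sketch assuming `value-matcher`'s `createPredicate` export):
```js
const { createPredicate } = require('value-matcher')

const predicate = createPredicate({ id: { __or: ['vm1', 'vm2'] } })

// keeps only the objects whose id matches the pattern
const matching = [{ id: 'vm1' }, { id: 'vm3' }].filter(predicate) // → [{ id: 'vm1' }]
```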
### Settings
Settings are described in [`@xen-orchestra/backups/Backup.js`](https://github.com/vatesfr/xen-orchestra/blob/master/%40xen-orchestra/backups/Backup.js).
## Writer API
- `beforeBackup()`
- **Delta**
- `checkBaseVdis(baseUuidToSrcVdi, baseVm)`
- `prepare({ isFull })`
- `transfer({ timestamp, deltaExport, sizeContainers })`
- `cleanup()`
- `healthCheck(sr)`
- **Full**
- `run({ timestamp, sizeContainer, stream })`
- `afterBackup()`
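A hypothetical skeleton of a full backup writer, for illustration only; the hook names come from the list above, the bodies are placeholders:
```js
class ExampleFullWriter {
  async beforeBackup() {
    // e.g. acquire a lock on the target directory
  }

  // full mode: receives the XVA export as a stream
  async run({ timestamp, sizeContainer, stream }) {
    let size = 0
    for await (const chunk of stream) {
      size += chunk.length // a real writer would persist the chunk here
    }
    // `sizeContainer.size` is filled by the backup engine as data flows through
  }

  async afterBackup() {
    // e.g. release the lock acquired in beforeBackup()
  }
}
```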

View File

@@ -1,6 +1,4 @@
#!/usr/bin/env node
// eslint-disable-next-line eslint-comments/disable-enable-pair
/* eslint-disable n/shebang */
'use strict'
@@ -64,7 +62,7 @@ const main = Disposable.wrap(async function* main(args) {
try {
const vmDir = getVmBackupDir(String(await handler.readFile(taskFile)))
try {
await adapter.cleanVm(vmDir, { merge: true, logInfo: info, logWarn: warn, remove: true })
await adapter.cleanVm(vmDir, { merge: true, onLog: info, remove: true })
} catch (error) {
// consider the clean successful if the VM dir is missing
if (error.code !== 'ENOENT') {

View File

@@ -8,7 +8,7 @@
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"version": "0.28.0",
"version": "0.22.0",
"engines": {
"node": ">=14.6"
},
@@ -16,20 +16,17 @@
"postversion": "npm publish --access public"
},
"dependencies": {
"@vates/async-each": "^1.0.0",
"@vates/cached-dns.lookup": "^1.0.0",
"@vates/compose": "^2.1.0",
"@vates/decorate-with": "^2.0.0",
"@vates/disposable": "^0.1.1",
"@vates/fuse-vhd": "^1.0.0",
"@vates/parse-duration": "^0.1.1",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/fs": "^3.1.0",
"@xen-orchestra/fs": "^1.0.1",
"@xen-orchestra/log": "^0.3.0",
"@xen-orchestra/template": "^0.1.0",
"compare-versions": "^5.0.1",
"compare-versions": "^4.0.1",
"d3-time-format": "^3.0.0",
"decorator-synchronized": "^0.6.0",
"end-of-stream": "^1.4.4",
"fs-extra": "^10.0.0",
"golike-defer": "^0.5.1",
@@ -39,8 +36,8 @@
"parse-pairs": "^1.1.0",
"promise-toolbox": "^0.21.0",
"proper-lockfile": "^4.1.2",
"uuid": "^9.0.0",
"vhd-lib": "^4.1.0",
"uuid": "^8.3.2",
"vhd-lib": "^3.1.0",
"yazl": "^2.5.1"
},
"devDependencies": {
@@ -48,7 +45,7 @@
"tmp": "^0.2.1"
},
"peerDependencies": {
"@xen-orchestra/xapi": "^1.5.0"
"@xen-orchestra/xapi": "^0.11.0"
},
"license": "AGPL-3.0-or-later",
"author": {

View File

@@ -19,7 +19,7 @@ const { AbstractDeltaWriter } = require('./_AbstractDeltaWriter.js')
const { checkVhd } = require('./_checkVhd.js')
const { packUuid } = require('./_packUuid.js')
const { Disposable } = require('promise-toolbox')
const NbdClient = require('nbd-client')
const { warn } = createLogger('xo:backups:DeltaBackupWriter')
exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(AbstractDeltaWriter) {
@@ -36,7 +36,6 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
try {
const vhds = await handler.list(`${vdisDir}/${srcVdi.uuid}`, {
filter: _ => _[0] !== '.' && _.endsWith('.vhd'),
ignoreMissing: true,
prependDir: true,
})
const packedBaseUuid = packUuid(baseUuid)
@@ -81,9 +80,7 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
},
})
this.transfer = task.wrapFn(this.transfer)
this.healthCheck = task.wrapFn(this.healthCheck)
this.cleanup = task.wrapFn(this.cleanup)
this.afterBackup = task.wrapFn(this.afterBackup, true)
this.cleanup = task.wrapFn(this.cleanup, true)
return task.run(() => this._prepare())
}
@@ -159,6 +156,7 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
}/${adapter.getVhdFileName(basename)}`
)
const metadataFilename = `${backupDir}/${basename}.json`
const metadataContent = {
jobId,
mode: job.mode,
@@ -199,24 +197,11 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
await checkVhd(handler, parentPath)
}
// get nbd if possible
const vdiRef = vm.$xapi.getObject(vdi.uuid).$ref
let nbdClient
try {
const [nbdInfo] = await vm.$xapi.call('VDI.get_nbd_info', vdiRef)
nbdClient = new NbdClient(nbdInfo)
await nbdClient.connect()
} catch (e) {
console.log('NBD error', e)
}
await adapter.writeVhd(path, deltaExport.streams[`${id}.vhd`], {
// no checksum for VHDs, because they will be invalidated by
// merges and chainings
checksum: false,
validator: tmpPath => checkVhd(handler, tmpPath),
writeBlockConcurrency: this._backup.config.writeBlockConcurrency,
nbdClient,
})
if (isDelta) {
@@ -236,7 +221,9 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
}
})
metadataContent.size = size
this._metadataFileName = await adapter.writeVmBackupMetadata(vm.uuid, metadataContent)
await handler.outputFile(metadataFilename, JSON.stringify(metadataContent), {
dirMode: backup.config.dirMode,
})
// TODO: run cleanup?
}

View File

@@ -34,6 +34,7 @@ exports.FullBackupWriter = class FullBackupWriter extends MixinBackupWriter(Abst
const { job, scheduleId, vm } = backup
const adapter = this._adapter
const handler = adapter.handler
const backupDir = getVmBackupDir(vm.uuid)
// TODO: clean VM backup directory
@@ -73,7 +74,9 @@ exports.FullBackupWriter = class FullBackupWriter extends MixinBackupWriter(Abst
return { size: sizeContainer.size }
})
metadata.size = sizeContainer.size
this._metadataFileName = await adapter.writeVmBackupMetadata(vm.uuid, metadata)
await handler.outputFile(metadataFilename, JSON.stringify(metadata), {
dirMode: backup.config.dirMode,
})
if (!deleteFirst) {
await deleteOldBackups()

View File

@@ -9,6 +9,4 @@ exports.AbstractWriter = class AbstractWriter {
beforeBackup() {}
afterBackup() {}
healthCheck(sr) {}
}

View File

@@ -3,15 +3,11 @@
const { createLogger } = require('@xen-orchestra/log')
const { join } = require('path')
const assert = require('assert')
const { formatFilenameDate } = require('../_filenameDate.js')
const { getVmBackupDir } = require('../_getVmBackupDir.js')
const { HealthCheckVmBackup } = require('../HealthCheckVmBackup.js')
const { ImportVmBackup } = require('../ImportVmBackup.js')
const { Task } = require('../Task.js')
const MergeWorker = require('../merge-worker/index.js')
const { formatFilenameDate } = require('../_filenameDate.js')
const { info, warn } = createLogger('xo:backups:MixinBackupWriter')
const { warn } = createLogger('xo:backups:MixinBackupWriter')
exports.MixinBackupWriter = (BaseClass = Object) =>
class MixinBackupWriter extends BaseClass {
@@ -29,18 +25,11 @@ exports.MixinBackupWriter = (BaseClass = Object) =>
async _cleanVm(options) {
try {
return await Task.run({ name: 'clean-vm' }, () => {
return this._adapter.cleanVm(this.#vmBackupDir, {
...options,
fixMetadata: true,
logInfo: info,
logWarn: (message, data) => {
warn(message, data)
Task.warning(message, data)
},
lock: false,
mergeBlockConcurrency: this._backup.config.mergeBlockConcurrency,
})
return await this._adapter.cleanVm(this.#vmBackupDir, {
...options,
fixMetadata: true,
onLog: warn,
lock: false,
})
} catch (error) {
warn(error)
@@ -76,38 +65,4 @@ exports.MixinBackupWriter = (BaseClass = Object) =>
await MergeWorker.run(remotePath)
}
}
healthCheck(sr) {
assert.notStrictEqual(
this._metadataFileName,
undefined,
'Metadata file name should be defined before making a healthcheck'
)
return Task.run(
{
name: 'health check',
},
async () => {
const xapi = sr.$xapi
const srUuid = sr.uuid
const adapter = this._adapter
const metadata = await adapter.readVmBackupMetadata(this._metadataFileName)
const { id: restoredId } = await new ImportVmBackup({
adapter,
metadata,
srUuid,
xapi,
}).run()
const restoredVm = xapi.getObject(restoredId)
try {
await new HealthCheckVmBackup({
restoredVm,
xapi,
}).run()
} finally {
await xapi.VM_destroy(restoredVm.$ref)
}
}
)
}
}

View File

@@ -18,7 +18,7 @@
"preferGlobal": true,
"dependencies": {
"golike-defer": "^0.5.1",
"xen-api": "^1.2.2"
"xen-api": "^1.2.0"
},
"scripts": {
"postversion": "npm publish"

View File

@@ -22,7 +22,7 @@ await ee.emitAsync('start')
// error handling though:
await ee.emitAsync(
{
onError(error, event, listener) {
onError(error) {
console.warn(error)
},
},

View File

@@ -40,7 +40,7 @@ await ee.emitAsync('start')
// error handling though:
await ee.emitAsync(
{
onError(error, event, listener) {
onError(error) {
console.warn(error)
},
},

View File

@@ -1,7 +1,5 @@
'use strict'
const identity = v => v
module.exports = function emitAsync(event) {
let opts
let i = 1
@@ -19,18 +17,12 @@ module.exports = function emitAsync(event) {
}
const onError = opts != null && opts.onError
const addErrorHandler = onError
? (promise, listener) => promise.catch(error => onError(error, event, listener))
: identity
return Promise.all(
this.listeners(event).map(listener =>
addErrorHandler(
new Promise(resolve => {
resolve(listener.apply(this, args))
}),
listener
)
new Promise(resolve => {
resolve(listener.apply(this, args))
}).catch(onError)
)
)
}

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "@xen-orchestra/emit-async",
"version": "1.0.0",
"version": "0.1.0",
"license": "ISC",
"description": "Emit an event for async listeners to settle",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/emit-async",

View File

@@ -1,19 +0,0 @@
## metadata files
- Older remotes don't have any metadata file
- Remotes used since 5.75 have two files: `encryption.json` and `metadata.json`
The metadata files are checked by the `sync()` method. If the check fails, it MUST throw an error and unmount the remote.
If the remote is empty, the `sync()` method creates them.
### encryption.json
A non-encrypted file containing the algorithm and parameters used for this remote.
It MUST NOT contain the key.
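For instance, with the AES algorithm used by the encryptor in `@xen-orchestra/fs`, it could contain `{ "algorithm": "aes-256-cbc" }`.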
### metadata.json
An encrypted JSON file containing the settings of the remote. Today it is an almost empty JSON file (`{ "random": "<random UUID>" }`); it serves to check that the encryption key set on the remote is valid, but in the future it will be able to store some remote settings to ease disaster recovery.
If this file can't be read (decrypted, decompressed, …), that means the encryption settings of the remote have been updated. If the remote is empty, update the `encryption.json` and `metadata.json` files; otherwise raise an error.

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "@xen-orchestra/fs",
"version": "3.1.0",
"version": "1.0.1",
"license": "AGPL-3.0-or-later",
"description": "The File System for Xen Orchestra backups.",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/fs",
@@ -17,18 +17,18 @@
"xo-fs": "./cli.js"
},
"engines": {
"node": ">=14.13"
"node": ">=14"
},
"dependencies": {
"@aws-sdk/client-s3": "^3.54.0",
"@aws-sdk/lib-storage": "^3.54.0",
"@aws-sdk/middleware-apply-body-checksum": "^3.58.0",
"@aws-sdk/node-http-handler": "^3.54.0",
"@marsaud/smb2": "^0.18.0",
"@sindresorhus/df": "^3.1.1",
"@vates/async-each": "^1.0.0",
"@vates/async-each": "^0.1.0",
"@vates/coalesce-calls": "^0.1.0",
"@vates/decorate-with": "^2.0.0",
"@vates/read-chunk": "^1.0.0",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/log": "^0.3.0",
"bind-property-descriptor": "^2.0.0",
@@ -40,10 +40,9 @@
"lodash": "^4.17.4",
"promise-toolbox": "^0.21.0",
"proper-lockfile": "^4.1.2",
"pumpify": "^2.0.1",
"readable-stream": "^4.1.0",
"readable-stream": "^3.0.6",
"through2": "^4.0.2",
"xo-remote-parser": "^0.9.1"
"xo-remote-parser": "^0.8.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
@@ -51,6 +50,7 @@
"@babel/plugin-proposal-decorators": "^7.1.6",
"@babel/plugin-proposal-function-bind": "^7.0.0",
"@babel/preset-env": "^7.8.0",
"async-iterator-to-stream": "^1.1.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^7.0.2",
"dotenv": "^16.0.0",
@@ -68,9 +68,5 @@
"author": {
"name": "Vates SAS",
"url": "https://vates.fr"
},
"exports": {
".": "./dist/index.js",
"./path": "./dist/path.js"
}
}

View File

@@ -1,71 +0,0 @@
const { readChunk } = require('@vates/read-chunk')
const crypto = require('crypto')
const pumpify = require('pumpify')
function getEncryptor(key) {
if (key === undefined) {
return {
id: 'NULL_ENCRYPTOR',
algorithm: 'none',
key: 'none',
ivLength: 0,
encryptData: buffer => buffer,
encryptStream: stream => stream,
decryptData: buffer => buffer,
decryptStream: stream => stream,
}
}
const algorithm = 'aes-256-cbc'
const ivLength = 16
function encryptStream(input) {
const iv = crypto.randomBytes(ivLength)
const cipher = crypto.createCipheriv(algorithm, Buffer.from(key), iv)
const encrypted = pumpify(input, cipher)
encrypted.unshift(iv)
return encrypted
}
async function decryptStream(encryptedStream) {
const iv = await readChunk(encryptedStream, ivLength)
const cipher = crypto.createDecipheriv(algorithm, Buffer.from(key), iv)
/**
* WARNING
*
* the encrypted data has an initialization vector at the start and padding at the end
* we can't predict the decrypted size from the encrypted size alone
* thus, we can't set decrypted.length reliably
*
*/
return pumpify(encryptedStream, cipher)
}
function encryptData(buffer) {
const iv = crypto.randomBytes(ivLength)
const cipher = crypto.createCipheriv(algorithm, Buffer.from(key), iv)
const encrypted = cipher.update(buffer)
return Buffer.concat([iv, encrypted, cipher.final()])
}
function decryptData(buffer) {
const iv = buffer.slice(0, ivLength)
const encrypted = buffer.slice(ivLength)
const decipher = crypto.createDecipheriv(algorithm, Buffer.from(key), iv)
const decrypted = decipher.update(encrypted)
return Buffer.concat([decrypted, decipher.final()])
}
return {
id: algorithm,
algorithm,
key,
ivLength,
encryptData,
encryptStream,
decryptData,
decryptStream,
}
}
exports._getEncryptor = getEncryptor
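// Hypothetical round-trip with the encryptor above (aes-256-cbc needs a
// 32-byte key; this is a usage sketch, not part of the module):
//
//   const assert = require('assert')
//   const crypto = require('crypto')
//   const { _getEncryptor } = require('./_encryptor')
//
//   const { encryptData, decryptData } = _getEncryptor(crypto.randomBytes(32))
//   const clear = Buffer.from('backup metadata')
//   assert.deepStrictEqual(decryptData(encryptData(clear)), clear)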

View File

@@ -1,6 +1,6 @@
import path from 'path'
const { basename, dirname, join, resolve, relative, sep } = path.posix
const { basename, dirname, join, resolve, sep } = path.posix
export { basename, dirname, join }
@@ -19,6 +19,3 @@ export function split(path) {
return parts
}
export const relativeFromFile = (file, path) => relative(dirname(file), path)
export const resolveFromFile = (file, path) => resolve('/', dirname(file), path).slice(1)

View File

@@ -1,20 +1,15 @@
import asyncMapSettled from '@xen-orchestra/async-map/legacy'
import assert from 'assert'
import getStream from 'get-stream'
import { coalesceCalls } from '@vates/coalesce-calls'
import { createLogger } from '@xen-orchestra/log'
import { fromCallback, fromEvent, ignoreErrors, timeout } from 'promise-toolbox'
import { limitConcurrency } from 'limit-concurrency-decorator'
import { parse } from 'xo-remote-parser'
import { pipeline } from 'stream'
import { randomBytes, randomUUID } from 'crypto'
import { randomBytes } from 'crypto'
import { synchronized } from 'decorator-synchronized'
import { basename, dirname, normalize as normalizePath } from './path'
import { basename, dirname, normalize as normalizePath } from './_path'
import { createChecksumStream, validChecksumOfReadStream } from './checksum'
import { _getEncryptor } from './_encryptor'
const { info, warn } = createLogger('@xen-orchestra:fs')
const checksumFile = file => file + '.checksum'
const computeRate = (hrtime, size) => {
@@ -25,9 +20,6 @@ const computeRate = (hrtime, size) => {
const DEFAULT_TIMEOUT = 6e5 // 10 min
const DEFAULT_MAX_PARALLEL_OPERATIONS = 10
const ENCRYPTION_DESC_FILENAME = 'encryption.json'
const ENCRYPTION_METADATA_FILENAME = 'metadata.json'
const ignoreEnoent = error => {
if (error == null || error.code !== 'ENOENT') {
throw error
@@ -68,7 +60,6 @@ class PrefixWrapper {
}
export default class RemoteHandlerAbstract {
_encryptor
constructor(remote, options = {}) {
if (remote.url === 'test://') {
this._remote = remote
@@ -79,7 +70,6 @@ export default class RemoteHandlerAbstract {
}
}
;({ highWaterMark: this._highWaterMark, timeout: this._timeout = DEFAULT_TIMEOUT } = options)
this._encryptor = _getEncryptor(this._remote.encryptionKey)
const sharedLimit = limitConcurrency(options.maxParallelOperations ?? DEFAULT_MAX_PARALLEL_OPERATIONS)
this.closeFile = sharedLimit(this.closeFile)
@@ -118,51 +108,90 @@ export default class RemoteHandlerAbstract {
await this.__closeFile(fd)
}
async createReadStream(file, { checksum = false, ignoreMissingChecksum = false, ...options } = {}) {
if (options.end !== undefined || options.start !== undefined) {
assert.strictEqual(this.isEncrypted, false, `Can't read part of a file when encryption is active ${file}`)
}
// TODO: remove method
async createOutputStream(file, { checksum = false, dirMode, ...options } = {}) {
if (typeof file === 'string') {
file = normalizePath(file)
}
let stream = await timeout.call(
this._createReadStream(file, { ...options, highWaterMark: this._highWaterMark }),
const path = typeof file === 'string' ? file : file.path
const streamP = timeout.call(
this._createOutputStream(file, {
dirMode,
flags: 'wx',
...options,
}),
this._timeout
)
// detect early errors
await fromEvent(stream, 'readable')
if (checksum) {
try {
const path = typeof file === 'string' ? file : file.path
const checksum = await this._readFile(checksumFile(path), { flags: 'r' })
const { length } = stream
stream = validChecksumOfReadStream(stream, String(checksum).trim())
stream.length = length
} catch (error) {
if (!(ignoreMissingChecksum && error.code === 'ENOENT')) {
throw error
}
}
if (!checksum) {
return streamP
}
if (this.isEncrypted) {
stream = this._encryptor.decryptStream(stream)
} else {
// try to add the length prop if missing and not a range stream
if (stream.length === undefined && options.end === undefined && options.start === undefined) {
try {
stream.length = await this._getSize(file)
} catch (error) {
// ignore errors
}
}
const checksumStream = createChecksumStream()
const forwardError = error => {
checksumStream.emit('error', error)
}
return stream
const stream = await streamP
stream.on('error', forwardError)
checksumStream.pipe(stream)
checksumStream.checksumWritten = checksumStream.checksum
.then(value => this._outputFile(checksumFile(path), value, { flags: 'wx' }))
.catch(forwardError)
return checksumStream
}
createReadStream(file, { checksum = false, ignoreMissingChecksum = false, ...options } = {}) {
if (typeof file === 'string') {
file = normalizePath(file)
}
const path = typeof file === 'string' ? file : file.path
const streamP = timeout
.call(this._createReadStream(file, { ...options, highWaterMark: this._highWaterMark }), this._timeout)
.then(stream => {
// detect early errors
let promise = fromEvent(stream, 'readable')
// try to add the length prop if missing and not a range stream
if (stream.length === undefined && options.end === undefined && options.start === undefined) {
promise = Promise.all([
promise,
ignoreErrors.call(
this._getSize(file).then(size => {
stream.length = size
})
),
])
}
return promise.then(() => stream)
})
if (!checksum) {
return streamP
}
// avoid an unhandled rejection warning
ignoreErrors.call(streamP)
return this._readFile(checksumFile(path), { flags: 'r' }).then(
checksum =>
streamP.then(stream => {
const { length } = stream
stream = validChecksumOfReadStream(stream, String(checksum).trim())
stream.length = length
return stream
}),
error => {
if (ignoreMissingChecksum && error && error.code === 'ENOENT') {
return streamP
}
throw error
}
)
}
/**
@@ -178,8 +207,6 @@ export default class RemoteHandlerAbstract {
async outputStream(path, input, { checksum = true, dirMode, validator } = {}) {
path = normalizePath(path)
let checksumStream
input = this._encryptor.encryptStream(input)
if (checksum) {
checksumStream = createChecksumStream()
pipeline(input, checksumStream, noop)
@@ -190,8 +217,6 @@ export default class RemoteHandlerAbstract {
validator,
})
if (checksum) {
// using _outputFile means the checksum will NOT be encrypted
// this is by design, to allow checking encrypted files without the key
await this._outputFile(checksumFile(path), await checksumStream.checksum, { dirMode, flags: 'wx' })
}
}
@@ -211,13 +236,8 @@ export default class RemoteHandlerAbstract {
return timeout.call(this._getInfo(), this._timeout)
}
// when using encryption, the file size is aligned with the encryption block size (16 bytes)
// that means the stored size will be 1 to 16 bytes more than the content size, plus the initialization vector length (16 bytes)
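// e.g. (assuming AES-CBC-style block padding) 100 bytes of content are stored as 16 (IV) + 112 (padded content) = 128 bytes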
async getSize(file) {
assert.strictEqual(this.isEncrypted, false, `Can't compute size of an encrypted file ${file}`)
const size = await timeout.call(this._getSize(typeof file === 'string' ? normalizePath(file) : file), this._timeout)
return size - this._encryptor.ivLength
return timeout.call(this._getSize(typeof file === 'string' ? normalizePath(file) : file), this._timeout)
}
async list(dir, { filter, ignoreMissing = false, prependDir = false } = {}) {
@@ -263,18 +283,15 @@ export default class RemoteHandlerAbstract {
}
async outputFile(file, data, { dirMode, flags = 'wx' } = {}) {
const encryptedData = this._encryptor.encryptData(data)
await this._outputFile(normalizePath(file), encryptedData, { dirMode, flags })
await this._outputFile(normalizePath(file), data, { dirMode, flags })
}
async read(file, buffer, position) {
assert.strictEqual(this.isEncrypted, false, `Can't read part of an encrypted file ${file}`)
return this._read(typeof file === 'string' ? normalizePath(file) : file, buffer, position)
}
async readFile(file, { flags = 'r' } = {}) {
const data = await this._readFile(normalizePath(file), { flags })
return this._encryptor.decryptData(data)
return this._readFile(normalizePath(file), { flags })
}
async rename(oldPath, newPath, { checksum = false } = {}) {
@@ -314,61 +331,6 @@ export default class RemoteHandlerAbstract {
@synchronized()
async sync() {
await this._sync()
try {
await this._checkMetadata()
} catch (error) {
await this._forget()
throw error
}
}
async _canWriteMetadata() {
const list = await this.list('/', {
filter: e => !e.startsWith('.') && e !== ENCRYPTION_DESC_FILENAME && e !== ENCRYPTION_METADATA_FILENAME,
})
return list.length === 0
}
async _createMetadata() {
await Promise.all([
this._writeFile(
normalizePath(ENCRYPTION_DESC_FILENAME),
JSON.stringify({ algorithm: this._encryptor.algorithm }),
{
flags: 'w',
}
), // not encrypted
this.writeFile(ENCRYPTION_METADATA_FILENAME, `{"random":"${randomUUID()}"}`, { flags: 'w' }), // encrypted
])
}
async _checkMetadata() {
try {
// this file is not encrypted
const data = await this._readFile(normalizePath(ENCRYPTION_DESC_FILENAME))
JSON.parse(data)
} catch (error) {
if (error.code !== 'ENOENT') {
throw error
}
}
try {
// this file is encrypted
const data = await this.readFile(ENCRYPTION_METADATA_FILENAME)
JSON.parse(data)
} catch (error) {
if (error.code === 'ENOENT' || (await this._canWriteMetadata())) {
info('will update metadata of this remote')
return this._createMetadata()
}
warn(
`The encryptionKey setting of this remote does not match the key used to create it. You won't be able to read any data from this remote`,
{ error }
)
// will probably send a ERR_OSSL_EVP_BAD_DECRYPT if key is incorrect
throw error
}
}
async test() {
@@ -395,12 +357,11 @@ export default class RemoteHandlerAbstract {
readRate: computeRate(readDuration, SIZE),
}
} catch (error) {
warn(`error while testing the remote at step ${step}`, { error })
return {
success: false,
step,
file: testFileName,
error,
error: error.message || String(error),
}
} finally {
ignoreErrors.call(this._unlink(testFileName))
@@ -422,13 +383,11 @@ export default class RemoteHandlerAbstract {
}
async write(file, buffer, position) {
assert.strictEqual(this.isEncrypted, false, `Can't write part of a file with encryption ${file}`)
await this._write(typeof file === 'string' ? normalizePath(file) : file, buffer, position)
}
async writeFile(file, data, { flags = 'wx' } = {}) {
const encryptedData = this._encryptor.encryptData(data)
await this._writeFile(normalizePath(file), encryptedData, { flags })
await this._writeFile(normalizePath(file), data, { flags })
}
// Methods that can be called by private methods to avoid parallel limit on public methods
@@ -461,10 +420,6 @@ export default class RemoteHandlerAbstract {
// Methods that can be implemented by inheriting classes
useVhdDirectory() {
return this._remote.useVhdDirectory ?? false
}
async _closeFile(fd) {
throw new Error('Not implemented')
}
@@ -547,13 +502,9 @@ export default class RemoteHandlerAbstract {
async _outputStream(path, input, { dirMode, validator }) {
const tmpPath = `${dirname(path)}/.${basename(path)}`
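// write to a hidden temporary file first, so a partially written file never appears at the final path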
const output = await timeout.call(
this._createOutputStream(tmpPath, {
dirMode,
flags: 'wx',
}),
this._timeout
)
const output = await this.createOutputStream(tmpPath, {
dirMode,
})
try {
await fromCallback(pipeline, input, output)
if (validator !== undefined) {
@@ -636,10 +587,6 @@ export default class RemoteHandlerAbstract {
async _writeFile(file, data, options) {
throw new Error('Not implemented')
}
get isEncrypted() {
return this._encryptor.id !== 'NULL_ENCRYPTOR'
}
}
function createPrefixWrapperMethods() {

View File

@@ -30,6 +30,18 @@ describe('closeFile()', () => {
})
})
describe('createOutputStream()', () => {
it(`throws in case of timeout`, async () => {
const testHandler = new TestHandler({
createOutputStream: () => new Promise(() => {}),
})
const promise = testHandler.createOutputStream('File')
jest.advanceTimersByTime(TIMEOUT)
await expect(promise).rejects.toThrowError(TimeoutError)
})
})
describe('getInfo()', () => {
it('throws in case of timeout', async () => {
const testHandler = new TestHandler({

View File

@@ -1,7 +1,10 @@
/* eslint-env jest */
import 'dotenv/config'
import asyncIteratorToStream from 'async-iterator-to-stream'
import { forOwn, random } from 'lodash'
import { fromCallback } from 'promise-toolbox'
import { pipeline } from 'readable-stream'
import { tmpdir } from 'os'
import { getHandler } from '.'
@@ -24,6 +27,9 @@ const unsecureRandomBytes = n => {
const TEST_DATA_LEN = 1024
const TEST_DATA = unsecureRandomBytes(TEST_DATA_LEN)
const createTestDataStream = asyncIteratorToStream(function* () {
yield TEST_DATA
})
const rejectionOf = p =>
p.then(
@@ -76,6 +82,14 @@ handlers.forEach(url => {
})
})
describe('#createOutputStream()', () => {
it('creates parent dir if missing', async () => {
const stream = await handler.createOutputStream('dir/file')
await fromCallback(pipeline, createTestDataStream(), stream)
await expect(await handler.readFile('dir/file')).toEqual(TEST_DATA)
})
})
describe('#getInfo()', () => {
let info
beforeAll(async () => {

View File

@@ -5,6 +5,7 @@ import RemoteHandlerLocal from './local'
import RemoteHandlerNfs from './nfs'
import RemoteHandlerS3 from './s3'
import RemoteHandlerSmb from './smb'
import RemoteHandlerSmbMount from './smb-mount'
const HANDLERS = {
file: RemoteHandlerLocal,
@@ -14,8 +15,10 @@ const HANDLERS = {
try {
execa.sync('mount.cifs', ['-V'])
HANDLERS.smb = RemoteHandlerSmbMount
} catch (_) {
HANDLERS.smb = RemoteHandlerSmb
} catch (_) {}
}
export const getHandler = (remote, ...rest) => {
const Handler = HANDLERS[parse(remote.url).type]

View File

@@ -1,38 +1,13 @@
import df from '@sindresorhus/df'
import fs from 'fs-extra'
import lockfile from 'proper-lockfile'
import { createLogger } from '@xen-orchestra/log'
import { fromEvent, retry } from 'promise-toolbox'
import RemoteHandlerAbstract from './abstract'
const { info, warn } = createLogger('xo:fs:local')
// save current stack trace and add it to any rejected error
//
// This is especially useful when the resolution is separate from the initial
// call, which is often the case with RPC libs.
//
// There is a perf impact and it should be avoided in production.
async function addSyncStackTrace(fn, ...args) {
const stackContainer = new Error()
try {
return await fn.apply(this, args)
} catch (error) {
error.syncStack = stackContainer.stack
throw error
}
}
function dontAddSyncStackTrace(fn, ...args) {
return fn.apply(this, args)
}
export default class LocalHandler extends RemoteHandlerAbstract {
constructor(remote, opts = {}) {
super(remote)
this._addSyncStackTrace = opts.syncStackTraces ?? true ? addSyncStackTrace : dontAddSyncStackTrace
this._retriesOnEagain = {
delay: 1e3,
retries: 9,
@@ -55,17 +30,17 @@ export default class LocalHandler extends RemoteHandlerAbstract {
}
async _closeFile(fd) {
return this._addSyncStackTrace(fs.close, fd)
return fs.close(fd)
}
async _copy(oldPath, newPath) {
return this._addSyncStackTrace(fs.copy, this._getFilePath(oldPath), this._getFilePath(newPath))
return fs.copy(this._getFilePath(oldPath), this._getFilePath(newPath))
}
async _createReadStream(file, options) {
if (typeof file === 'string') {
const stream = fs.createReadStream(this._getFilePath(file), options)
await this._addSyncStackTrace(fromEvent, stream, 'open')
await fromEvent(stream, 'open')
return stream
}
return fs.createReadStream('', {
@@ -78,7 +53,7 @@ export default class LocalHandler extends RemoteHandlerAbstract {
async _createWriteStream(file, options) {
if (typeof file === 'string') {
const stream = fs.createWriteStream(this._getFilePath(file), options)
await this._addSyncStackTrace(fromEvent, stream, 'open')
await fromEvent(stream, 'open')
return stream
}
return fs.createWriteStream('', {
@@ -104,98 +79,71 @@ export default class LocalHandler extends RemoteHandlerAbstract {
}
async _getSize(file) {
const stats = await this._addSyncStackTrace(fs.stat, this._getFilePath(typeof file === 'string' ? file : file.path))
const stats = await fs.stat(this._getFilePath(typeof file === 'string' ? file : file.path))
return stats.size
}
async _list(dir) {
return this._addSyncStackTrace(fs.readdir, this._getFilePath(dir))
return fs.readdir(this._getFilePath(dir))
}
async _lock(path) {
const acquire = lockfile.lock.bind(undefined, this._getFilePath(path), {
async onCompromised(error) {
warn('lock compromised', { error })
try {
release = await acquire()
info('compromised lock was reacquired')
} catch (error) {
warn('compromised lock could not be reacquired', { error })
}
},
})
let release = await this._addSyncStackTrace(acquire)
return async () => {
try {
await this._addSyncStackTrace(release)
} catch (error) {
warn('lock could not be released', { error })
}
}
_lock(path) {
return lockfile.lock(this._getFilePath(path))
}
_mkdir(dir, { mode }) {
return this._addSyncStackTrace(fs.mkdir, this._getFilePath(dir), { mode })
return fs.mkdir(this._getFilePath(dir), { mode })
}
async _openFile(path, flags) {
return this._addSyncStackTrace(fs.open, this._getFilePath(path), flags)
return fs.open(this._getFilePath(path), flags)
}
async _read(file, buffer, position) {
const needsClose = typeof file === 'string'
file = needsClose ? await this._addSyncStackTrace(fs.open, this._getFilePath(file), 'r') : file.fd
file = needsClose ? await fs.open(this._getFilePath(file), 'r') : file.fd
try {
return await this._addSyncStackTrace(
fs.read,
file,
buffer,
0,
buffer.length,
position === undefined ? null : position
)
return await fs.read(file, buffer, 0, buffer.length, position === undefined ? null : position)
} finally {
if (needsClose) {
await this._addSyncStackTrace(fs.close, file)
await fs.close(file)
}
}
}
async _readFile(file, options) {
const filePath = this._getFilePath(file)
return await this._addSyncStackTrace(retry, () => fs.readFile(filePath, options), this._retriesOnEagain)
return await retry(() => fs.readFile(filePath, options), this._retriesOnEagain)
}
async _rename(oldPath, newPath) {
return this._addSyncStackTrace(fs.rename, this._getFilePath(oldPath), this._getFilePath(newPath))
return fs.rename(this._getFilePath(oldPath), this._getFilePath(newPath))
}
async _rmdir(dir) {
return this._addSyncStackTrace(fs.rmdir, this._getFilePath(dir))
return fs.rmdir(this._getFilePath(dir))
}
async _sync() {
const path = this._getRealPath('/')
await this._addSyncStackTrace(fs.ensureDir, path)
await this._addSyncStackTrace(fs.access, path, fs.R_OK | fs.W_OK)
await fs.ensureDir(path)
await fs.access(path, fs.R_OK | fs.W_OK)
}
_truncate(file, len) {
return this._addSyncStackTrace(fs.truncate, this._getFilePath(file), len)
return fs.truncate(this._getFilePath(file), len)
}
async _unlink(file) {
const filePath = this._getFilePath(file)
return await this._addSyncStackTrace(retry, () => fs.unlink(filePath), this._retriesOnEagain)
return await retry(() => fs.unlink(filePath), this._retriesOnEagain)
}
_writeFd(file, buffer, position) {
return this._addSyncStackTrace(fs.write, file.fd, buffer, 0, buffer.length, position)
return fs.write(file.fd, buffer, 0, buffer.length, position)
}
_writeFile(file, data, { flags }) {
return this._addSyncStackTrace(fs.writeFile, this._getFilePath(file), data, { flag: flags })
return fs.writeFile(this._getFilePath(file), data, { flag: flags })
}
}

View File

@@ -27,7 +27,7 @@ import copyStreamToBuffer from './_copyStreamToBuffer.js'
import createBufferFromStream from './_createBufferFromStream.js'
import guessAwsRegion from './_guessAwsRegion.js'
import RemoteHandlerAbstract from './abstract'
import { basename, join, split } from './path'
import { basename, join, split } from './_path'
import { asyncEach } from '@vates/async-each'
// endpoints https://docs.aws.amazon.com/general/latest/gr/s3.html
@@ -77,7 +77,9 @@ export default class S3Handler extends RemoteHandlerAbstract {
})
// Workaround for https://github.com/aws/aws-sdk-js-v3/issues/2673
this._s3.middlewareStack.use(getApplyMd5BodyChecksumPlugin(this._s3.config))
this._s3.middlewareStack.use(
getApplyMd5BodyChecksumPlugin(this._s3.config)
)
const parts = split(path)
this._bucket = parts.shift()
@@ -97,12 +99,7 @@ export default class S3Handler extends RemoteHandlerAbstract {
}
_makePrefix(dir) {
const prefix = join(this._dir, dir, '/')
// no prefix for root
if (prefix !== './') {
return prefix
}
return join(this._dir, dir, '/')
}
_createParams(file) {
@@ -155,14 +152,6 @@ export default class S3Handler extends RemoteHandlerAbstract {
if (e.name === 'EntityTooLarge') {
return this._multipartCopy(oldPath, newPath)
}
// normalize this error code
if (e.name === 'NoSuchKey') {
const error = new Error(`ENOENT: no such file or directory '${oldPath}'`)
error.cause = e
error.code = 'ENOENT'
error.path = oldPath
throw error
}
throw e
}
}
@@ -243,17 +232,14 @@ export default class S3Handler extends RemoteHandlerAbstract {
}
async _createReadStream(path, options) {
try {
return (await this._s3.send(new GetObjectCommand(this._createParams(path)))).Body
} catch (e) {
if (e.name === 'NoSuchKey') {
const error = new Error(`ENOENT: no such file '${path}'`)
error.code = 'ENOENT'
error.path = path
throw error
}
throw e
if (!(await this._isFile(path))) {
const error = new Error(`ENOENT: no such file '${path}'`)
error.code = 'ENOENT'
error.path = path
throw error
}
return (await this._s3.send(new GetObjectCommand(this._createParams(path)))).Body
}
async _unlink(path) {
@@ -533,8 +519,4 @@ export default class S3Handler extends RemoteHandlerAbstract {
}
async _closeFile(fd) {}
useVhdDirectory() {
return true
}
}

View File

@@ -0,0 +1,23 @@
import { parse } from 'xo-remote-parser'
import MountHandler from './_mount'
import { normalize } from './_path'
export default class SmbMountHandler extends MountHandler {
constructor(remote, opts) {
const { domain = 'WORKGROUP', host, password, path, username } = parse(remote.url)
super(remote, opts, {
type: 'cifs',
device: '//' + host + normalize(path),
options: `domain=${domain}`,
env: {
USER: username,
PASSWD: password,
},
})
}
get type() {
return 'smb'
}
}

View File

@@ -1,23 +1,163 @@
import { parse } from 'xo-remote-parser'
import Smb2 from '@marsaud/smb2'
import MountHandler from './_mount'
import { normalize } from './path'
import RemoteHandlerAbstract from './abstract'
export default class SmbHandler extends MountHandler {
// Normalize SMB status codes to Node-style POSIX error codes.
const wrapError = (error, code) => ({
__proto__: error,
cause: error,
code,
})
const normalizeError = (error, shouldBeDirectory) => {
const { code } = error
throw code === 'STATUS_DIRECTORY_NOT_EMPTY'
? wrapError(error, 'ENOTEMPTY')
: code === 'STATUS_FILE_IS_A_DIRECTORY'
? wrapError(error, 'EISDIR')
: code === 'STATUS_NOT_A_DIRECTORY'
? wrapError(error, 'ENOTDIR')
: code === 'STATUS_OBJECT_NAME_NOT_FOUND' || code === 'STATUS_OBJECT_PATH_NOT_FOUND'
? wrapError(error, 'ENOENT')
: code === 'STATUS_OBJECT_NAME_COLLISION'
? wrapError(error, 'EEXIST')
: code === 'STATUS_NOT_SUPPORTED' || code === 'STATUS_INVALID_PARAMETER'
? wrapError(error, shouldBeDirectory ? 'ENOTDIR' : 'EISDIR')
: error
}
const normalizeDirError = error => normalizeError(error, true)
export default class SmbHandler extends RemoteHandlerAbstract {
constructor(remote, opts) {
const { domain = 'WORKGROUP', host, password, path, username } = parse(remote.url)
super(remote, opts, {
type: 'cifs',
device: '//' + host + normalize(path),
options: `domain=${domain}`,
env: {
USER: username,
PASSWD: password,
},
})
super(remote, opts)
// defined in _sync()
this._client = undefined
const prefix = this._remote.path
this._prefix = prefix !== '' ? prefix + '\\' : prefix
}
get type() {
return 'smb'
}
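// maps a POSIX-style path ('/dir/file') to an SMB path under the share prefix ('prefix\dir\file')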
_getFilePath(file) {
return this._prefix + (typeof file === 'string' ? file : file.path).slice(1).replace(/\//g, '\\')
}
_dirname(file) {
const parts = file.split('\\')
parts.pop()
return parts.join('\\')
}
_closeFile(file) {
return this._client.close(file).catch(normalizeError)
}
_createReadStream(file, options) {
if (typeof file === 'string') {
file = this._getFilePath(file)
} else {
options = { autoClose: false, ...options, fd: file.fd }
file = ''
}
return this._client.createReadStream(file, options).catch(normalizeError)
}
_createWriteStream(file, options) {
if (typeof file === 'string') {
file = this._getFilePath(file)
} else {
options = { autoClose: false, ...options, fd: file.fd }
file = ''
}
return this._client.createWriteStream(file, options).catch(normalizeError)
}
_forget() {
const client = this._client
this._client = undefined
return client.disconnect()
}
_getSize(file) {
return this._client.getSize(this._getFilePath(file)).catch(normalizeError)
}
_list(dir) {
return this._client.readdir(this._getFilePath(dir)).catch(normalizeDirError)
}
_mkdir(dir, { mode }) {
return this._client.mkdir(this._getFilePath(dir), mode).catch(normalizeDirError)
}
// TODO: add flags
_openFile(path, flags) {
return this._client.open(this._getFilePath(path), flags).catch(normalizeError)
}
async _read(file, buffer, position) {
const client = this._client
const needsClose = typeof file === 'string'
file = needsClose ? await client.open(this._getFilePath(file)) : file.fd
try {
return await client.read(file, buffer, 0, buffer.length, position)
} catch (error) {
normalizeError(error)
} finally {
if (needsClose) {
await client.close(file)
}
}
}
_readFile(file, options) {
return this._client.readFile(this._getFilePath(file), options).catch(normalizeError)
}
_rename(oldPath, newPath) {
return this._client
.rename(this._getFilePath(oldPath), this._getFilePath(newPath), {
replace: true,
})
.catch(normalizeError)
}
_rmdir(dir) {
return this._client.rmdir(this._getFilePath(dir)).catch(normalizeDirError)
}
_sync() {
const remote = this._remote
this._client = new Smb2({
share: `\\\\${remote.host}`,
domain: remote.domain,
username: remote.username,
password: remote.password,
autoCloseTimeout: 0,
})
// Check access (smb2 does not expose connect in public so far...)
return this.list('.')
}
_truncate(file, len) {
return this._client.truncate(this._getFilePath(file), len).catch(normalizeError)
}
_unlink(file) {
return this._client.unlink(this._getFilePath(file)).catch(normalizeError)
}
_writeFd(file, buffer, position) {
return this._client.write(file.fd, buffer, 0, buffer.length, position)
}
_writeFile(file, data, options) {
return this._client.writeFile(this._getFilePath(file), data, options).catch(normalizeError)
}
}

View File

@@ -1,16 +1,15 @@
import get from 'lodash/get.js'
import identity from 'lodash/identity.js'
import isEqual from 'lodash/isEqual.js'
import { createLogger } from '@xen-orchestra/log'
import { parseDuration } from '@vates/parse-duration'
import { watch } from 'app-conf'
'use strict'
const get = require('lodash/get')
const identity = require('lodash/identity')
const isEqual = require('lodash/isEqual')
const { createLogger } = require('@xen-orchestra/log')
const { parseDuration } = require('@vates/parse-duration')
const { watch } = require('app-conf')
const { warn } = createLogger('xo:mixins:config')
// if path is undefined, an empty string, or an empty array, return the root value
const niceGet = (value, path) => (path === undefined || path.length === 0 ? value : get(value, path))
export default class Config {
module.exports = class Config {
constructor(app, { appDir, appName, config }) {
this._config = config
const watchers = (this._watchers = new Set())
@@ -33,7 +32,7 @@ export default class Config {
}
get(path) {
const value = niceGet(this._config, path)
const value = get(this._config, path)
if (value === undefined) {
throw new TypeError('missing config entry: ' + path)
}
@@ -45,27 +44,20 @@ export default class Config {
}
getOptional(path) {
return niceGet(this._config, path)
return get(this._config, path)
}
watch(path, cb) {
// short syntax for the whole config: watch(cb)
if (typeof path === 'function') {
cb = path
path = undefined
}
// internal arg
const processor = arguments.length > 2 ? arguments[2] : identity
let prev
const watcher = config => {
try {
const value = processor(niceGet(config, path))
const value = processor(get(config, path))
if (!isEqual(value, prev)) {
const previous = prev
prev = value
cb(value, previous, path)
cb(value)
}
} catch (error) {
warn('watch', { error, path })

View File

@@ -0,0 +1,51 @@
'use strict'
const assert = require('assert')
const emitAsync = require('@xen-orchestra/emit-async')
const EventEmitter = require('events')
const { createLogger } = require('@xen-orchestra/log')
const { debug, warn } = createLogger('xo:mixins:hooks')
const runHook = async (emitter, hook) => {
debug(`${hook} start…`)
await emitAsync.call(
emitter,
{
onError: error => warn(`${hook} failure`, { error }),
},
hook
)
debug(`${hook} finished`)
}
module.exports = class Hooks extends EventEmitter {
// Run *clean* async listeners.
//
// They normalize existing data, clear invalid entries, etc.
clean() {
return runHook(this, 'clean')
}
_status = 'stopped'
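// lifecycle: stopped → starting → started → stopping → stopped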
// Run *start* async listeners.
//
// They initialize the application.
async start() {
assert.strictEqual(this._status, 'stopped')
this._status = 'starting'
await runHook(this, 'start')
this.emit((this._status = 'started'))
}
// Run *stop* async listeners.
//
// They close connections, unmount file systems, save states, etc.
async stop() {
assert.strictEqual(this._status, 'started')
this._status = 'stopping'
await runHook(this, 'stop')
this.emit((this._status = 'stopped'))
}
}

View File

@@ -1,70 +0,0 @@
import assert from 'assert'
import emitAsync from '@xen-orchestra/emit-async'
import EventEmitter from 'events'
import { createLogger } from '@xen-orchestra/log'
const { debug, warn } = createLogger('xo:mixins:hooks')
const runHook = async (emitter, hook) => {
debug(`${hook} start…`)
await emitAsync.call(
emitter,
{
onError: error => warn(`${hook} failure`, { error }),
},
hook
)
debug(`${hook} finished`)
}
export default class Hooks extends EventEmitter {
// Run *clean* async listeners.
//
// They normalize existing data, clear invalid entries, etc.
clean() {
return runHook(this, 'clean')
}
_status = 'stopped'
// Run *start* async listeners.
//
// They initialize the application.
//
// *startCore* is automatically called if necessary.
async start() {
if (this._status === 'stopped') {
await this.startCore()
} else {
assert.strictEqual(this._status, 'core started')
}
this._status = 'starting'
await runHook(this, 'start')
this.emit((this._status = 'started'))
}
// Run *start core* async listeners.
//
// They initialize core features of the application (connect to databases,
// etc.) and should be fast and side-effects free.
async startCore() {
assert.strictEqual(this._status, 'stopped')
this._status = 'starting core'
await runHook(this, 'start core')
this.emit((this._status = 'core started'))
}
// Run *stop* async listeners if necessary and *stop core* listeners.
//
// They close connections, unmount file systems, save states, etc.
async stop() {
if (this._status !== 'core started') {
assert.strictEqual(this._status, 'started')
this._status = 'stopping'
await runHook(this, 'stop')
this._status = 'core started'
}
await runHook(this, 'stop core')
this.emit((this._status = 'stopped'))
}
}

View File

@@ -1,15 +1,15 @@
import { createLogger } from '@xen-orchestra/log'
import { EventListenersManager } from '@vates/event-listeners-manager'
import { pipeline } from 'stream'
import { ServerResponse, request } from 'http'
import assert from 'assert'
import fromCallback from 'promise-toolbox/fromCallback'
import fromEvent from 'promise-toolbox/fromEvent'
import net from 'net'
'use strict'
import { parseBasicAuth } from './_parseBasicAuth.mjs'
const { debug, warn } = require('@xen-orchestra/log').createLogger('xo:mixins:HttpProxy')
const { EventListenersManager } = require('@vates/event-listeners-manager')
const { pipeline } = require('stream')
const { ServerResponse, request } = require('http')
const assert = require('assert')
const fromCallback = require('promise-toolbox/fromCallback')
const fromEvent = require('promise-toolbox/fromEvent')
const net = require('net')
const { debug, warn } = createLogger('xo:mixins:HttpProxy')
const { parseBasicAuth } = require('./_parseBasicAuth.js')
const IGNORED_HEADERS = new Set([
// https://datatracker.ietf.org/doc/html/rfc2616#section-13.5.1
@@ -26,7 +26,7 @@ const IGNORED_HEADERS = new Set([
'host',
])
export default class HttpProxy {
module.exports = class HttpProxy {
#app
constructor(app, { httpServer }) {
@@ -40,7 +40,7 @@ export default class HttpProxy {
this.#app = app
const events = new EventListenersManager(httpServer)
app.config.watch('http.proxy.enabled', (enabled = true) => {
app.config.watch('http.proxy.enabled', (enabled = false) => {
events.removeAll()
if (enabled) {
events.add('connect', this.#handleConnect.bind(this)).add('request', this.#handleRequest.bind(this))

View File

@@ -1,214 +0,0 @@
import { createLogger } from '@xen-orchestra/log'
import { createSecureContext } from 'tls'
import { dirname } from 'node:path'
import { X509Certificate } from 'node:crypto'
import acme from 'acme-client'
import fs from 'node:fs/promises'
import get from 'lodash/get.js'
const { debug, info, warn } = createLogger('xo:mixins:sslCertificate')
acme.setLogger(message => {
debug(message)
})
// - create any missing parent directories
// - replace existing files
// - secure permissions (read-only for the owner)
async function outputFile(path, content) {
await fs.mkdir(dirname(path), { recursive: true })
try {
await fs.unlink(path)
} catch (error) {
if (error.code !== 'ENOENT') {
throw error
}
}
await fs.writeFile(path, content, { flag: 'wx', mode: 0o400 })
}
// from https://github.com/publishlab/node-acme-client/blob/master/examples/auto.js
class SslCertificate {
#cert
#challengeCreateFn
#challengeRemoveFn
#delayBeforeRenewal = 30 * 24 * 60 * 60 * 1000 // 30 days
#secureContext
#updateSslCertificatePromise
constructor({ challengeCreateFn, challengeRemoveFn }, cert, key) {
this.#challengeCreateFn = challengeCreateFn
this.#challengeRemoveFn = challengeRemoveFn
this.#set(cert, key)
}
get #isValid() {
const cert = this.#cert
return cert !== undefined && Date.parse(cert.validTo) > Date.now() && cert.issuer !== cert.subject
}
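// renew when the certificate is invalid or expires within #delayBeforeRenewal (30 days)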
get #shouldBeRenewed() {
return !(this.#isValid && Date.parse(this.#cert.validTo) > Date.now() + this.#delayBeforeRenewal)
}
#set(cert, key) {
this.#cert = new X509Certificate(cert)
this.#secureContext = createSecureContext({ cert, key })
}
async getSecureContext(config) {
if (!this.#shouldBeRenewed) {
return this.#secureContext
}
if (this.#updateSslCertificatePromise === undefined) {
// not currently updating certificate
//
// ensure we only refresh certificate once at a time
//
// promise is cleaned by #updateSslCertificate itself
this.#updateSslCertificatePromise = this.#updateSslCertificate(config)
}
// old certificate is still here, return it while updating
if (this.#isValid) {
return this.#secureContext
}
return this.#updateSslCertificatePromise
}
async #save(certPath, cert, keyPath, key) {
try {
await Promise.all([outputFile(keyPath, key), outputFile(certPath, cert)])
info('new certificate generated', { cert: certPath, key: keyPath })
} catch (error) {
warn(`couldn't write let's encrypt certificates to disk `, { error })
}
}
async #updateSslCertificate(config) {
const { cert: certPath, key: keyPath, acmeEmail, acmeDomain } = config
try {
let { acmeCa = 'letsencrypt/production' } = config
if (!(acmeCa.startsWith('http:') || acmeCa.startsWith('https:'))) {
acmeCa = get(acme.directory, acmeCa.split('/'))
}
/* Init client */
const client = new acme.Client({
directoryUrl: acmeCa,
accountKey: await acme.crypto.createPrivateKey(),
})
/* Create CSR */
let [key, csr] = await acme.crypto.createCsr({
commonName: acmeDomain,
})
csr = csr.toString()
key = key.toString()
debug('Successfully generated key and csr')
/* Certificate */
const cert = await client.auto({
challengeCreateFn: this.#challengeCreateFn,
challengePriority: ['http-01'],
challengeRemoveFn: this.#challengeRemoveFn,
csr,
email: acmeEmail,
skipChallengeVerification: true,
termsOfServiceAgreed: true,
})
debug('Successfully generated certificate')
this.#set(cert, key)
// don't wait for this
this.#save(certPath, cert, keyPath, key)
return this.#secureContext
} catch (error) {
warn(`couldn't renew ssl certificate`, { acmeDomain, error })
} finally {
this.#updateSslCertificatePromise = undefined
}
}
}
export default class SslCertificates {
#app
#challenges = new Map()
#challengeHandlers = {
challengeCreateFn: (authz, challenge, keyAuthorization) => {
this.#challenges.set(challenge.token, keyAuthorization)
},
challengeRemoveFn: (authz, challenge, keyAuthorization) => {
this.#challenges.delete(challenge.token)
},
}
#handlers = new Map()
constructor(app, { httpServer }) {
// don't set up the certificate handling if httpServer is not present
//
// that can happen when the app is instantiated in another context like xo-server-recover-account
if (httpServer === undefined) {
return
}
const prefix = '/.well-known/acme-challenge/'
httpServer.on('request', (req, res) => {
const { url } = req
if (url.startsWith(prefix)) {
const token = url.slice(prefix.length)
this.#acmeChallendMiddleware(req, res, token)
}
})
this.#app = app
httpServer.getSecureContext = this.getSecureContext.bind(this)
}
async getSecureContext(httpsDomainName, configKey, initialCert, initialKey) {
const config = this.#app.config.get(['http', 'listen', configKey])
const handlers = this.#handlers
const { acmeDomain } = config
// not a Let's Encrypt protected endpoint, something changed in the configuration
if (acmeDomain === undefined) {
handlers.delete(configKey)
return
}
// server has been accessed with another domain, don't use the certificate
if (acmeDomain !== httpsDomainName) {
return
}
let handler = handlers.get(configKey)
if (handler === undefined) {
// register the handler for this domain
handler = new SslCertificate(this.#challengeHandlers, initialCert, initialKey)
handlers.set(configKey, handler)
}
return handler.getSecureContext(config)
}
// middleware that serves the HTTP challenge to Let's Encrypt servers
#acmeChallendMiddleware(req, res, token) {
debug('fetching challenge for token ', token)
const challenge = this.#challenges.get(token)
debug('challenge content is ', challenge)
if (challenge === undefined) {
res.statusCode = 404
res.end()
return
}
res.write(challenge)
res.end()
debug('successfully answered challenge ')
}
}

View File

@@ -1,6 +1,8 @@
'use strict'
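// matches e.g. 'Basic dXNlcjpwYXNz' and captures the base64-encoded credentials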
const RE = /^\s*basic\s+(.+?)\s*$/i
export function parseBasicAuth(header) {
exports.parseBasicAuth = function parseBasicAuth(header) {
if (header === undefined) {
return
}

View File

@@ -10,11 +10,11 @@
## Set up
The proxy is enabled by default, to disable it, add the following lines to your config:
The proxy is disabled by default, to enable it, add the following lines to your config:
```toml
[http.proxy]
enabled = false
enabled = true
```
## Usage

View File

@@ -1,49 +0,0 @@
> This module provides [Let's Encrypt](https://letsencrypt.org/) integration to `xo-proxy` and `xo-server`.
First of all, make sure your server is listening on HTTP on port 80 and on HTTPS 443.
In `xo-server`, to avoid HTTP access, enable the redirection to HTTPS:
```toml
[http]
redirectToHttps = true
```
Your server must be reachable at the configured domain by the certificate provider (e.g. Let's Encrypt), which usually means publicly reachable.
Finally, add the following entries to your HTTPS configuration.
```toml
# Must be set to true for this feature
autoCert = true
# These entries are required and indicate where the certificate and the
# private key will be saved.
cert = 'path/to/cert.pem'
key = 'path/to/key.pem'
# ACME (e.g. Let's Encrypt, ZeroSSL) CA directory
#
# Specifies the URL to the ACME CA's directory.
#
# An identifier `provider/directory` can be passed instead of a URL, see the
# list of supported directories here: https://www.npmjs.com/package/acme-client#directory-urls
#
# Note that the application cannot detect that this value has changed.
#
# In that case delete the certificate and the key files, and restart the
# application to generate new ones.
#
# Default is 'letsencrypt/production'
acmeCa = 'zerossl/production'
# Domain for which the certificate should be created.
#
# This entry is required.
acmeDomain = 'my.domain.net'
# Optional email address which will be used for the certificate creation.
#
# It will be notified of any issues.
acmeEmail = 'admin@my.domain.net'
```

View File

@@ -14,17 +14,16 @@
"url": "https://vates.fr"
},
"license": "AGPL-3.0-or-later",
"version": "0.8.0",
"version": "0.3.1",
"engines": {
"node": ">=15.6"
"node": ">=12"
},
"dependencies": {
"@vates/event-listeners-manager": "^1.0.1",
"@vates/event-listeners-manager": "^1.0.0",
"@vates/parse-duration": "^0.1.1",
"@xen-orchestra/emit-async": "^1.0.0",
"@xen-orchestra/emit-async": "^0.1.0",
"@xen-orchestra/log": "^0.3.0",
"acme-client": "^5.0.0",
"app-conf": "^2.3.0",
"app-conf": "^2.1.0",
"lodash": "^4.17.21",
"promise-toolbox": "^0.21.0"
},

View File

@@ -9,7 +9,7 @@
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"version": "0.1.2",
"version": "0.1.1",
"engines": {
"node": ">=8.10"
},
@@ -30,7 +30,7 @@
"rimraf": "^3.0.0"
},
"dependencies": {
"@vates/read-chunk": "^1.0.0"
"@vates/read-chunk": "^0.1.2"
},
"author": {
"name": "Vates SAS",

View File

@@ -1,23 +1,25 @@
#!/usr/bin/env node
import assert from 'assert'
import colors from 'ansi-colors'
import contentType from 'content-type'
import CSON from 'cson-parser'
import fromCallback from 'promise-toolbox/fromCallback'
import fs from 'fs'
import getopts from 'getopts'
import hrp from 'http-request-plus'
import split2 from 'split2'
import pumpify from 'pumpify'
import { extname } from 'path'
import { format, parse } from 'json-rpc-protocol'
import { inspect } from 'util'
import { load as loadConfig } from 'app-conf'
import { pipeline } from 'stream'
import { readChunk } from '@vates/read-chunk'
'use strict'
const pkg = JSON.parse(fs.readFileSync(new URL('package.json', import.meta.url)))
const assert = require('assert')
const colors = require('ansi-colors')
const contentType = require('content-type')
const CSON = require('cson-parser')
const fromCallback = require('promise-toolbox/fromCallback')
const fs = require('fs')
const getopts = require('getopts')
const hrp = require('http-request-plus')
const split2 = require('split2')
const pumpify = require('pumpify')
const { extname, join } = require('path')
const { format, parse } = require('json-rpc-protocol')
const { inspect } = require('util')
const { load: loadConfig } = require('app-conf')
const { pipeline } = require('stream')
const { readChunk } = require('@vates/read-chunk')
const pkg = require('./package.json')
const FORMATS = {
__proto__: null,
@@ -30,22 +32,30 @@ const parseValue = value => (value.startsWith('json:') ? JSON.parse(value.slice(
async function main(argv) {
const config = await loadConfig('xo-proxy', {
appDir: join(__dirname, '..'),
ignoreUnknownFormats: true,
})
const opts = getopts(argv, {
const { hostname = 'localhost', port } = config?.http?.listen?.https ?? {}
const {
_: args,
file,
help,
host,
raw,
token,
} = getopts(argv, {
alias: { file: 'f', help: 'h' },
boolean: ['help', 'raw'],
default: {
token: config.authenticationToken,
},
stopEarly: true,
string: ['file', 'host', 'token', 'url'],
string: ['file', 'host', 'token'],
})
const { _: args, file } = opts
if (opts.help || (file === '' && args.length === 0)) {
if (help || (file === '' && args.length === 0)) {
return console.log(
'%s',
`Usage:
@@ -70,29 +80,18 @@ ${pkg.name} v${pkg.version}`
const baseRequest = {
headers: {
'content-type': 'application/json',
cookie: `authenticationToken=${token}`,
},
pathname: '/api/v1',
protocol: 'https:',
rejectUnauthorized: false,
}
let { token } = opts
if (opts.url !== '') {
const { protocol, host, username } = new URL(opts.url)
Object.assign(baseRequest, { protocol, host })
if (username !== '') {
token = username
}
if (host !== '') {
baseRequest.host = host
} else {
baseRequest.protocol = 'https:'
if (opts.host !== '') {
baseRequest.host = opts.host
} else {
const { hostname = 'localhost', port } = config?.http?.listen?.https ?? {}
baseRequest.hostname = hostname
baseRequest.port = port
}
baseRequest.hostname = hostname
baseRequest.port = port
}
baseRequest.headers.cookie = `authenticationToken=${token}`
const call = async ({ method, params }) => {
if (callPath.length !== 0) {
process.stderr.write(`\n${colors.bold(`--- call #${callPath.join('.')}`)} ---\n\n`)
@@ -131,7 +130,7 @@ ${pkg.name} v${pkg.version}`
stdout.write(inspect(JSON.parse(line), { colors: true, depth: null }))
stdout.write('\n')
}
} else if (opts.raw && typeof result === 'string') {
} else if (raw && typeof result === 'string') {
stdout.write(result)
} else {
stdout.write(inspect(result, { colors: true, depth: null }))

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "@xen-orchestra/proxy-cli",
"version": "0.3.1",
"version": "0.2.0",
"license": "AGPL-3.0-or-later",
"description": "CLI for @xen-orchestra/proxy",
"keywords": [
@@ -19,16 +19,16 @@
},
"preferGlobal": true,
"bin": {
"xo-proxy-cli": "./index.mjs"
"xo-proxy-cli": "./index.js"
},
"engines": {
"node": ">=14.13"
"node": ">=14"
},
"dependencies": {
"@iarna/toml": "^2.2.0",
"@vates/read-chunk": "^1.0.0",
"@vates/read-chunk": "^0.1.2",
"ansi-colors": "^4.1.1",
"app-conf": "^2.3.0",
"app-conf": "^2.1.0",
"content-type": "^1.0.4",
"cson-parser": "^4.0.7",
"getopts": "^2.2.3",

View File

@@ -1,7 +1,6 @@
import Config from '@xen-orchestra/mixins/Config.mjs'
import Hooks from '@xen-orchestra/mixins/Hooks.mjs'
import HttpProxy from '@xen-orchestra/mixins/HttpProxy.mjs'
import SslCertificate from '@xen-orchestra/mixins/SslCertificate.mjs'
import Config from '@xen-orchestra/mixins/Config.js'
import Hooks from '@xen-orchestra/mixins/Hooks.js'
import HttpProxy from '@xen-orchestra/mixins/HttpProxy.js'
import mixin from '@xen-orchestra/mixin'
import { createDebounceResource } from '@vates/disposable/debounceResource.js'
@@ -15,23 +14,9 @@ import ReverseProxy from './mixins/reverseProxy.mjs'
export default class App {
constructor(opts) {
mixin(
this,
{
Api,
Appliance,
Authentication,
Backups,
Config,
Hooks,
HttpProxy,
Logs,
Remotes,
ReverseProxy,
SslCertificate,
},
[opts]
)
mixin(this, { Api, Appliance, Authentication, Backups, Config, Hooks, HttpProxy, Logs, Remotes, ReverseProxy }, [
opts,
])
const debounceResource = createDebounceResource()
this.config.watchDuration('resourceCacheDelay', delay => {

View File

@@ -1,4 +1,4 @@
import { format, parse, MethodNotFound, JsonRpcError } from 'json-rpc-protocol'
import { format, parse, MethodNotFound } from 'json-rpc-protocol'
import * as errors from 'xo-common/api-errors.js'
import Ajv from 'ajv'
import asyncIteratorToStream from 'async-iterator-to-stream'
@@ -9,26 +9,11 @@ import helmet from 'koa-helmet'
import Koa from 'koa'
import once from 'lodash/once.js'
import Router from '@koa/router'
import stubTrue from 'lodash/stubTrue.js'
import Zone from 'node-zone'
import { createLogger } from '@xen-orchestra/log'
const { debug, warn } = createLogger('xo:proxy:api')
// format an error to JSON-RPC but do not hide non-JSON-RPC errors
function formatError(responseId, error) {
if (error != null && typeof error.toJsonRpcError !== 'function') {
const { message, ...data } = error
// force these entries even if they are not enumerable
data.code = error.code
data.stack = error.stack
error = new JsonRpcError(error.message, undefined, data)
}
return format.error(responseId, error)
}
const ndJsonStream = asyncIteratorToStream(async function* (responseId, iterable) {
try {
let cursor, iterator
@@ -39,7 +24,7 @@ const ndJsonStream = asyncIteratorToStream(async function* (responseId, iterable
cursor = await iterator.next()
yield format.response(responseId, { $responseType: 'ndjson' }) + '\n'
} catch (error) {
yield formatError(responseId, error)
yield format.error(responseId, error)
throw error
}
@@ -78,7 +63,7 @@ export default class Api {
try {
body = parse(body)
} catch (error) {
ctx.body = formatError(null, error)
ctx.body = format.error(null, error)
return
}
@@ -92,7 +77,7 @@ export default class Api {
const { method, params } = body
warn('call error', { method, params, error })
ctx.set('Content-Type', 'application/json')
ctx.body = formatError(body.id, error)
ctx.body = format.error(body.id, error)
return
}
@@ -181,20 +166,14 @@ export default class Api {
throw errors.noSuchObject('method', name)
}
const { description, params = {}, result = {} } = method
return { description, name, params, result }
const { description, params = {} } = method
return { description, name, params }
},
{
description: 'returns the signature of an API method',
params: {
method: { type: 'string' },
},
result: {
description: { type: 'string' },
name: { type: 'string' },
params: { type: 'object' },
result: { type: 'object' },
},
},
],
},
@@ -226,29 +205,40 @@ export default class Api {
})
}
addMethod(name, method, { description, params = {}, result: resultSchema } = {}) {
addMethod(name, method, { description, params = {} } = {}) {
const methods = this._methods
if (name in methods) {
throw new Error(`API method ${name} already exists`)
}
const validateParams = this.#compileSchema(params)
const validateResult = this.#compileSchema(resultSchema)
const ajv = this._ajv
const validate = ajv.compile({
// we want additional properties to be disabled by default
additionalProperties: params['*'] || false,
const m = async params => {
if (!validateParams(params)) {
throw errors.invalidParameters(validateParams.errors)
properties: params,
// we want params to be required by default unless explicitly marked so
// we use property `optional` instead of object `required`
required: Object.keys(params).filter(name => {
const param = params[name]
const required = !param.optional
delete param.optional
return required
}),
type: 'object',
})
const m = params => {
if (!validate(params)) {
throw errors.invalidParameters(validate.errors)
}
const result = await method(params)
if (!validateResult(result)) {
warn('invalid API method result', { errors: validateResult.error, result })
}
return result
return method(params)
}
m.description = description
m.params = params
m.result = resultSchema
methods[name] = m
@@ -299,43 +289,4 @@ export default class Api {
}
return fn(params)
}
#compileSchema(schema) {
if (schema === undefined) {
return stubTrue
}
if (schema.type === undefined) {
schema = { type: 'object', properties: schema }
}
const { type } = schema
if (Array.isArray(type) ? type.includes('object') : type === 'object') {
const { properties = {} } = schema
if (schema.additionalProperties === undefined) {
const wildCard = properties['*']
if (wildCard === undefined) {
// we want additional properties to be disabled by default
schema.additionalProperties = false
} else {
delete properties['*']
schema.additionalProperties = wildCard
}
}
// we want properties to be required by default unless explicitly marked so
// we use property `optional` instead of object `required`
if (schema.required === undefined) {
schema.required = Object.keys(properties).filter(name => {
const param = properties[name]
const required = !param.optional
delete param.optional
return required
})
}
}
return this._ajv.compile(schema)
}
}

View File

@@ -407,7 +407,6 @@ export default class Backups {
debounceResource: app.debounceResource.bind(app),
dirMode: app.config.get('backups.dirMode'),
vhdDirectoryCompression: app.config.get('backups.vhdDirectoryCompression'),
useGetDiskLegacy: app.config.getOptional('backups.useGetDiskLegacy'),
})
}

View File

@@ -22,6 +22,27 @@ disableMergeWorker = false
snapshotNameLabelTpl = '[XO Backup {job.name}] {vm.name_label}'
vhdDirectoryCompression = 'brotli'
[backups.defaultSettings]
reportWhen = 'failure'
[backups.metadata.defaultSettings]
retentionPoolMetadata = 0
retentionXoMetadata = 0
[backups.vm.defaultSettings]
bypassVdiChainsCheck = false
checkpointSnapshot = false
concurrency = 2
copyRetention = 0
deleteFirst = false
exportRetention = 0
fullInterval = 0
offlineBackup = false
offlineSnapshot = false
snapshotRetention = 0
timeout = 0
vmTimeout = 0
# This is a work-around.
#
# See https://github.com/vatesfr/xen-orchestra/pull/4674

View File

@@ -56,32 +56,11 @@ ${APP_NAME} v${APP_VERSION}
createSecureServer: opts => createSecureServer({ ...opts, allowHTTP1: true }),
})
forOwn(config.http.listen, async ({ autoCert, cert, key, ...opts }, configKey) => {
const useAcme = autoCert && opts.acmeDomain !== undefined
// don't pass these entries to httpServer.listen(opts)
for (const key of Object.keys(opts).filter(_ => _.startsWith('acme'))) {
delete opts[key]
}
forOwn(config.http.listen, async ({ autoCert, cert, key, ...opts }) => {
try {
let niceAddress
if (cert !== undefined && key !== undefined) {
if (useAcme) {
opts.SNICallback = async (serverName, callback) => {
try {
// injected by mixins/SslCertificate
const secureContext = await httpServer.getSecureContext(serverName, configKey, opts.cert, opts.key)
callback(null, secureContext)
} catch (error) {
warn(error)
callback(error, null)
}
}
}
niceAddress = await pRetry(
async () => {
const niceAddress = await pRetry(
async () => {
if (cert !== undefined && key !== undefined) {
try {
opts.cert = fse.readFileSync(cert)
opts.key = fse.readFileSync(key)
@@ -97,22 +76,20 @@ ${APP_NAME} v${APP_VERSION}
opts.cert = pems.cert
opts.key = pems.key
}
return httpServer.listen(opts)
},
{
tries: 2,
when: e => autoCert && e.code === 'ERR_SSL_EE_KEY_TOO_SMALL',
onRetry: () => {
warn('deleting invalid certificate')
fse.unlinkSync(cert)
fse.unlinkSync(key)
},
}
)
} else {
niceAddress = await httpServer.listen(opts)
}
return httpServer.listen(opts)
},
{
tries: 2,
when: e => autoCert && e.code === 'ERR_SSL_EE_KEY_TOO_SMALL',
onRetry: () => {
warn('deleting invalid certificate')
fse.unlinkSync(cert)
fse.unlinkSync(key)
},
}
)
info(`Web server listening on ${niceAddress}`)
} catch (error) {
@@ -169,7 +146,6 @@ ${APP_NAME} v${APP_VERSION}
process.on(signal, () => {
if (alreadyCalled) {
warn('forced exit')
// eslint-disable-next-line n/no-process-exit
process.exit(1)
}
alreadyCalled = true
@@ -188,7 +164,6 @@ main(process.argv.slice(2)).then(
error => {
fatal(error)
// eslint-disable-next-line n/no-process-exit
process.exit(1)
}
)

View File

@@ -1,7 +1,7 @@
{
"private": true,
"name": "@xen-orchestra/proxy",
"version": "0.26.2",
"version": "0.22.0",
"license": "AGPL-3.0-or-later",
"description": "XO Proxy used to remotely execute backup jobs",
"keywords": [
@@ -26,33 +26,33 @@
},
"dependencies": {
"@iarna/toml": "^2.2.0",
"@koa/router": "^12.0.0",
"@koa/router": "^10.0.0",
"@vates/cached-dns.lookup": "^1.0.0",
"@vates/compose": "^2.1.0",
"@vates/decorate-with": "^2.0.0",
"@vates/disposable": "^0.1.1",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/backups": "^0.28.0",
"@xen-orchestra/fs": "^3.1.0",
"@xen-orchestra/backups": "^0.22.0",
"@xen-orchestra/fs": "^1.0.1",
"@xen-orchestra/log": "^0.3.0",
"@xen-orchestra/mixin": "^0.1.0",
"@xen-orchestra/mixins": "^0.8.0",
"@xen-orchestra/self-signed": "^0.1.3",
"@xen-orchestra/xapi": "^1.5.0",
"@xen-orchestra/mixins": "^0.3.1",
"@xen-orchestra/self-signed": "^0.1.0",
"@xen-orchestra/xapi": "^0.11.0",
"ajv": "^8.0.3",
"app-conf": "^2.3.0",
"app-conf": "^2.1.0",
"async-iterator-to-stream": "^1.1.0",
"fs-extra": "^10.0.0",
"get-stream": "^6.0.0",
"getopts": "^2.2.3",
"golike-defer": "^0.5.1",
"http-server-plus": "^0.12.0",
"http-server-plus": "^0.11.0",
"http2-proxy": "^5.0.53",
"json-rpc-protocol": "^0.13.1",
"jsonrpc-websocket-client": "^0.7.2",
"koa": "^2.5.1",
"koa-compress": "^5.0.1",
"koa-helmet": "^6.1.0",
"koa-helmet": "^5.1.0",
"lodash": "^4.17.10",
"node-zone": "^0.4.0",
"parse-pairs": "^1.0.0",
@@ -60,7 +60,7 @@
"source-map-support": "^0.5.16",
"stoppable": "^1.0.6",
"xdg-basedir": "^5.1.0",
"xen-api": "^1.2.2",
"xen-api": "^1.2.0",
"xo-common": "^0.8.0"
},
"devDependencies": {

View File

@@ -2,23 +2,22 @@
const { execFile } = require('child_process')
const RE =
/^(-----BEGIN PRIVATE KEY-----.+-----END PRIVATE KEY-----\n)(-----BEGIN CERTIFICATE-----.+-----END CERTIFICATE-----\n)$/s
exports.genSelfSignedCert = async ({ days = 360 } = {}) =>
const openssl = (cmd, args, { input, ...opts } = {}) =>
new Promise((resolve, reject) => {
execFile(
'openssl',
['req', '-batch', '-new', '-x509', '-days', String(days), '-nodes', '-newkey', 'rsa:2048', '-keyout', '-'],
(error, stdout) => {
if (error != null) {
return reject(error)
}
const matches = RE.exec(stdout)
if (matches === null) {
return reject(new Error('stdout does not match regular expression'))
}
const [, key, cert] = matches
resolve({ cert, key })
}
const child = execFile('openssl', [cmd, ...args], opts, (error, stdout) =>
error != null ? reject(error) : resolve(stdout)
)
if (input !== undefined) {
child.stdin.end(input)
}
})
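// generate the private key first, then derive a self-signed certificate from it by feeding the key on stdin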
exports.genSelfSignedCert = async ({ days = 360 } = {}) => {
const key = await openssl('genrsa', ['2048'])
return {
cert: await openssl('req', ['-batch', '-new', '-key', '-', '-x509', '-days', String(days), '-nodes'], {
input: key,
}),
key,
}
}

View File

@@ -9,7 +9,7 @@
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"version": "0.1.3",
"version": "0.1.0",
"engines": {
"node": ">=8.10"
},

View File

@@ -0,0 +1,3 @@
'use strict'
module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))

View File

@@ -0,0 +1 @@
../../scripts/babel-eslintrc.js

View File

@@ -14,13 +14,31 @@
"name": "Vates SAS",
"url": "https://vates.fr"
},
"preferGlobal": false,
"main": "dist/",
"browserslist": [
">2%"
],
"engines": {
"node": ">=6"
},
"dependencies": {
"lodash": "^4.17.15"
"devDependencies": {
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"cross-env": "^7.0.2",
"rimraf": "^3.0.0"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
"clean": "rimraf dist/",
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build",
"postversion": "npm publish --access public"
},
"dependencies": {
"lodash": "^4.17.15"
}
}

View File

@@ -1,10 +1,8 @@
'use strict'
const escapeRegExp = require('lodash/escapeRegExp')
import escapeRegExp from 'lodash/escapeRegExp'
const compareLengthDesc = (a, b) => b.length - a.length
exports.compileTemplate = function compileTemplate(pattern, rules) {
export function compileTemplate(pattern, rules) {
const matches = Object.keys(rules).sort(compareLengthDesc).map(escapeRegExp).join('|')
const regExp = new RegExp(`\\\\(?:\\\\|${matches})|${matches}`, 'g')
return (...params) =>

View File

@@ -1,8 +1,5 @@
/* eslint-env jest */
'use strict'
const { compileTemplate } = require('.')
import { compileTemplate } from '.'
it("correctly replaces the template's variables", () => {
const replacer = compileTemplate('{property}_\\{property}_\\\\{property}_{constant}_%_FOO', {

View File

@@ -1,6 +1,6 @@
{
"name": "@xen-orchestra/upload-ova",
"version": "0.1.5",
"version": "0.1.4",
"license": "AGPL-3.0-or-later",
"description": "Basic CLI to upload ova files to Xen-Orchestra",
"keywords": [
@@ -43,7 +43,7 @@
"pw": "^0.0.4",
"xdg-basedir": "^4.0.0",
"xo-lib": "^0.11.1",
"xo-vmdk-to-vhd": "^2.4.3"
"xo-vmdk-to-vhd": "^2.3.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",

View File

@@ -1,9 +0,0 @@
'use strict'
// TODO: remove when Node >=15.0
module.exports = class AggregateError extends Error {
constructor(errors, message) {
super(message)
this.errors = errors
}
}

View File

@@ -1,131 +0,0 @@
# VM Sync Hook
> This feature is currently _unstable_ and might change or be removed in the future.
>
> Feedback is very welcome on the [project bugtracker](https://github.com/vatesfr/xen-orchestra/issues).
> This feature is not currently supported for backups done with XO Proxy.
Before snapshotting (with or without memory, i.e. a checkpoint), XO can notify the VM via an HTTP request.
A typical use case is to make sure the VM is in a consistent state during the snapshot process, for instance by making sure database writes are flushed to the disk.
> This request will only be sent if the VM is in a running state.
## Configuration
The feature is opt-in via a tag on the VM: `xo:notify-on-snapshot`.
By default, it will be an HTTPS request on port `1727`, to the first IP address reported by the VM.
If the _VM Tools_ (i.e. management agent) are not installed on the VM or if you wish to use another URL, you can specify it in the tag: `xo:notify-on-snapshot=<URL>`.
To guarantee the request comes from XO, a secret must be provided in the `xo-server` (and, if relevant, `xo-proxy`) configuration:
```toml
[xapiOptions]
syncHookSecret = 'unique long string to ensure the request comes from XO'
```
## Specification
XO waits for the request to be answered before starting the snapshot, but will not wait longer than _1 minute_.
If the request fails for any reason, XO will go ahead with the snapshot immediately.
```http
GET /sync HTTP/1.1
Authorization: Bearer dW5pcXVlIGxvbmcgc3RyaW5nIHRvIGVuc3VyZSB0aGUgcmVxdWVzdCBjb21lcyBmcm9tIFhP
```
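The bearer token is simply the base64 encoding of the configured secret, which the VM-side server can verify (as the example server below does). For instance:

```js
// base64-encode the configured secret to obtain the bearer token shown above
Buffer.from('unique long string to ensure the request comes from XO').toString('base64')
// → 'dW5pcXVlIGxvbmcgc3RyaW5nIHRvIGVuc3VyZSB0aGUgcmVxdWVzdCBjb21lcyBmcm9tIFhP'
```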
When the snapshot is finished, another request will be sent:
```http
GET /post-sync HTTP/1.1
Authorization: Bearer dW5pcXVlIGxvbmcgc3RyaW5nIHRvIGVuc3VyZSB0aGUgcmVxdWVzdCBjb21lcyBmcm9tIFhP
```
The created snapshot will have the special `xo:synced` tag set to make it identifiable.
## Example server in Node
`index.cjs`:
```js
const exec = require('node:util').promisify(require('node:child_process').execFile)
const SECRET = 'unique long string to ensure the request comes from XO'
const HANDLERS = {
__proto__: null,
async '/sync'() {
// actions to do before the VM is snapshotted
// in this example, the Linux command `sync` is called:
await exec('sync')
},
async '/post-sync'() {
// actions to do after the VM is snapshotted
},
}
function checkAuthorization(req) {
try {
const { authorization } = req.headers
if (authorization !== undefined) {
const parts = authorization.split(' ')
if (parts.length >= 1 && parts[0].toLowerCase() === 'bearer') {
return Buffer.from(parts[1], 'base64').toString() === SECRET
}
}
} catch (error) {
console.warn('checkAuthorization', error)
}
return false
}
async function main() {
// generate a self-signed certificate
const [, key, cert] =
/^(-----BEGIN PRIVATE KEY-----.+-----END PRIVATE KEY-----\n)(-----BEGIN CERTIFICATE-----.+-----END CERTIFICATE-----\n)$/s.exec(
(await exec('openssl', ['req', '-batch', '-new', '-x509', '-nodes', '-newkey', 'rsa:2048', '-keyout', '-']))
.stdout
)
const server = require('node:https').createServer({ cert, key }, async function onRequest(req, res) {
if (!checkAuthorization(req)) {
res.statusCode = 403
return res.end('Forbidden')
}
const handler = HANDLERS[req.url.split('?')[0]]
if (handler === undefined || req.method !== 'GET') {
res.statusCode = 404
return res.end('Not Found')
}
try {
await handler()
res.statusCode = 200
res.end('Ok')
} catch (error) {
console.warn(error)
if (!res.headersSent) {
res.statusCode = 500
res.write('Internal Error')
}
res.end()
}
})
await new Promise((resolve, reject) => {
server.on('close', resolve).on('error', reject).listen(1727)
})
}
main().catch(console.warn)
```
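
This server can be exercised with a short Node client; a minimal sketch, assuming the VM is reachable at the hypothetical hostname `vm.example.org`:

```js
const https = require('node:https')

const secret = 'unique long string to ensure the request comes from XO'

https.get(
  'https://vm.example.org:1727/sync',
  {
    headers: { authorization: 'Bearer ' + Buffer.from(secret).toString('base64') },
    // the example server above uses a self-signed certificate
    rejectUnauthorized: false,
  },
  res => {
    console.log(res.statusCode) // 200 when the handler succeeded
  }
)
```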

View File

@@ -102,8 +102,6 @@ class Xapi extends Base {
constructor({
callRetryWhenTooManyPendingTasks = { delay: 5e3, tries: 10 },
maxUncoalescedVdis,
syncHookSecret,
syncHookTimeout,
vdiDestroyRetryWhenInUse = { delay: 5e3, tries: 10 },
...opts
}) {
@@ -114,8 +112,6 @@ class Xapi extends Base {
when: { code: 'TOO_MANY_PENDING_TASKS' },
}
this._maxUncoalescedVdis = maxUncoalescedVdis
this._syncHookSecret = syncHookSecret
this._syncHookTimeout = syncHookTimeout
this._vdiDestroyRetryWhenInUse = {
...vdiDestroyRetryWhenInUse,
onRetry,
@@ -234,9 +230,8 @@ function mixin(mixins) {
defineProperties(xapiProto, descriptors)
}
mixin({
host: require('./host.js'),
SR: require('./sr.js'),
task: require('./task.js'),
host: require('./host.js'),
VBD: require('./vbd.js'),
VDI: require('./vdi.js'),
VIF: require('./vif.js'),

View File

@@ -1,6 +1,6 @@
{
"name": "@xen-orchestra/xapi",
"version": "1.5.0",
"version": "0.11.0",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/xapi",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
@@ -15,7 +15,7 @@
"node": ">=14"
},
"peerDependencies": {
"xen-api": "^1.2.2"
"xen-api": "^1.2.0"
},
"scripts": {
"postversion": "npm publish --access public"
@@ -26,11 +26,8 @@
"@xen-orchestra/log": "^0.3.0",
"d3-time-format": "^3.0.0",
"golike-defer": "^0.5.1",
"http-request-plus": "^0.14.0",
"json-rpc-protocol": "^0.13.2",
"lodash": "^4.17.15",
"promise-toolbox": "^0.21.0",
"vhd-lib": "^4.1.0",
"xo-common": "^0.8.0"
},
"private": false,

View File

@@ -1,179 +0,0 @@
'use strict'
const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
const { decorateClass } = require('@vates/decorate-with')
const { defer } = require('golike-defer')
const { incorrectState } = require('xo-common/api-errors')
const { VDI_FORMAT_VHD } = require('./index.js')
const assert = require('node:assert').strict
const peekFooterFromStream = require('vhd-lib/peekFooterFromVhdStream')
const AggregateError = require('./_AggregateError.js')
const { warn } = require('@xen-orchestra/log').createLogger('xo:xapi:sr')
const OC_MAINTENANCE = 'xo:maintenanceState'
class Sr {
async create({
content_type = 'user', // recommended by Citrix
device_config,
host,
name_description = '',
name_label,
physical_size = 0,
shared,
sm_config = {},
type,
}) {
const ref = await this.call(
'SR.create',
host,
device_config,
physical_size,
name_label,
name_description,
type,
content_type,
shared,
sm_config
)
// https://developer-docs.citrix.com/projects/citrix-hypervisor-sdk/en/latest/xc-api-extensions/#sr
this.setFieldEntry('SR', ref, 'other_config', 'auto-scan', 'true').catch(warn)
return ref
}
// Switch the SR to maintenance mode:
// - shutdown all running VMs with a VDI on this SR
// - their UUID is saved into SR.other_config[OC_MAINTENANCE].shutdownVms
// - clean shutdown is attempted, and falls back to a hard shutdown
// - unplug all connected hosts from this SR
async enableMaintenanceMode($defer, ref, { vmsToShutdown = [] } = {}) {
const state = { timestamp: Date.now() }
// will throw if already in maintenance mode
await this.call('SR.add_to_other_config', ref, OC_MAINTENANCE, JSON.stringify(state))
await $defer.onFailure.call(this, 'call', 'SR.remove_from_other_config', ref, OC_MAINTENANCE)
const runningVms = new Map()
const handleVbd = async ref => {
const vmRef = await this.getField('VBD', ref, 'VM')
if (!runningVms.has(vmRef)) {
const power_state = await this.getField('VM', vmRef, 'power_state')
const isPaused = power_state === 'Paused'
if (isPaused || power_state === 'Running') {
runningVms.set(vmRef, isPaused)
}
}
}
await asyncMap(await this.getField('SR', ref, 'VDIs'), async ref => {
await asyncMap(await this.getField('VDI', ref, 'VBDs'), handleVbd)
})
{
const runningVmUuids = await asyncMap(runningVms.keys(), ref => this.getField('VM', ref, 'uuid'))
const set = new Set(vmsToShutdown)
for (const vmUuid of runningVmUuids) {
if (!set.has(vmUuid)) {
throw incorrectState({
actual: vmsToShutdown,
expected: runningVmUuids,
property: 'vmsToShutdown',
})
}
}
}
state.shutdownVms = {}
await asyncMapSettled(runningVms, async ([ref, isPaused]) => {
state.shutdownVms[await this.getField('VM', ref, 'uuid')] = isPaused
try {
await this.callAsync('VM.clean_shutdown', ref)
} catch (error) {
warn('SR_enableMaintenanceMode, VM clean shutdown', { error })
await this.callAsync('VM.hard_shutdown', ref)
}
$defer.onFailure.call(this, 'callAsync', 'VM.start', ref, isPaused, true)
})
state.unpluggedPbds = []
await asyncMapSettled(await this.getField('SR', ref, 'PBDs'), async ref => {
if (await this.getField('PBD', ref, 'currently_attached')) {
state.unpluggedPbds.push(await this.getField('PBD', ref, 'uuid'))
await this.callAsync('PBD.unplug', ref)
$defer.onFailure.call(this, 'callAsync', 'PBD.plug', ref)
}
})
await this.setFieldEntry('SR', ref, 'other_config', OC_MAINTENANCE, JSON.stringify(state))
}
// this method is best effort and will not stop on first error
async disableMaintenanceMode(ref) {
const state = JSON.parse((await this.getField('SR', ref, 'other_config'))[OC_MAINTENANCE])
// will throw if not in maintenance mode
await this.call('SR.remove_from_other_config', ref, OC_MAINTENANCE)
const errors = []
await asyncMap(state.unpluggedPbds, async uuid => {
try {
await this.callAsync('PBD.plug', await this.call('PBD.get_by_uuid', uuid))
} catch (error) {
errors.push(error)
}
})
await asyncMap(Object.entries(state.shutdownVms), async ([uuid, isPaused]) => {
try {
await this.callAsync('VM.start', await this.call('VM.get_by_uuid', uuid), isPaused, true)
} catch (error) {
errors.push(error)
}
})
if (errors.length !== 0) {
throw new AggregateError(errors)
}
}
async importVdi(
$defer,
ref,
stream,
{
format = VDI_FORMAT_VHD,
name_label = '[XO] Imported disk - ' + new Date().toISOString(),
virtual_size,
...vdiCreateOpts
} = {}
) {
if (virtual_size === undefined) {
if (format === VDI_FORMAT_VHD) {
const footer = await peekFooterFromStream(stream)
virtual_size = footer.currentSize
} else {
virtual_size = stream.length
assert.notEqual(virtual_size, undefined)
}
}
const vdiRef = await this.VDI_create({ ...vdiCreateOpts, name_label, SR: ref, virtual_size })
$defer.onFailure.call(this, 'callAsync', 'VDI.destroy', vdiRef)
await this.VDI_importContent(vdiRef, stream, { format })
return vdiRef
}
}
module.exports = Sr
decorateClass(Sr, { enableMaintenanceMode: defer, importVdi: defer })
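
A minimal sketch of driving the maintenance mode from a `xapi` instance (the instance, refs and UUIDs are hypothetical; the `SR_*` method names follow the mixin convention used elsewhere in this codebase):

```js
const srRef = await xapi.call('SR.get_by_uuid', '<sr-uuid>')

// every running VM with a VDI on this SR must be listed explicitly,
// otherwise enableMaintenanceMode throws an incorrectState error
await xapi.SR_enableMaintenanceMode(srRef, {
  vmsToShutdown: ['<vm-uuid-1>', '<vm-uuid-2>'],
})

// ...perform maintenance on the underlying storage...

// best effort: replugs the PBDs and restarts the VMs, then throws an
// AggregateError if any of the individual steps failed
await xapi.SR_disableMaintenanceMode(srRef)
```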

View File

@@ -6,8 +6,6 @@ const { Ref } = require('xen-api')
const isVmRunning = require('./_isVmRunning.js')
const { warn } = require('@xen-orchestra/log').createLogger('xo:xapi:vbd')
const noop = Function.prototype
module.exports = class Vbd {
@@ -68,10 +66,8 @@ module.exports = class Vbd {
})
if (isVmRunning(powerState)) {
this.callAsync('VBD.plug', vbdRef).catch(warn)
await this.callAsync('VBD.plug', vbdRef)
}
return vbdRef
}
async unplug(ref) {

View File

@@ -1,6 +1,5 @@
'use strict'
const assert = require('node:assert').strict
const CancelToken = require('promise-toolbox/CancelToken')
const pCatch = require('promise-toolbox/catch')
const pRetry = require('promise-toolbox/retry')
@@ -31,7 +30,8 @@ class Vdi {
other_config = {},
read_only = false,
sharable = false,
SR = this.pool.default_SR,
sm_config,
SR,
tags,
type = 'user',
virtual_size,
@@ -39,10 +39,10 @@ class Vdi {
},
{
// blindly copying `sm_config` from another VDI can create problems,
// therefore it should be passed explicitly
// therefore it is ignored by default by this method
//
// see https://github.com/vatesfr/xen-orchestra/issues/4482
sm_config,
setSmConfig = false,
} = {}
) {
return this.call('VDI.create', {
@@ -51,7 +51,7 @@ class Vdi {
other_config,
read_only,
sharable,
sm_config,
sm_config: setSmConfig ? sm_config : undefined,
SR,
tags,
type,
@@ -87,8 +87,6 @@ class Vdi {
}
async importContent(ref, stream, { cancelToken = CancelToken.none, format }) {
assert.notEqual(format, undefined)
if (stream.length === undefined) {
throw new Error('Trying to import a VDI without a length field. Please report this error to Xen Orchestra.')
}
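
A minimal sketch of a caller's perspective on these two changes (the `xapi` and `srRef` values are hypothetical; `VDI_create`/`VDI_importContent` follow the mixin naming used elsewhere in this diff):

```js
const { createReadStream } = require('node:fs')
const { stat } = require('node:fs/promises')
const { VDI_FORMAT_VHD } = require('@xen-orchestra/xapi')

// sm_config is ignored unless explicitly opted in via setSmConfig
const vdiRef = await xapi.VDI_create(
  { name_label: 'imported disk', SR: srRef, virtual_size: 10 * 1024 ** 3, sm_config: { foo: 'bar' } },
  { setSmConfig: true }
)

// importContent refuses streams without a length field, so provide one
const stream = createReadStream('./disk.vhd')
stream.length = (await stat('./disk.vhd')).size
await xapi.VDI_importContent(vdiRef, stream, { format: VDI_FORMAT_VHD })
```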

View File

@@ -2,7 +2,6 @@
const CancelToken = require('promise-toolbox/CancelToken')
const groupBy = require('lodash/groupBy.js')
const hrp = require('http-request-plus')
const ignoreErrors = require('promise-toolbox/ignoreErrors')
const pickBy = require('lodash/pickBy.js')
const omit = require('lodash/omit.js')
@@ -12,8 +11,7 @@ const { asyncMap } = require('@xen-orchestra/async-map')
const { createLogger } = require('@xen-orchestra/log')
const { decorateClass } = require('@vates/decorate-with')
const { defer } = require('golike-defer')
const { incorrectState, forbiddenOperation } = require('xo-common/api-errors.js')
const { JsonRpcError } = require('json-rpc-protocol')
const { incorrectState } = require('xo-common/api-errors.js')
const { Ref } = require('xen-api')
const extractOpaqueRef = require('./_extractOpaqueRef.js')
@@ -47,31 +45,6 @@ const cleanBiosStrings = biosStrings => {
}
}
// See: https://github.com/xapi-project/xen-api/blob/324bc6ee6664dd915c0bbe57185f1d6243d9ed7e/ocaml/xapi/xapi_guest_agent.ml#L59-L81
//
// Returns <min(n)>/ip || <min(n)>/ipv4/<min(m)> || <min(n)>/ipv6/<min(m)> || undefined
// where n corresponds to the network interface and m to its IP
const IPV4_KEY_RE = /^\d+\/ip(?:v4\/\d+)?$/
const IPV6_KEY_RE = /^\d+\/ipv6\/\d+$/
function getVmAddress(networks) {
if (networks !== undefined) {
let ipv6
for (const key of Object.keys(networks).sort()) {
if (IPV4_KEY_RE.test(key)) {
return networks[key]
}
if (ipv6 === undefined && IPV6_KEY_RE.test(key)) {
ipv6 = networks[key]
}
}
if (ipv6 !== undefined) {
return ipv6
}
}
throw new Error('no VM address found')
}
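// Given the sort-based selection above, a quick sketch with a made-up
// `networks` record:
//
//   getVmAddress({
//     '0/ip': '192.168.1.10',
//     '0/ipv6/0': 'fe80::1',
//     '1/ipv4/0': '10.0.0.5',
//   }) // → '192.168.1.10' (lowest-numbered IPv4 wins)
//
//   getVmAddress({ '0/ipv6/0': 'fe80::1' }) // → 'fe80::1' (IPv6 only as fallback)
//   getVmAddress(undefined) // throws 'no VM address found'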
async function listNobakVbds(xapi, vbdRefs) {
const vbds = []
await asyncMap(vbdRefs, async vbdRef => {
@@ -158,51 +131,6 @@ class Vm {
}
}
async _httpHook({ guest_metrics, power_state, tags, uuid }, pathname) {
if (power_state !== 'Running') {
return
}
let url
let i = tags.length
do {
if (i === 0) {
return
}
const tag = tags[--i]
if (tag === 'xo:notify-on-snapshot') {
const { networks } = await this.getRecord('VM_guest_metrics', guest_metrics)
url = Object.assign(new URL('https://localhost'), {
hostname: getVmAddress(networks),
port: 1727,
})
} else {
const prefix = 'xo:notify-on-snapshot='
if (tag.startsWith(prefix)) {
url = new URL(tag.slice(prefix.length))
}
}
} while (url === undefined)
url.pathname = pathname
const headers = {}
const secret = this._syncHookSecret
if (secret !== undefined) {
headers.authorization = 'Bearer ' + Buffer.from(secret).toString('base64')
}
try {
await hrp(url, {
headers,
rejectUnauthorized: false,
timeout: this._syncHookTimeout ?? 60e3,
})
} catch (error) {
warn('HTTP hook failed', { error, url, vm: uuid })
}
}
async assertHealthyVdiChains(vmRef, tolerance = this._maxUncoalescedVdis) {
const vdiRefs = {}
;(await this.getRecords('VBD', await this.getField('VM', vmRef, 'VBDs'))).forEach(({ VDI: ref }) => {
@@ -219,8 +147,6 @@ class Vm {
async checkpoint($defer, vmRef, { cancelToken = CancelToken.none, ignoreNobakVdis = false, name_label } = {}) {
const vm = await this.getRecord('VM', vmRef)
await this._httpHook(vm, '/sync')
let destroyNobakVdis = false
if (ignoreNobakVdis) {
@@ -241,9 +167,6 @@ class Vm {
try {
const ref = await this.callAsync(cancelToken, 'VM.checkpoint', vmRef, name_label).then(extractOpaqueRef)
// detached async
this._httpHook(vm, '/post-sync').catch(noop)
// VM checkpoints are marked as templates, unfortunately it does not play well with XVA export/import
// which will import them as templates and not VM checkpoints or plain VMs
await pCatch.call(
@@ -420,13 +343,7 @@ class Vm {
const vm = await this.getRecord('VM', vmRef)
if (!bypassBlockedOperation && 'destroy' in vm.blocked_operations) {
throw forbiddenOperation(
`destroy is blocked: ${
vm.blocked_operations.destroy === 'true'
? 'protected from accidental deletion'
: vm.blocked_operations.destroy
}`
)
throw new Error('destroy is blocked')
}
if (!forceDeleteDefaultTemplate && isDefaultTemplate(vm)) {
@@ -586,22 +503,6 @@ class Vm {
}
return ref
} catch (error) {
if (
// xxhash is the new form of consistency hashing in CH 8.1, which uses a faster,
// more efficient hashing algorithm to generate the consistency checks,
// in order to support larger files without the consistency checking process taking an incredibly long time
error.code === 'IMPORT_ERROR' &&
error.params?.some(
param =>
param.includes('INTERNAL_ERROR') &&
param.includes('Expected to find an inline checksum') &&
param.includes('.xxhash')
)
) {
warn('import', { error })
throw new JsonRpcError('Importing this VM requires XCP-ng or Citrix Hypervisor >=8.1')
}
// augment the error with as much relevant info as possible
const [poolMaster, sr] = await Promise.all([
safeGetRecord(this, 'host', this.pool.master),
@@ -613,33 +514,12 @@ class Vm {
}
}
async snapshot(
$defer,
vmRef,
{ cancelToken = CancelToken.none, ignoreNobakVdis = false, name_label, unplugVusbs = false } = {}
) {
async snapshot($defer, vmRef, { cancelToken = CancelToken.none, ignoreNobakVdis = false, name_label } = {}) {
const vm = await this.getRecord('VM', vmRef)
await this._httpHook(vm, '/sync')
const isHalted = vm.power_state === 'Halted'
// requires the VM to be halted because it's not possible to re-plug VUSB on a live VM
if (unplugVusbs && isHalted) {
// vm.VUSBs can be undefined (e.g. on XS 7.0.0)
const vusbs = vm.VUSBs
if (vusbs !== undefined) {
await asyncMap(vusbs, async ref => {
const vusb = await this.getRecord('VUSB', ref)
await vusb.$call('destroy')
$defer.call(this, 'call', 'VUSB.create', vusb.VM, vusb.USB_group, vusb.other_config)
})
}
}
let destroyNobakVdis = false
if (ignoreNobakVdis) {
if (isHalted) {
if (vm.power_state === 'Halted') {
await asyncMap(await listNobakVbds(this, vm.VBDs), async vbd => {
await this.VBD_destroy(vbd.$ref)
$defer.call(this, 'VBD_create', vbd)
@@ -724,9 +604,6 @@ class Vm {
ref = await this.callAsync(cancelToken, 'VM.snapshot', vmRef, name_label).then(extractOpaqueRef)
} while (false)
// detached async
this._httpHook(vm, '/post-sync').catch(noop)
// VM snapshots are marked as templates, unfortunately it does not play well with XVA export/import
// which will import them as templates and not VM snapshots or plain VMs
await pCatch.call(

File diff suppressed because it is too large

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff