diff --git a/@xen-orchestra/backups/Backup.js b/@xen-orchestra/backups/Backup.js
index 33264a271..f0e019b85 100644
--- a/@xen-orchestra/backups/Backup.js
+++ b/@xen-orchestra/backups/Backup.js
@@ -43,6 +43,7 @@ const DEFAULT_VM_SETTINGS = {
offlineSnapshot: false,
snapshotRetention: 0,
timeout: 0,
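+ // when enabled, delta backups fetch VHD block data through NBD (see DeltaBackupWriter)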
+ useNbd: false,
unconditionalSnapshot: false,
vmTimeout: 0,
}
diff --git a/@xen-orchestra/backups/RemoteAdapter.js b/@xen-orchestra/backups/RemoteAdapter.js
index 7e301884e..2a0fd252e 100644
--- a/@xen-orchestra/backups/RemoteAdapter.js
+++ b/@xen-orchestra/backups/RemoteAdapter.js
@@ -659,9 +659,8 @@ class RemoteAdapter {
return path
}
- async writeVhd(path, input, { checksum = true, validator = noop, writeBlockConcurrency } = {}) {
+ async writeVhd(path, input, { checksum = true, validator = noop, writeBlockConcurrency, nbdClient } = {}) {
const handler = this._handler
-
if (this.#useVhdDirectory()) {
const dataPath = `${dirname(path)}/data/${uuidv4()}.vhd`
await createVhdDirectoryFromStream(handler, dataPath, input, {
@@ -671,6 +670,7 @@ class RemoteAdapter {
await input.task
return validator.apply(this, arguments)
},
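+ // the NBD client is forwarded to createVhdDirectoryFromStream, which hands it to parseVhdStream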
+ nbdClient,
})
await VhdAbstract.createAlias(handler, path, dataPath)
} else {
diff --git a/@xen-orchestra/backups/package.json b/@xen-orchestra/backups/package.json
index d69ca66c0..437f7907c 100644
--- a/@xen-orchestra/backups/package.json
+++ b/@xen-orchestra/backups/package.json
@@ -22,6 +22,7 @@
"@vates/decorate-with": "^2.0.0",
"@vates/disposable": "^0.1.2",
"@vates/fuse-vhd": "^1.0.0",
+ "@vates/nbd-client": "*",
"@vates/parse-duration": "^0.1.1",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/fs": "^3.2.0",
diff --git a/@xen-orchestra/backups/writers/DeltaBackupWriter.js b/@xen-orchestra/backups/writers/DeltaBackupWriter.js
index e2034ebfe..6435b7078 100644
--- a/@xen-orchestra/backups/writers/DeltaBackupWriter.js
+++ b/@xen-orchestra/backups/writers/DeltaBackupWriter.js
@@ -19,8 +19,9 @@ const { AbstractDeltaWriter } = require('./_AbstractDeltaWriter.js')
const { checkVhd } = require('./_checkVhd.js')
const { packUuid } = require('./_packUuid.js')
const { Disposable } = require('promise-toolbox')
+const NbdClient = require('@vates/nbd-client')
-const { warn } = createLogger('xo:backups:DeltaBackupWriter')
+const { debug, warn } = createLogger('xo:backups:DeltaBackupWriter')
exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(AbstractDeltaWriter) {
async checkBaseVdis(baseUuidToSrcVdi) {
@@ -199,12 +200,30 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
await checkVhd(handler, parentPath)
}
+ const vdiRef = vm.$xapi.getObject(vdi.uuid).$ref
+
+ let nbdClient
+ if (this._backup.config.useNbd) {
+ // use NBD to download this disk if possible
+ try {
+ // NOTE: get_nbd_info may return several entries, this always uses the first one
+ const [nbdInfo] = await vm.$xapi.call('VDI.get_nbd_info', vdiRef)
+ nbdClient = new NbdClient(nbdInfo)
+ await nbdClient.connect()
+ debug('got NBD connection', { vdi: vdi.uuid })
+ } catch (error) {
+ nbdClient = undefined
+ debug(`can't connect to nbd server or no server available`, { error })
+ }
+ }
+
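+ // hand the NBD client (if any) to the remote adapter so that block data is fetched over NBD instead of being read from the export stream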
await adapter.writeVhd(path, deltaExport.streams[`${id}.vhd`], {
// no checksum for VHDs, because they will be invalidated by
// merges and chainings
checksum: false,
validator: tmpPath => checkVhd(handler, tmpPath),
writeBlockConcurrency: this._backup.config.writeBlockConcurrency,
+ nbdClient,
})
if (isDelta) {
diff --git a/CHANGELOG.unreleased.md b/CHANGELOG.unreleased.md
index d5f47de32..102416538 100644
--- a/CHANGELOG.unreleased.md
+++ b/CHANGELOG.unreleased.md
@@ -7,6 +7,8 @@
> Users must be able to say: “Nice enhancement, I'm eager to test it”
+- [Delta Backup] Use [NBD](https://en.wikipedia.org/wiki/Network_block_device) to download disks (PR [#6461](https://github.com/vatesfr/xen-orchestra/pull/6461))
+
### Bug fixes
> Users must be able to say: “I had this issue, happy to know it's fixed”
@@ -27,4 +29,7 @@
+- xo-server minor
+- xo-web minor
+
diff --git a/packages/vhd-lib/createVhdDirectoryFromStream.js b/packages/vhd-lib/createVhdDirectoryFromStream.js
index 2bf6e120e..1840cb4d9 100644
--- a/packages/vhd-lib/createVhdDirectoryFromStream.js
+++ b/packages/vhd-lib/createVhdDirectoryFromStream.js
@@ -8,10 +8,10 @@ const { asyncEach } = require('@vates/async-each')
const { warn } = createLogger('vhd-lib:createVhdDirectoryFromStream')
-const buildVhd = Disposable.wrap(async function* (handler, path, inputStream, { concurrency, compression }) {
+const buildVhd = Disposable.wrap(async function* (handler, path, inputStream, { concurrency, compression, nbdClient }) {
const vhd = yield VhdDirectory.create(handler, path, { compression })
await asyncEach(
- parseVhdStream(inputStream),
+ parseVhdStream(inputStream, nbdClient),
async function (item) {
switch (item.type) {
case 'footer':
@@ -44,10 +44,10 @@ exports.createVhdDirectoryFromStream = async function createVhdDirectoryFromStre
handler,
path,
inputStream,
- { validator, concurrency = 16, compression } = {}
+ { validator, concurrency = 16, compression, nbdClient } = {}
) {
try {
- await buildVhd(handler, path, inputStream, { concurrency, compression })
+ await buildVhd(handler, path, inputStream, { concurrency, compression, nbdClient })
if (validator !== undefined) {
await validator.call(this, path)
}
diff --git a/packages/vhd-lib/parseVhdStream.js b/packages/vhd-lib/parseVhdStream.js
index ed03e68e1..6188c7914 100644
--- a/packages/vhd-lib/parseVhdStream.js
+++ b/packages/vhd-lib/parseVhdStream.js
@@ -4,6 +4,7 @@ const { BLOCK_UNUSED, FOOTER_SIZE, HEADER_SIZE, SECTOR_SIZE } = require('./_cons
const { readChunk } = require('@vates/read-chunk')
const assert = require('assert')
const { unpackFooter, unpackHeader, computeFullBlockSize } = require('./Vhd/_utils')
+const { asyncEach } = require('@vates/async-each')
const cappedBufferConcat = (buffers, maxSize) => {
let buffer = Buffer.concat(buffers)
@@ -13,114 +14,6 @@ const cappedBufferConcat = (buffers, maxSize) => {
return buffer
}
-exports.parseVhdStream = async function* parseVhdStream(stream) {
- let bytesRead = 0
-
- // handle empty space between elements
- // ensure we read stream in order
- async function read(offset, size) {
- assert(bytesRead <= offset, `offset is ${offset} but we already read ${bytesRead} bytes`)
- if (bytesRead < offset) {
- // empty spaces
- await read(bytesRead, offset - bytesRead)
- }
- const buf = await readChunk(stream, size)
- assert.strictEqual(buf.length, size, `read ${buf.length} instead of ${size}`)
- bytesRead += size
- return buf
- }
-
- const bufFooter = await read(0, FOOTER_SIZE)
-
- const footer = unpackFooter(bufFooter)
- yield { type: 'footer', footer, offset: 0 }
-
- const bufHeader = await read(FOOTER_SIZE, HEADER_SIZE)
- const header = unpackHeader(bufHeader, footer)
-
- yield { type: 'header', header, offset: SECTOR_SIZE }
- const blockSize = header.blockSize
- assert.strictEqual(blockSize % SECTOR_SIZE, 0)
-
- const fullBlockSize = computeFullBlockSize(blockSize)
-
- const bitmapSize = fullBlockSize - blockSize
-
- const index = []
-
- for (const parentLocatorId in header.parentLocatorEntry) {
- const parentLocatorEntry = header.parentLocatorEntry[parentLocatorId]
- // empty parent locator entry, does not exist in the content
- if (parentLocatorEntry.platformDataSpace === 0) {
- continue
- }
- index.push({
- ...parentLocatorEntry,
- type: 'parentLocator',
- offset: parentLocatorEntry.platformDataOffset,
- size: parentLocatorEntry.platformDataLength,
- id: parentLocatorId,
- })
- }
-
- const batOffset = header.tableOffset
- const batSize = Math.max(1, Math.ceil((header.maxTableEntries * 4) / SECTOR_SIZE)) * SECTOR_SIZE
-
- index.push({
- type: 'bat',
- offset: batOffset,
- size: batSize,
- })
-
- // sometimes some parent locator are before the BAT
- index.sort((a, b) => a.offset - b.offset)
- while (index.length > 0) {
- const item = index.shift()
- const buffer = await read(item.offset, item.size)
- item.buffer = buffer
-
- const { type } = item
- if (type === 'bat') {
- // found the BAT : read it and add block to index
-
- let blockCount = 0
- for (let blockCounter = 0; blockCounter < header.maxTableEntries; blockCounter++) {
- const batEntrySector = buffer.readUInt32BE(blockCounter * 4)
- // unallocated block, no need to export it
- if (batEntrySector !== BLOCK_UNUSED) {
- const batEntryBytes = batEntrySector * SECTOR_SIZE
- // ensure the block is not before the bat
- assert.ok(batEntryBytes >= batOffset + batSize)
- index.push({
- type: 'block',
- id: blockCounter,
- offset: batEntryBytes,
- size: fullBlockSize,
- })
- blockCount++
- }
- }
- // sort again index to ensure block and parent locator are in the right order
- index.sort((a, b) => a.offset - b.offset)
- item.blockCount = blockCount
- } else if (type === 'block') {
- item.bitmap = buffer.slice(0, bitmapSize)
- item.data = buffer.slice(bitmapSize)
- }
-
- yield item
- }
-
- /**
- * the second footer is at filesize - 512 , there can be empty spaces between last block
- * and the start of the footer
- *
- * we read till the end of the stream, and use the last 512 bytes as the footer
- */
- const bufFooterEnd = await readLastSector(stream)
- assert(bufFooter.equals(bufFooterEnd), 'footer1 !== footer2')
-}
-
function readLastSector(stream) {
return new Promise((resolve, reject) => {
let bufFooterEnd = Buffer.alloc(0)
@@ -134,3 +27,233 @@ function readLastSector(stream) {
stream.on('error', reject)
})
}
+
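+// parses a VHD stream sequentially: footer, header, parent locators and BAT first, then the blocks,
+// strictly in stream order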
+class StreamParser {
+ #bufFooter
+ _bitmapSize = 0
+ _bytesRead = 0
+ _stream = null
+ _index = []
+ constructor(stream) {
+ this._stream = stream
+ }
+
+ async _read(offset, size) {
+ assert(this._bytesRead <= offset, `offset is ${offset} but we already read ${this._bytesRead} bytes`)
+ if (this._bytesRead < offset) {
+ // empty spaces
+ await this._read(this._bytesRead, offset - this._bytesRead)
+ }
+ const buf = await readChunk(this._stream, size)
+ assert.strictEqual(buf.length, size, `read ${buf.length} instead of ${size}`)
+ this._bytesRead += size
+ return buf
+ }
+
+ async *headers() {
+ this.#bufFooter = await this._read(0, FOOTER_SIZE)
+
+ const footer = unpackFooter(this.#bufFooter)
+
+ yield { type: 'footer', footer, offset: 0 }
+ const bufHeader = await this._read(FOOTER_SIZE, HEADER_SIZE)
+ const header = unpackHeader(bufHeader, footer)
+
+ yield { type: 'header', header, offset: SECTOR_SIZE }
+ const blockSize = header.blockSize
+ assert.strictEqual(blockSize % SECTOR_SIZE, 0)
+ const fullBlockSize = computeFullBlockSize(blockSize)
+ this._bitmapSize = fullBlockSize - blockSize
+
+ let batFound = false
+
+ for (const parentLocatorId in header.parentLocatorEntry) {
+ const parentLocatorEntry = header.parentLocatorEntry[parentLocatorId]
+ // empty parent locator entry, does not exist in the content
+ if (parentLocatorEntry.platformDataSpace === 0) {
+ continue
+ }
+ this._index.push({
+ ...parentLocatorEntry,
+ type: 'parentLocator',
+ offset: parentLocatorEntry.platformDataOffset,
+ size: parentLocatorEntry.platformDataLength,
+ id: parentLocatorId,
+ })
+ }
+
+ const batOffset = header.tableOffset
+ const batSize = Math.max(1, Math.ceil((header.maxTableEntries * 4) / SECTOR_SIZE)) * SECTOR_SIZE
+
+ this._index.push({
+ type: 'bat',
+ offset: batOffset,
+ size: batSize,
+ })
+
+ // sometimes some parent locator are before the BAT
+ this._index.sort((a, b) => a.offset - b.offset)
+
+ while (!batFound) {
+ const item = this._index.shift()
+ const buffer = await this._read(item.offset, item.size)
+ item.buffer = buffer
+
+ const { type } = item
+ if (type === 'bat') {
+ // found the BAT : read it and add block to index
+
+ let blockCount = 0
+ for (let blockCounter = 0; blockCounter < header.maxTableEntries; blockCounter++) {
+ const batEntrySector = buffer.readUInt32BE(blockCounter * 4)
+ // unallocated block, no need to export it
+ if (batEntrySector !== BLOCK_UNUSED) {
+ const batEntryBytes = batEntrySector * SECTOR_SIZE
+ // ensure the block is not before the bat
+ assert.ok(batEntryBytes >= batOffset + batSize)
+ this._index.push({
+ type: 'block',
+ id: blockCounter,
+ offset: batEntryBytes,
+ size: fullBlockSize,
+ })
+ blockCount++
+ }
+ }
+ // sort again index to ensure block and parent locator are in the right order
+ this._index.sort((a, b) => a.offset - b.offset)
+ item.blockCount = blockCount
+ batFound = true
+ }
+ yield item
+ }
+ }
+
+ async *blocks() {
+ while (this._index.length > 0) {
+ const item = this._index.shift()
+ const buffer = await this._read(item.offset, item.size)
+
+ item.bitmap = buffer.slice(0, this._bitmapSize)
+ item.data = buffer.slice(this._bitmapSize)
+ item.buffer = buffer
+ yield item
+ }
+ /**
+ * the second footer is at filesize - 512 , there can be empty spaces between last block
+ * and the start of the footer
+ *
+ * we read till the end of the stream, and use the last 512 bytes as the footer
+ */
+ const bufFooterEnd = await readLastSector(this._stream)
+ assert(this.#bufFooter.equals(bufFooterEnd), 'footer1 !== footer2')
+ }
+
+ async *parse() {
+ yield* this.headers()
+ yield* this.blocks()
+ }
+}
+
+// hybrid mode: read the headers from the VHD stream and the block data from NBD
+class StreamNbdParser extends StreamParser {
+ #nbdClient = null
+ #concurrency = 16
+
+ constructor(stream, nbdClient = {}) {
+ super(stream)
+ this.#nbdClient = nbdClient
+ }
+
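+ // rebuild a VHD-style block (bitmap + data) from a raw read done through the NBD connection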
+ async _readBlockData(item) {
+ const SECTOR_BITMAP = Buffer.alloc(512, 255)
+ const client = this.#nbdClient
+ // the NBD export is a raw disk: a block is addressed by its id (id × blockSize), which has nothing
+ // to do with the offset of that block in the VHD stream
+ const rawDataLength = item.size - SECTOR_BITMAP.length
+ const data = await client.readBlock(item.id, rawDataLength)
+
+ // a VHD block is the data prefixed by its bitmap; NBD returns raw data, so use a fully-set bitmap
+ const buffer = Buffer.concat([SECTOR_BITMAP, data])
+ const block = {
+ ...item,
+ size: rawDataLength,
+ bitmap: SECTOR_BITMAP,
+ data,
+ buffer,
+ }
+ return block
+ }
+
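+ // producer/consumer: up to #concurrency blocks are read from NBD in parallel and queued in
+ // blocksReady, while the generator yields them one at a time; waitForYield keeps each producer
+ // waiting until its block has been consumed, which bounds the number of buffered blocks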
+ async *blocks() {
+ // at most this array will be this.#concurrency long
+ const blocksReady = []
+ let waitingForBlock
+ let done = false
+ let error
+
+ function waitForYield(block) {
+ return new Promise(resolve => {
+ blocksReady.push({
+ block,
+ yielded: resolve,
+ })
+ if (waitingForBlock !== undefined) {
+ const resolver = waitingForBlock
+ waitingForBlock = undefined
+ resolver()
+ }
+ })
+ }
+
+ asyncEach(
+ this._index,
+ async item => {
+ const block = await this._readBlockData(item)
+ await waitForYield(block)
+ },
+ { concurrency: this.#concurrency }
+ )
+ .then(() => {
+ done = true
+ waitingForBlock?.()
+ })
+ .catch(err => {
+ // if several reads fail, only the last error is kept
+ error = err
+ waitingForBlock?.()
+ })
+ // eslint-disable-next-line no-unmodified-loop-condition
+ while (!done) {
+ if (error) {
+ throw error
+ }
+ if (blocksReady.length > 0) {
+ const { block, yielded } = blocksReady.shift()
+ yielded()
+ yield block
+ } else {
+ await new Promise(resolve => {
+ waitingForBlock = resolve
+ })
+ }
+ }
+ }
+
+ async *parse() {
+ yield* this.headers()
+ yield* this.blocks()
+ // @todo : should we destroy it earlier ?
+ this._stream.destroy()
+ }
+}
+
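+// entry point: when an NBD client is provided, headers and BAT are still parsed from the stream but
+// block data is downloaded through NBD; otherwise everything is read from the stream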
+exports.parseVhdStream = async function* parseVhdStream(stream, nbdClient) {
+ let parser
+ if (nbdClient) {
+ parser = new StreamNbdParser(stream, nbdClient)
+ } else {
+ parser = new StreamParser(stream)
+ }
+ yield* parser.parse()
+}
diff --git a/packages/xo-server/src/xapi-object-to-xo.mjs b/packages/xo-server/src/xapi-object-to-xo.mjs
index 602ac298b..a9f26565d 100644
--- a/packages/xo-server/src/xapi-object-to-xo.mjs
+++ b/packages/xo-server/src/xapi-object-to-xo.mjs
@@ -710,6 +710,8 @@ const TRANSFORMS = {
tags: obj.tags,
PIFs: link(obj, 'PIFs'),
VIFs: link(obj, 'VIFs'),
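+ // the network advertises NBD support (secure or insecure) through its purpose field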
+ nbd: obj.purpose?.includes('nbd'),
+ insecureNbd: obj.purpose?.includes('insecure_nbd'),
}
},
diff --git a/packages/xo-web/package.json b/packages/xo-web/package.json
index 4fa5845b7..4b6704ad2 100644
--- a/packages/xo-web/package.json
+++ b/packages/xo-web/package.json
@@ -27,7 +27,7 @@
">2%"
],
"engines": {
- "node": ">=6"
+ "node": ">=7"
},
"devDependencies": {
"@babel/core": "^7.13.8",
diff --git a/packages/xo-web/src/common/intl/messages.js b/packages/xo-web/src/common/intl/messages.js
index 01a858d57..2807b7c25 100644
--- a/packages/xo-web/src/common/intl/messages.js
+++ b/packages/xo-web/src/common/intl/messages.js
@@ -1230,6 +1230,10 @@ const messages = {
vifUnlockedNetworkWithIps: 'Some IPs are unnecessarily set as allowed for this interface',
vifUnknownNetwork: 'Unknown network',
vifCreate: 'Create',
+ nbd: 'NBD',
+ nbdTootltip: 'Network Block Device status',
+ nbdInsecureTooltip: 'Use of insecure NBD is not advised',
+ nbdSecureTooltip: 'NBD connection is secure and ready',
// ----- VM snapshot tab -----
noSnapshots: 'No snapshots',
diff --git a/packages/xo-web/src/xo-app/host/tab-network.js b/packages/xo-web/src/xo-app/host/tab-network.js
index eba21468d..826841df2 100644
--- a/packages/xo-web/src/xo-app/host/tab-network.js
+++ b/packages/xo-web/src/xo-app/host/tab-network.js
@@ -227,6 +227,11 @@ const PIF_COLUMNS = [
name: _('pifModeLabel'),
sortCriteria: 'mode',
},
+ {
+ itemRenderer: (pif, userData) =>