feat(backups): use NBD to export VDIs when possible (#6461)

Florent BEAUCHAMP 2022-10-27 16:50:56 +02:00 committed by GitHub
parent 03b505e40e
commit 7ede6bdbce
13 changed files with 373 additions and 152 deletions

View File

@@ -43,6 +43,7 @@ const DEFAULT_VM_SETTINGS = {
offlineSnapshot: false,
snapshotRetention: 0,
timeout: 0,
unconditionalSnapshot: false,
useNbd: false,
vmTimeout: 0,
}
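With this default, NBD stays opt-in: the delta writer only attempts an NBD export when the effective job settings resolve useNbd to true. A minimal sketch of a job opting in (how settings are merged over these defaults is assumed here for illustration, it is not shown in this diff):

  const settings = {
    ...DEFAULT_VM_SETTINGS,
    useNbd: true, // let the delta writer try VDI.get_nbd_info, falling back to the stream export on failure
  }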

View File

@@ -659,9 +659,8 @@ class RemoteAdapter {
return path
}
async writeVhd(path, input, { checksum = true, validator = noop, writeBlockConcurrency } = {}) {
async writeVhd(path, input, { checksum = true, validator = noop, writeBlockConcurrency, nbdClient } = {}) {
const handler = this._handler
if (this.#useVhdDirectory()) {
const dataPath = `${dirname(path)}/data/${uuidv4()}.vhd`
await createVhdDirectoryFromStream(handler, dataPath, input, {
@@ -671,6 +670,7 @@
await input.task
return validator.apply(this, arguments)
},
nbdClient,
})
await VhdAbstract.createAlias(handler, path, dataPath)
} else {
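Downstream of this change, callers can hand an optional NBD client to the adapter; when it is undefined, writeVhd behaves exactly as before. A usage sketch based on the new signature (adapter, handler and vhdStream are placeholders):

  await adapter.writeVhd(path, vhdStream, {
    checksum: false,
    validator: tmpPath => checkVhd(handler, tmpPath),
    writeBlockConcurrency: 16,
    nbdClient, // optional; only used when the remote stores VHDs in directory mode
  })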

View File

@@ -22,6 +22,7 @@
"@vates/decorate-with": "^2.0.0",
"@vates/disposable": "^0.1.2",
"@vates/fuse-vhd": "^1.0.0",
"@vates/nbd-client": "*",
"@vates/parse-duration": "^0.1.1",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/fs": "^3.2.0",

View File

@@ -19,8 +19,9 @@ const { AbstractDeltaWriter } = require('./_AbstractDeltaWriter.js')
const { checkVhd } = require('./_checkVhd.js')
const { packUuid } = require('./_packUuid.js')
const { Disposable } = require('promise-toolbox')
const NbdClient = require('@vates/nbd-client')
const { warn } = createLogger('xo:backups:DeltaBackupWriter')
const { debug, warn } = createLogger('xo:backups:DeltaBackupWriter')
exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(AbstractDeltaWriter) {
async checkBaseVdis(baseUuidToSrcVdi) {
@@ -199,12 +200,30 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
await checkVhd(handler, parentPath)
}
const vdiRef = vm.$xapi.getObject(vdi.uuid).$ref
let nbdClient
if (this._backup.config.useNbd) {
// try to establish an NBD connection when the configuration allows it
try {
// this will always take the first host in the list
const [nbdInfo] = await vm.$xapi.call('VDI.get_nbd_info', vdiRef)
nbdClient = new NbdClient(nbdInfo)
await nbdClient.connect()
debug('got NBD connection', { vdi: vdi.uuid })
} catch (error) {
nbdClient = undefined
debug(`can't connect to NBD server or no server available`, { error })
}
}
await adapter.writeVhd(path, deltaExport.streams[`${id}.vhd`], {
// no checksum for VHDs, because they will be invalidated by
// merges and chainings
checksum: false,
validator: tmpPath => checkVhd(handler, tmpPath),
writeBlockConcurrency: this._backup.config.writeBlockConcurrency,
nbdClient,
})
if (isDelta) {
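This NBD attempt degrades gracefully: any failure leaves nbdClient undefined, so the plain stream export is used. A standalone sketch of the handshake (the field names of the vdi_nbd_server_info record are assumed, they are not shown in this diff):

  const NbdClient = require('@vates/nbd-client')

  async function tryConnectNbd(xapi, vdiRef) {
    try {
      // XAPI returns one record per host able to serve this VDI; the writer takes the first one
      const [nbdInfo] = await xapi.call('VDI.get_nbd_info', vdiRef)
      // nbdInfo ≈ { address, port, exportname, cert, … } (assumed shape)
      const client = new NbdClient(nbdInfo)
      await client.connect()
      return client
    } catch (error) {
      return undefined // NBD not enabled on any network, or no server reachable
    }
  }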

View File

@@ -7,6 +7,8 @@
> Users must be able to say: “Nice enhancement, I'm eager to test it”
- [Delta Backup] Use [NBD](https://en.wikipedia.org/wiki/Network_block_device) to download disks (PR [#6461](https://github.com/vatesfr/xen-orchestra/pull/6461))
### Bug fixes
> Users must be able to say: “I had this issue, happy to know it's fixed”
@@ -27,4 +29,7 @@
<!--packages-start-->
- xo-server minor
- xo-web minor
<!--packages-end-->

View File

@@ -8,10 +8,10 @@ const { asyncEach } = require('@vates/async-each')
const { warn } = createLogger('vhd-lib:createVhdDirectoryFromStream')
const buildVhd = Disposable.wrap(async function* (handler, path, inputStream, { concurrency, compression }) {
const buildVhd = Disposable.wrap(async function* (handler, path, inputStream, { concurrency, compression, nbdClient }) {
const vhd = yield VhdDirectory.create(handler, path, { compression })
await asyncEach(
parseVhdStream(inputStream),
parseVhdStream(inputStream, nbdClient),
async function (item) {
switch (item.type) {
case 'footer':
@@ -44,10 +44,10 @@ exports.createVhdDirectoryFromStream = async function createVhdDirectoryFromStre
handler,
path,
inputStream,
{ validator, concurrency = 16, compression } = {}
{ validator, concurrency = 16, compression, nbdClient } = {}
) {
try {
await buildVhd(handler, path, inputStream, { concurrency, compression })
await buildVhd(handler, path, inputStream, { concurrency, compression, nbdClient })
if (validator !== undefined) {
await validator.call(this, path)
}
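A usage sketch of the extended helper (handler, vhdStream and nbdClient are placeholders; omit nbdClient to keep the previous stream-only behaviour):

  await createVhdDirectoryFromStream(handler, dataPath, vhdStream, {
    concurrency: 16,
    nbdClient, // optional: block data is then fetched through NBD
    async validator(path) {
      // called once the VHD has been fully written
    },
  })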

View File

@@ -4,6 +4,7 @@ const { BLOCK_UNUSED, FOOTER_SIZE, HEADER_SIZE, SECTOR_SIZE } = require('./_cons
const { readChunk } = require('@vates/read-chunk')
const assert = require('assert')
const { unpackFooter, unpackHeader, computeFullBlockSize } = require('./Vhd/_utils')
const { asyncEach } = require('@vates/async-each')
const cappedBufferConcat = (buffers, maxSize) => {
let buffer = Buffer.concat(buffers)
@@ -13,114 +14,6 @@ const cappedBufferConcat = (buffers, maxSize) => {
return buffer
}
exports.parseVhdStream = async function* parseVhdStream(stream) {
let bytesRead = 0
// handle empty space between elements
// ensure we read stream in order
async function read(offset, size) {
assert(bytesRead <= offset, `offset is ${offset} but we already read ${bytesRead} bytes`)
if (bytesRead < offset) {
// empty spaces
await read(bytesRead, offset - bytesRead)
}
const buf = await readChunk(stream, size)
assert.strictEqual(buf.length, size, `read ${buf.length} instead of ${size}`)
bytesRead += size
return buf
}
const bufFooter = await read(0, FOOTER_SIZE)
const footer = unpackFooter(bufFooter)
yield { type: 'footer', footer, offset: 0 }
const bufHeader = await read(FOOTER_SIZE, HEADER_SIZE)
const header = unpackHeader(bufHeader, footer)
yield { type: 'header', header, offset: SECTOR_SIZE }
const blockSize = header.blockSize
assert.strictEqual(blockSize % SECTOR_SIZE, 0)
const fullBlockSize = computeFullBlockSize(blockSize)
const bitmapSize = fullBlockSize - blockSize
const index = []
for (const parentLocatorId in header.parentLocatorEntry) {
const parentLocatorEntry = header.parentLocatorEntry[parentLocatorId]
// empty parent locator entry, does not exist in the content
if (parentLocatorEntry.platformDataSpace === 0) {
continue
}
index.push({
...parentLocatorEntry,
type: 'parentLocator',
offset: parentLocatorEntry.platformDataOffset,
size: parentLocatorEntry.platformDataLength,
id: parentLocatorId,
})
}
const batOffset = header.tableOffset
const batSize = Math.max(1, Math.ceil((header.maxTableEntries * 4) / SECTOR_SIZE)) * SECTOR_SIZE
index.push({
type: 'bat',
offset: batOffset,
size: batSize,
})
// sometimes some parent locator are before the BAT
index.sort((a, b) => a.offset - b.offset)
while (index.length > 0) {
const item = index.shift()
const buffer = await read(item.offset, item.size)
item.buffer = buffer
const { type } = item
if (type === 'bat') {
// found the BAT : read it and add block to index
let blockCount = 0
for (let blockCounter = 0; blockCounter < header.maxTableEntries; blockCounter++) {
const batEntrySector = buffer.readUInt32BE(blockCounter * 4)
// unallocated block, no need to export it
if (batEntrySector !== BLOCK_UNUSED) {
const batEntryBytes = batEntrySector * SECTOR_SIZE
// ensure the block is not before the bat
assert.ok(batEntryBytes >= batOffset + batSize)
index.push({
type: 'block',
id: blockCounter,
offset: batEntryBytes,
size: fullBlockSize,
})
blockCount++
}
}
// sort again index to ensure block and parent locator are in the right order
index.sort((a, b) => a.offset - b.offset)
item.blockCount = blockCount
} else if (type === 'block') {
item.bitmap = buffer.slice(0, bitmapSize)
item.data = buffer.slice(bitmapSize)
}
yield item
}
/**
* the second footer is at filesize - 512 , there can be empty spaces between last block
* and the start of the footer
*
* we read till the end of the stream, and use the last 512 bytes as the footer
*/
const bufFooterEnd = await readLastSector(stream)
assert(bufFooter.equals(bufFooterEnd), 'footer1 !== footer2')
}
function readLastSector(stream) {
return new Promise((resolve, reject) => {
let bufFooterEnd = Buffer.alloc(0)
@@ -134,3 +27,233 @@ function readLastSector(stream) {
stream.on('error', reject)
})
}
class StreamParser {
#bufFooter
_bitmapSize = 0
_bytesRead = 0
_stream = null
_index = []
constructor(stream) {
this._stream = stream
}
async _read(offset, size) {
assert(this._bytesRead <= offset, `offset is ${offset} but we already read ${this._bytesRead} bytes`)
if (this._bytesRead < offset) {
// empty spaces
await this._read(this._bytesRead, offset - this._bytesRead)
}
const buf = await readChunk(this._stream, size)
assert.strictEqual(buf.length, size, `read ${buf.length} instead of ${size}`)
this._bytesRead += size
return buf
}
async *headers() {
this.#bufFooter = await this._read(0, FOOTER_SIZE)
const footer = unpackFooter(this.#bufFooter)
yield { type: 'footer', footer, offset: 0 }
const bufHeader = await this._read(FOOTER_SIZE, HEADER_SIZE)
const header = unpackHeader(bufHeader, footer)
yield { type: 'header', header, offset: SECTOR_SIZE }
const blockSize = header.blockSize
assert.strictEqual(blockSize % SECTOR_SIZE, 0)
const fullBlockSize = computeFullBlockSize(blockSize)
this._bitmapSize = fullBlockSize - blockSize
let batFound = false
for (const parentLocatorId in header.parentLocatorEntry) {
const parentLocatorEntry = header.parentLocatorEntry[parentLocatorId]
// empty parent locator entry, does not exist in the content
if (parentLocatorEntry.platformDataSpace === 0) {
continue
}
this._index.push({
...parentLocatorEntry,
type: 'parentLocator',
offset: parentLocatorEntry.platformDataOffset,
size: parentLocatorEntry.platformDataLength,
id: parentLocatorId,
})
}
const batOffset = header.tableOffset
const batSize = Math.max(1, Math.ceil((header.maxTableEntries * 4) / SECTOR_SIZE)) * SECTOR_SIZE
this._index.push({
type: 'bat',
offset: batOffset,
size: batSize,
})
// some parent locators may be located before the BAT
this._index.sort((a, b) => a.offset - b.offset)
while (!batFound) {
const item = this._index.shift()
const buffer = await this._read(item.offset, item.size)
item.buffer = buffer
const { type } = item
if (type === 'bat') {
// found the BAT: read it and add the allocated blocks to the index
let blockCount = 0
for (let blockCounter = 0; blockCounter < header.maxTableEntries; blockCounter++) {
const batEntrySector = buffer.readUInt32BE(blockCounter * 4)
// unallocated block, no need to export it
if (batEntrySector !== BLOCK_UNUSED) {
const batEntryBytes = batEntrySector * SECTOR_SIZE
// ensure the block is not before the bat
assert.ok(batEntryBytes >= batOffset + batSize)
this._index.push({
type: 'block',
id: blockCounter,
offset: batEntryBytes,
size: fullBlockSize,
})
blockCount++
}
}
// sort the index again so blocks and parent locators stay ordered by offset
this._index.sort((a, b) => a.offset - b.offset)
item.blockCount = blockCount
batFound = true
}
yield item
}
}
async *blocks() {
while (this._index.length > 0) {
const item = this._index.shift()
const buffer = await this._read(item.offset, item.size)
item.bitmap = buffer.slice(0, this._bitmapSize)
item.data = buffer.slice(this._bitmapSize)
item.buffer = buffer
yield item
}
/**
* the second footer is at filesize - 512; there can be empty space between the last block
* and the start of the footer
*
* we read to the end of the stream and use the last 512 bytes as the footer
*/
const bufFooterEnd = await readLastSector(this._stream)
assert(this.#bufFooter.equals(bufFooterEnd), 'footer1 !== footer2')
}
async *parse() {
yield* this.headers()
yield* this.blocks()
}
}
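// Worked example for the default 2 MiB block size: the bitmap holds one bit per
// 512-byte sector, i.e. (2 MiB / 512 B) = 4096 bits = 512 bytes, rounded up to a
// whole sector by computeFullBlockSize, so fullBlockSize = 512 B of bitmap + 2 MiB
// of data and _bitmapSize = 512. The NBD parser below relies on this when it
// substitutes a constant 512-byte "all sectors allocated" bitmap.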
// hybrid mode: read the headers from the VHD stream and the block data from NBD
class StreamNbdParser extends StreamParser {
#nbdClient = null
#concurrency = 16
constructor(stream, nbdClient = {}) {
super(stream)
this.#nbdClient = nbdClient
}
async _readBlockData(item) {
const SECTOR_BITMAP = Buffer.alloc(512, 255)
const client = this.#nbdClient
// blocks are read from the raw disk exposed over NBD: a block's position is id × blockSize
// and has nothing to do with its offset in the VHD stream
const rawDataLength = item.size - SECTOR_BITMAP.length
const data = await client.readBlock(item.id, rawDataLength)
// rebuild a VHD block: prepend an "all sectors allocated" bitmap to the raw NBD data
const buffer = Buffer.concat([SECTOR_BITMAP, data])
const block = {
...item,
size: rawDataLength,
bitmap: SECTOR_BITMAP,
data,
buffer,
}
return block
}
async *blocks() {
// at most this array will be this.#concurrency long
const blocksReady = []
let waitingForBlock
let done = false
let error
function waitForYield(block) {
return new Promise(resolve => {
blocksReady.push({
block,
yielded: resolve,
})
if (waitingForBlock !== undefined) {
const resolver = waitingForBlock
waitingForBlock = undefined
resolver()
}
})
}
asyncEach(
this._index,
async item => {
const block = await this._readBlockData(item)
await waitForYield(block)
},
{ concurrency: this.#concurrency }
)
.then(() => {
done = true
waitingForBlock?.()
})
.catch(err => {
// keep only the last error if several block reads throw
error = err
waitingForBlock?.()
})
// eslint-disable-next-line no-unmodified-loop-condition
while (!done) {
if (error) {
throw error
}
if (blocksReady.length > 0) {
const { block, yielded } = blocksReady.shift()
yielded()
yield block
} else {
await new Promise(resolve => {
waitingForBlock = resolve
})
}
}
}
async *parse() {
yield* this.headers()
yield* this.blocks()
// @todo: should we destroy it earlier?
this._stream.destroy()
}
}
exports.parseVhdStream = async function* parseVhdStream(stream, nbdClient) {
let parser
if (nbdClient) {
parser = new StreamNbdParser(stream, nbdClient)
} else {
parser = new StreamParser(stream)
}
yield* parser.parse()
}
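A consumption sketch of the preserved public API (stream and nbdClient are placeholders; when an NBD client is passed, the headers still come from the stream while block data comes from NBD):

  for await (const item of parseVhdStream(stream, nbdClient)) {
    switch (item.type) {
      case 'footer':
      case 'header':
      case 'parentLocator':
      case 'bat':
        // metadata, yielded in stream order
        break
      case 'block':
        // item.id, item.bitmap and item.data are ready to be written
        break
    }
  }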

View File

@@ -710,6 +710,8 @@ const TRANSFORMS = {
tags: obj.tags,
PIFs: link(obj, 'PIFs'),
VIFs: link(obj, 'VIFs'),
nbd: obj.purpose?.includes('nbd'),
insecureNbd: obj.purpose?.includes('insecure_nbd'),
}
},
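For context, XAPI advertises NBD support through a network's purpose field ('nbd' for TLS-protected NBD, 'insecure_nbd' for plain TCP); these two flags simply surface it to xo-web. A minimal sketch of the mapping (simplified object shape):

  const obj = { purpose: ['nbd'] } // as returned by XAPI for a network
  obj.purpose?.includes('nbd') // → true, rendered as a lock icon
  obj.purpose?.includes('insecure_nbd') // → false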

View File

@@ -27,7 +27,7 @@
">2%"
],
"engines": {
"node": ">=6"
"node": ">=7"
},
"devDependencies": {
"@babel/core": "^7.13.8",

View File

@@ -1230,6 +1230,10 @@ const messages = {
vifUnlockedNetworkWithIps: 'Some IPs are unnecessarily set as allowed for this interface',
vifUnknownNetwork: 'Unknown network',
vifCreate: 'Create',
nbd: 'NBD',
nbdTooltip: 'Network Block Device status',
nbdInsecureTooltip: 'Use of insecure NBD is not advised',
nbdSecureTooltip: 'NBD connection is secure and ready',
// ----- VM snapshot tab -----
noSnapshots: 'No snapshots',

View File

@@ -227,6 +227,11 @@ const PIF_COLUMNS = [
name: _('pifModeLabel'),
sortCriteria: 'mode',
},
{
itemRenderer: pif => pif.mac,
name: _('pifMacLabel'),
@@ -246,6 +251,26 @@
itemRenderer: (pif, userData) => <PifItemLock pif={pif} networks={userData.networks} />,
name: _('defaultLockingMode'),
},
{
itemRenderer: (pif, { networks }) => {
if (networks[pif.$network]?.nbd) {
return (
<Tooltip content={_('nbdSecureTooltip')}>
<Icon icon='lock' />
</Tooltip>
)
}
if (networks[pif.$network]?.insecureNbd) {
return (
<Tooltip content={_('nbdInsecureTooltip')}>
<Icon icon='unlock' />
<Icon icon='error' />
</Tooltip>
)
}
return null
},
name: <Tooltip content={_('nbdTooltip')}>{_('nbd')}</Tooltip>,
},
{
itemRenderer: pif => (
<div>
@@ -325,7 +350,8 @@ const PVT_NETWORK_ACTIONS = [
},
]
export default ({ host, networks, pifs, privateNetworks }) => (
export default ({ host, networks, pifs, privateNetworks }) => {
return (
<Container>
<Row>
<Col>
@@ -369,4 +395,5 @@ export default ({ host, networks, pifs, privateNetworks }) => (
</Row>
)}
</Container>
)
)
}

View File

@@ -291,6 +291,26 @@ const NETWORKS_COLUMNS = [
name: _('poolNetworkMTU'),
itemRenderer: network => network.MTU,
},
{
itemRenderer: ({ nbd, insecureNbd }) => {
if (nbd) {
return (
<Tooltip content={_('nbdSecureTooltip')}>
<Icon icon='lock' />
</Tooltip>
)
}
if (insecureNbd) {
return (
<Tooltip content={_('nbdInsecureTooltip')}>
<Icon icon='unlock' />
<Icon icon='error' />
</Tooltip>
)
}
return null
},
name: <Tooltip content={_('nbdTooltip')}>{_('nbd')}</Tooltip>,
},
{
name: (
<div className='text-xs-center'>

View File

@@ -723,6 +723,25 @@ const COLUMNS = [
name: _('vifRateLimitLabel'),
sortCriteria: 'rateLimit',
},
{
itemRenderer: (vif, { networks }) => {
if (networks[vif.$network]?.nbd) {
return (
<Tooltip content={_('nbdSecureTooltip')}>
<Icon icon='lock' />
</Tooltip>
)
}
if (networks[vif.$network]?.insecureNbd) {
return (
<Tooltip content={_('nbdInsecureTooltip')}>
<Icon icon='unlock' />
<Icon icon='error' />
</Tooltip>
)
}
return null
},
name: <Tooltip content={_('nbdTooltip')}>{_('nbd')}</Tooltip>,
},
{
itemRenderer: ({ device }, { ipsByDevice }) => {
const ips = ipsByDevice[device]