Compare commits

...

37 Commits

Author SHA1 Message Date
Florent Beauchamp
42ddadac7b feat(xo-web/backups): improve incremental/key backup display
When the backup is completely composed of key disks or completely composed of differencing
disks, the number of disks can be hard to understand.
In this case, this PR will only show the number if the backup is mixed.
2023-12-20 10:55:14 +00:00
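For reference, a minimal sketch of the display rule described above; the helper name and return shape are illustrative, not the actual xo-web code:

```js
// Hedged sketch: derive the disk tags shown in the backup list from the
// `isVhdDifferencing` map stored in the backup metadata (helper name is made up).
function formatDiskTags(isVhdDifferencing) {
  const flags = Object.values(isVhdDifferencing)
  const differencing = flags.filter(Boolean).length
  const key = flags.length - differencing
  const mixed = differencing > 0 && key > 0
  return {
    // only show the explicit count when the backup mixes key and differencing disks
    differencing: differencing > 0 ? (mixed ? `${differencing}/${flags.length} differencing` : 'differencing') : undefined,
    key: key > 0 ? (mixed ? `${key}/${flags.length} key` : 'key') : undefined,
  }
}

// formatDiskTags({ 'a.vhd': true, 'b.vhd': false }) → { differencing: '1/2 differencing', key: '1/2 key' }
// formatDiskTags({ 'a.vhd': true, 'b.vhd': true })  → { differencing: 'differencing', key: undefined }
```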
Mathieu
41ed5625be fix(xo-server/RPU): correctly migrate VMs to their original host (#7238)
See zammad#19772
Introduced by bea771
2023-12-19 19:22:50 +01:00
Mathieu
e66bcf2a5c feat(xapi/VDI_exportContent): create XAPI task during NBD export (#7228)
See zammad#19003
2023-12-19 19:16:44 +01:00
b-Nollet
c40e71ed49 fix(xo-web/backup): fix reportWhen being reset to undefined (#7235)
See Zammad#17506

Preventing `reportWhen` value from becoming undefined (and resetting to the
default value _Failure_) when a mirror backup is edited without modifying this
field.
2023-12-19 17:08:19 +01:00
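A minimal sketch of the defensive normalization involved, assuming `reportWhen` may be either a select option object (`{ value, label }`) or the plain string loaded from the existing job; everything except the field name is illustrative:

```js
// Hedged sketch: accept both shapes so an untouched field no longer collapses to undefined.
function normalizeReportWhen(reportWhen) {
  // option object from the UI → use its value; raw string from the stored job → keep it
  return reportWhen?.value ?? reportWhen
}

normalizeReportWhen({ value: 'failure', label: 'Failure' }) // → 'failure'
normalizeReportWhen('always') // → 'always'
```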
Florent BEAUCHAMP
439c721472 refactor(backups/cleanVm): use of merge parameter 2023-12-19 15:42:59 +01:00
Florent Beauchamp
99429edf23 test(backups/cleanVm): fix after changes 2023-12-19 15:42:59 +01:00
Florent Beauchamp
cec8237a47 feat(xo-web): show if an incremental backup is a delta or a full 2023-12-19 15:42:59 +01:00
Florent Beauchamp
e13d55bfa9 feat(backups): store in metadata whether the disks of incremental backups are differencing 2023-12-19 15:42:59 +01:00
Florent Beauchamp
141c141516 refactor(backups): differentialVhds to isVhdDifferencing 2023-12-19 15:42:59 +01:00
Florent Beauchamp
7a47d23191 feat(backups): show warning message if XVAs are truncated 2023-12-19 15:42:59 +01:00
Florent Beauchamp
7a8bf671fb feat(backups): put back warning message on truncated vhds 2023-12-19 15:42:59 +01:00
Florent Beauchamp
7f83a3e55e fix(backup, vhd-lib): merge now returns the resulting size and merged size 2023-12-19 15:42:59 +01:00
Florent Beauchamp
7f8ab07692 fix(backup): transferred backup size for incremental backup 2023-12-19 15:42:59 +01:00
Julien Fontanet
2634008a6a docs(VM backups): fix settings link 2023-12-19 15:41:39 +01:00
Florent Beauchamp
4c652a457f fixup! refactor(nbd-client): remove unused method and default iterator 2023-12-19 15:28:32 +01:00
Florent Beauchamp
89dc40a1c5 fixup! feat(nbd-client,xapi): implement multiple connections NBD 2023-12-19 15:28:32 +01:00
Florent Beauchamp
04a7982801 test(nbd-client): add test for unaligned export size 2023-12-19 15:28:32 +01:00
Florent Beauchamp
df9b59f980 fix(nbd-client): better handling of multiple disconnections 2023-12-19 15:28:32 +01:00
Florent Beauchamp
fe215a53af feat(nbd-client): expose exportSize on multi nbd client 2023-12-19 15:28:32 +01:00
Florent Beauchamp
0559c843c4 fix(nbd-client): put back retry on block read 2023-12-19 15:28:32 +01:00
Florent Beauchamp
79967e0eec feat(nbd-client/multi): allow partial connection 2023-12-19 15:28:32 +01:00
Florent Beauchamp
847ad63c09 feat(backups,xo-server,xo-web): make NBD concurrency configurable 2023-12-19 15:28:32 +01:00
Florent Beauchamp
fc1357db93 refactor(nbd-client): remove unused method and default iterator 2023-12-19 15:28:32 +01:00
Florent Beauchamp
b644cbe28d feat(nbd-client,xapi): implement multiple connections NBD 2023-12-19 15:28:32 +01:00
Florent Beauchamp
7ddfb2a684 fix(vhd-lib/createStreamNbd): create binary instead of object stream
This reduces memory consumption.
2023-12-19 15:28:32 +01:00
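A minimal, self-contained sketch of this change's idea (not the vhd-lib code): building the stream in binary mode so buffering and backpressure are counted in bytes rather than in queued chunk objects.

```js
import { Readable } from 'node:stream'

// Hedged sketch: an async generator of Buffers wrapped as a *binary* stream.
// With objectMode: false, highWaterMark is expressed in bytes, so large 2 MiB
// blocks are not queued by count, which keeps memory consumption bounded.
async function* blocks() {
  for (let i = 0; i < 4; i++) {
    yield Buffer.alloc(2 * 1024 * 1024, i) // 2 MiB per block
  }
}

const stream = Readable.from(blocks(), { objectMode: false })
stream.on('data', chunk => console.log('read', chunk.length, 'bytes'))
```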
Julien Fontanet
5a0cfd86c7 fix(xo-server/remote#_unserialize): handle boolean enabled
Introduced by 32afd5c46

Fixes #7246
Fixes https://xcp-ng.org/forum/post/68575
2023-12-19 15:11:34 +01:00
Julien Fontanet
70e3ba17af fix(CHANGELOG.unreleased): metadata → mirror
Introduced by 2b973275c
2023-12-19 15:07:01 +01:00
Julien Fontanet
4784bbfb99 fix(xo-server/vm.migrate): don't hide original error 2023-12-18 15:02:51 +01:00
MlssFrncJrg
ceddddd7f2 fix(xo-server/PIF): call PIF.forget to delete PIF (#7221)
Fixes #7193

Calling `pif.destroy` generates PIF_IS_PHYSICAL error when the PIF has been
physically removed
2023-12-18 14:41:58 +01:00
Julien Fontanet
32afd5c463 feat(xo-server): save JSON records in Redis (#7113)
Better support of non-string properties without having to handle them one by one.

To avoid breaking compatibility with older versions, this should not be merged until reading JSON records has been supported for at least a month.
2023-12-18 14:26:51 +01:00
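A rough sketch of the storage scheme this enables, assuming a node-redis v4 client and the `<prefix>:<id>` key layout visible in the diff below; the helper names are illustrative:

```js
// Hedged sketch: new records are written as a single JSON string; reads fall back
// to the legacy hash format for records written by older versions.
async function saveRecord(redis, prefix, id, model) {
  await redis.set(`${prefix}:${id}`, JSON.stringify(model))
}

async function readRecord(redis, prefix, id) {
  const key = `${prefix}:${id}`
  try {
    return JSON.parse(await redis.get(key)) // new JSON format
  } catch (error) {
    // GET on a key holding a hash raises WRONGTYPE → legacy record stored with HSET
    if (!String(error.message).startsWith('WRONGTYPE')) {
      throw error
    }
    return redis.hGetAll(key)
  }
}
```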
Julien Fontanet
ac391f6a0f docs(incremental_backups): fix image path
Introduced by a0b50b47e
2023-12-18 11:45:23 +01:00
Manon Mercier
a0b50b47ef docs(incremental_backups): explain NBD (#7240) 2023-12-18 11:41:20 +01:00
b-Nollet
e3618416bf fix(xo-server-transport-slack): slack-node → @slack/webhook (#7220)
Fixes #7130

Changing the package used for Slack webhooks, to ensure compatibility with Slack-compatible services like Discord or Mattermost.
2023-12-18 10:56:54 +01:00
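A minimal usage sketch of the new dependency (the webhook URL is a placeholder); Mattermost and Discord (through its Slack-compatible endpoint) accept the same incoming-webhook payload, which is what restores compatibility:

```js
import { IncomingWebhook } from '@slack/webhook'

// Hedged sketch: send a simple text message through an incoming webhook.
const webhook = new IncomingWebhook('https://hooks.slack.com/services/T000/B000/XXXX')
await webhook.send({ text: 'Test of the transport-slack plugin' })
```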
Julien Fontanet
37fd6d13db chore(xo-server-transport-email): don't use deprecated nodemailer-markdown
BREAKING CHANGE: now requires Node >=18.

Permits the use of a more recent `marked`.
2023-12-17 11:56:18 +01:00
Julien Fontanet
eb56666f98 chore(xen-api,xo-server): update to proxy-agent@6.3.1 2023-12-16 16:41:10 +01:00
Julien Fontanet
b7daee81c0 feat(xo-server): http.useForwardedHeaders (#7233)
Fixes https://xcp-ng.org/forum/post/67625

This setting can be enabled when XO is behind a reverse proxy to
fetch clients IP addresses from `X-Forwarded-*` headers.
2023-12-16 12:34:58 +01:00
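A minimal sketch of how such a setting is typically interpreted with `proxy-addr` (the trusted addresses below are placeholders): `true`/`false` trust or ignore the headers unconditionally, while a list only trusts them when the TCP peer is one of the listed proxies.

```js
import proxyAddr from 'proxy-addr'

// Hedged sketch: compile the trust setting once, then resolve the client address
// per request. proxyAddr walks X-Forwarded-For from the right and stops at the
// first address that is not a trusted proxy.
const trust = proxyAddr.compile(['127.0.0.1', '10.0.0.0/8']) // example trusted proxies

function getClientIp(req) {
  return proxyAddr(req, trust)
}
```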
Julien Fontanet
bee0eb9091 chore: update dev deps 2023-12-15 14:09:18 +01:00
41 changed files with 804 additions and 617 deletions

View File

@@ -4,7 +4,6 @@ import { connect } from 'node:tls'
import { fromCallback, pRetry, pDelay, pTimeout, pFromCallback } from 'promise-toolbox'
import { readChunkStrict } from '@vates/read-chunk'
import { createLogger } from '@xen-orchestra/log'
import {
INIT_PASSWD,
NBD_CMD_READ,
@@ -21,8 +20,6 @@ import {
OPTS_MAGIC,
NBD_CMD_DISC,
} from './constants.mjs'
import { Readable } from 'node:stream'
const { warn } = createLogger('vates:nbd-client')
// documentation is here : https://github.com/NetworkBlockDevice/nbd/blob/master/doc/proto.md
@@ -125,6 +122,8 @@ export default class NbdClient {
if (!this.#connected) {
return
}
this.#connected = false
const socket = this.#serverSocket
const queryId = this.#nextCommandQueryId
this.#nextCommandQueryId++
@@ -137,12 +136,12 @@ export default class NbdClient {
buffer.writeBigUInt64BE(0n, 16)
buffer.writeInt32BE(0, 24)
const promise = pFromCallback(cb => {
this.#serverSocket.end(buffer, 'utf8', cb)
socket.end(buffer, 'utf8', cb)
})
try {
await pTimeout.call(promise, this.#messageTimeout)
} catch (error) {
this.#serverSocket.destroy()
socket.destroy()
}
this.#serverSocket = undefined
this.#connected = false
@@ -290,7 +289,7 @@ export default class NbdClient {
}
}
async readBlock(index, size = NBD_DEFAULT_BLOCK_SIZE) {
async #readBlock(index, size) {
// we don't want to add anything in backlog while reconnecting
if (this.#reconnectingPromise) {
await this.#reconnectingPromise
@@ -338,57 +337,13 @@ export default class NbdClient {
})
}
async *readBlocks(indexGenerator = 2 * 1024 * 1024) {
// default : read all blocks
if (typeof indexGenerator === 'number') {
const exportSize = Number(this.#exportSize)
const chunkSize = indexGenerator
indexGenerator = function* () {
const nbBlocks = Math.ceil(exportSize / chunkSize)
for (let index = 0; index < nbBlocks; index++) {
yield { index, size: chunkSize }
}
}
}
const readAhead = []
const readAheadMaxLength = this.#readAhead
const makeReadBlockPromise = (index, size) => {
const promise = pRetry(() => this.readBlock(index, size), {
tries: this.#readBlockRetries,
onRetry: async err => {
warn('will retry reading block ', index, err)
await this.reconnect()
},
})
// error is handled during unshift
promise.catch(() => {})
return promise
}
// read all blocks, but try to keep readAheadMaxLength promise waiting ahead
for (const { index, size } of indexGenerator()) {
// stack readAheadMaxLength promises before starting to handle the results
if (readAhead.length === readAheadMaxLength) {
// any error will stop reading blocks
yield readAhead.shift()
}
readAhead.push(makeReadBlockPromise(index, size))
}
while (readAhead.length > 0) {
yield readAhead.shift()
}
}
stream(chunkSize) {
async function* iterator() {
for await (const chunk of this.readBlocks(chunkSize)) {
yield chunk
}
}
// create a readable stream instead of returning the iterator
// since iterators don't like unshift and partial reading
return Readable.from(iterator())
async readBlock(index, size = NBD_DEFAULT_BLOCK_SIZE) {
return pRetry(() => this.#readBlock(index, size), {
tries: this.#readBlockRetries,
onRetry: async err => {
warn('will retry reading block ', index, err)
await this.reconnect()
},
})
}
}

View File

@@ -0,0 +1,87 @@
import { asyncEach } from '@vates/async-each'
import { NBD_DEFAULT_BLOCK_SIZE } from './constants.mjs'
import NbdClient from './index.mjs'
import { createLogger } from '@xen-orchestra/log'
const { warn } = createLogger('vates:nbd-client:multi')
export default class MultiNbdClient {
#clients = []
#readAhead
get exportSize() {
return this.#clients[0].exportSize
}
constructor(settings, { nbdConcurrency = 8, readAhead = 16, ...options } = {}) {
this.#readAhead = readAhead
if (!Array.isArray(settings)) {
settings = [settings]
}
for (let i = 0; i < nbdConcurrency; i++) {
this.#clients.push(
new NbdClient(settings[i % settings.length], { ...options, readAhead: Math.ceil(readAhead / nbdConcurrency) })
)
}
}
async connect() {
const connectedClients = []
for (const clientId in this.#clients) {
const client = this.#clients[clientId]
try {
await client.connect()
connectedClients.push(client)
} catch (err) {
client.disconnect().catch(() => {})
warn(`can't connect to one nbd client`, { err })
}
}
if (connectedClients.length === 0) {
throw new Error(`Fail to connect to any Nbd client`)
}
if (connectedClients.length < this.#clients.length) {
warn(
`incomplete connection by multi Nbd, only ${connectedClients.length} over ${
this.#clients.length
} expected clients`
)
this.#clients = connectedClients
}
}
async disconnect() {
await asyncEach(this.#clients, client => client.disconnect(), {
stopOnError: false,
})
}
async readBlock(index, size = NBD_DEFAULT_BLOCK_SIZE) {
const clientId = index % this.#clients.length
return this.#clients[clientId].readBlock(index, size)
}
async *readBlocks(indexGenerator) {
// default : read all blocks
const readAhead = []
const makeReadBlockPromise = (index, size) => {
const promise = this.readBlock(index, size)
// error is handled during unshift
promise.catch(() => {})
return promise
}
// read all blocks, but try to keep readAheadMaxLength promise waiting ahead
for (const { index, size } of indexGenerator()) {
// stack readAheadMaxLength promises before starting to handle the results
if (readAhead.length === this.#readAhead) {
// any error will stop reading blocks
yield readAhead.shift()
}
readAhead.push(makeReadBlockPromise(index, size))
}
while (readAhead.length > 0) {
yield readAhead.shift()
}
}
}

View File

@@ -1,4 +1,3 @@
import NbdClient from '../index.mjs'
import { spawn, exec } from 'node:child_process'
import fs from 'node:fs/promises'
import { test } from 'tap'
@@ -7,8 +6,10 @@ import { pFromCallback } from 'promise-toolbox'
import { Socket } from 'node:net'
import { NBD_DEFAULT_PORT } from '../constants.mjs'
import assert from 'node:assert'
import MultiNbdClient from '../multi.mjs'
const FILE_SIZE = 10 * 1024 * 1024
const CHUNK_SIZE = 1024 * 1024 // non default size
const FILE_SIZE = 1024 * 1024 * 9.5 // non aligned file size
async function createTempFile(size) {
const tmpPath = await pFromCallback(cb => tmp.file(cb))
@@ -81,7 +82,7 @@ test('it works with unsecured network', async tap => {
const path = await createTempFile(FILE_SIZE)
let nbdServer = await spawnNbdKit(path)
const client = new NbdClient(
const client = new MultiNbdClient(
{
address: '127.0.0.1',
exportname: 'MY_SECRET_EXPORT',
@@ -109,13 +110,13 @@ CYu1Xn/FVPx1HoRgWc7E8wFhDcA/P3SJtfIQWHB9FzSaBflKGR4t8WCE2eE8+cTB
`,
},
{
nbdConcurrency: 1,
readAhead: 2,
}
)
await client.connect()
tap.equal(client.exportSize, BigInt(FILE_SIZE))
const CHUNK_SIZE = 1024 * 1024 // non default size
const indexes = []
for (let i = 0; i < FILE_SIZE / CHUNK_SIZE; i++) {
indexes.push(i)
@@ -127,9 +128,9 @@ CYu1Xn/FVPx1HoRgWc7E8wFhDcA/P3SJtfIQWHB9FzSaBflKGR4t8WCE2eE8+cTB
})
let i = 0
for await (const block of nbdIterator) {
let blockOk = true
let blockOk = block.length === Math.min(CHUNK_SIZE, FILE_SIZE - CHUNK_SIZE * i)
let firstFail
for (let j = 0; j < CHUNK_SIZE; j += 4) {
for (let j = 0; j < block.length; j += 4) {
const wanted = i * CHUNK_SIZE + j
const found = block.readUInt32BE(j)
blockOk = blockOk && found === wanted
@@ -137,7 +138,7 @@ CYu1Xn/FVPx1HoRgWc7E8wFhDcA/P3SJtfIQWHB9FzSaBflKGR4t8WCE2eE8+cTB
firstFail = j
}
}
tap.ok(blockOk, `check block ${i} content`)
tap.ok(blockOk, `check block ${i} content ${block.length}`)
i++
// flaky server is flaky
@@ -147,17 +148,6 @@ CYu1Xn/FVPx1HoRgWc7E8wFhDcA/P3SJtfIQWHB9FzSaBflKGR4t8WCE2eE8+cTB
nbdServer = await spawnNbdKit(path)
}
}
// we can reuse the conneciton to read other blocks
// default iterator
const nbdIteratorWithDefaultBlockIterator = client.readBlocks()
let nb = 0
for await (const block of nbdIteratorWithDefaultBlockIterator) {
nb++
tap.equal(block.length, 2 * 1024 * 1024)
}
tap.equal(nb, 5)
assert.rejects(() => client.readBlock(100, CHUNK_SIZE))
await client.disconnect()

View File

@@ -67,6 +67,11 @@ async function generateVhd(path, opts = {}) {
await VhdAbstract.createAlias(handler, path + '.alias.vhd', dataPath)
}
if (opts.blocks) {
for (const blockId of opts.blocks) {
await vhd.writeEntireBlock({ id: blockId, buffer: Buffer.alloc(2 * 1024 * 1024 + 512, blockId) })
}
}
await vhd.writeBlockAllocationTable()
await vhd.writeHeader()
await vhd.writeFooter()
@@ -230,7 +235,7 @@ test('it merges delta of non destroyed chain', async () => {
const metadata = JSON.parse(await handler.readFile(`${rootPath}/metadata.json`))
// size should be the size of children + grand children after the merge
assert.equal(metadata.size, 209920)
assert.equal(metadata.size, 104960)
// merging is already tested in vhd-lib, don't retest it here (and theses vhd are as empty as my stomach at 12h12)
// only check deletion
@@ -320,6 +325,7 @@ describe('tests multiple combination ', () => {
const ancestor = await generateVhd(`${basePath}/ancestor.vhd`, {
useAlias,
mode: vhdMode,
blocks: [1, 3],
})
const child = await generateVhd(`${basePath}/child.vhd`, {
useAlias,
@@ -328,6 +334,7 @@ describe('tests multiple combination ', () => {
parentUnicodeName: 'ancestor.vhd' + (useAlias ? '.alias.vhd' : ''),
parentUuid: ancestor.footer.uuid,
},
blocks: [1, 2],
})
// a grand child vhd in metadata
await generateVhd(`${basePath}/grandchild.vhd`, {
@@ -337,6 +344,7 @@ describe('tests multiple combination ', () => {
parentUnicodeName: 'child.vhd' + (useAlias ? '.alias.vhd' : ''),
parentUuid: child.footer.uuid,
},
blocks: [2, 3],
})
// an older parent that was merging in clean
@@ -395,7 +403,7 @@ describe('tests multiple combination ', () => {
const metadata = JSON.parse(await handler.readFile(`${rootPath}/metadata.json`))
// size should be the size of children + grand children + clean after the merge
assert.deepEqual(metadata.size, vhdMode === 'file' ? 314880 : undefined)
assert.deepEqual(metadata.size, vhdMode === 'file' ? 6502400 : 6501888)
// broken vhd, non referenced, abandonned should be deleted ( alias and data)
// ancestor and child should be merged

View File

@@ -36,34 +36,32 @@ const computeVhdsSize = (handler, vhdPaths) =>
)
// chain is [ ancestor, child_1, ..., child_n ]
async function _mergeVhdChain(handler, chain, { logInfo, remove, merge, mergeBlockConcurrency }) {
if (merge) {
logInfo(`merging VHD chain`, { chain })
async function _mergeVhdChain(handler, chain, { logInfo, remove, mergeBlockConcurrency }) {
logInfo(`merging VHD chain`, { chain })
let done, total
const handle = setInterval(() => {
if (done !== undefined) {
logInfo('merge in progress', {
done,
parent: chain[0],
progress: Math.round((100 * done) / total),
total,
})
}
}, 10e3)
try {
return await mergeVhdChain(handler, chain, {
logInfo,
mergeBlockConcurrency,
onProgress({ done: d, total: t }) {
done = d
total = t
},
removeUnused: remove,
let done, total
const handle = setInterval(() => {
if (done !== undefined) {
logInfo('merge in progress', {
done,
parent: chain[0],
progress: Math.round((100 * done) / total),
total,
})
} finally {
clearInterval(handle)
}
}, 10e3)
try {
return await mergeVhdChain(handler, chain, {
logInfo,
mergeBlockConcurrency,
onProgress({ done: d, total: t }) {
done = d
total = t
},
removeUnused: remove,
})
} finally {
clearInterval(handle)
}
}
@@ -471,23 +469,20 @@ export async function cleanVm(
const metadataWithMergedVhd = {}
const doMerge = async () => {
await asyncMap(toMerge, async chain => {
const merged = await limitedMergeVhdChain(handler, chain, {
const { finalVhdSize } = await limitedMergeVhdChain(handler, chain, {
logInfo,
logWarn,
remove,
merge,
mergeBlockConcurrency,
})
if (merged !== undefined) {
const metadataPath = vhdsToJSons[chain[chain.length - 1]] // all the chain should have the same metada file
metadataWithMergedVhd[metadataPath] = true
}
const metadataPath = vhdsToJSons[chain[chain.length - 1]] // the whole chain should reference the same metadata file
metadataWithMergedVhd[metadataPath] = (metadataWithMergedVhd[metadataPath] ?? 0) + finalVhdSize
})
}
await Promise.all([
...unusedVhdsDeletion,
toMerge.length !== 0 && (merge ? Task.run({ name: 'merge' }, doMerge) : doMerge()),
toMerge.length !== 0 && (merge ? Task.run({ name: 'merge' }, doMerge) : () => Promise.resolve()),
asyncMap(unusedXvas, path => {
logWarn('unused XVA', { path })
if (remove) {
@@ -509,12 +504,11 @@ export async function cleanVm(
// update size for delta metadata with merged VHD
// check for the other that the size is the same as the real file size
await asyncMap(jsons, async metadataPath => {
const metadata = backups.get(metadataPath)
let fileSystemSize
const merged = metadataWithMergedVhd[metadataPath] !== undefined
const mergedSize = metadataWithMergedVhd[metadataPath]
const { mode, size, vhds, xva } = metadata
@@ -524,26 +518,29 @@ export async function cleanVm(
const linkedXva = resolve('/', vmDir, xva)
try {
fileSystemSize = await handler.getSize(linkedXva)
if (fileSystemSize !== size && fileSystemSize !== undefined) {
logWarn('cleanVm: incorrect backup size in metadata', {
path: metadataPath,
actual: size ?? 'none',
expected: fileSystemSize,
})
}
} catch (error) {
// can fail with encrypted remote
}
} else if (mode === 'delta') {
const linkedVhds = Object.keys(vhds).map(key => resolve('/', vmDir, vhds[key]))
fileSystemSize = await computeVhdsSize(handler, linkedVhds)
// the size is not computed in some cases (e.g. VhdDirectory)
if (fileSystemSize === undefined) {
return
}
// don't warn if the size has changed after a merge
if (!merged && fileSystemSize !== size) {
// FIXME: figure out why it occurs so often and, once fixed, log the real problems with `logWarn`
console.warn('cleanVm: incorrect backup size in metadata', {
path: metadataPath,
actual: size ?? 'none',
expected: fileSystemSize,
})
if (mergedSize === undefined) {
const linkedVhds = Object.keys(vhds).map(key => resolve('/', vmDir, vhds[key]))
fileSystemSize = await computeVhdsSize(handler, linkedVhds)
// the size is not computed in some cases (e.g. VhdDirectory)
if (fileSystemSize !== undefined && fileSystemSize !== size) {
logWarn('cleanVm: incorrect backup size in metadata', {
path: metadataPath,
actual: size ?? 'none',
expected: fileSystemSize,
})
}
}
}
} catch (error) {
@@ -551,9 +548,19 @@ export async function cleanVm(
return
}
// systematically update size after a merge
if ((merged || fixMetadata) && size !== fileSystemSize) {
metadata.size = fileSystemSize
// systematically update size and differentials after a merge
// @todo: after 2024-04-01 remove the fixMetadata option since the size computation is fixed
if (mergedSize || (fixMetadata && fileSystemSize !== size)) {
metadata.size = mergedSize ?? fileSystemSize ?? size
if (mergedSize) {
// all disks are now key disks
metadata.isVhdDifferencing = {}
for (const id of Object.values(metadata.vdis ?? {})) {
metadata.isVhdDifferencing[`${id}.vhd`] = false
}
}
mustRegenerateCache = true
try {
await handler.writeFile(metadataPath, JSON.stringify(metadata), { flags: 'w' })

View File

@@ -34,6 +34,7 @@ export async function exportIncrementalVm(
fullVdisRequired = new Set(),
disableBaseTags = false,
nbdConcurrency = 1,
preferNbd,
} = {}
) {
@@ -82,6 +83,7 @@ export async function exportIncrementalVm(
baseRef: baseVdi?.$ref,
cancelToken,
format: 'vhd',
nbdConcurrency,
preferNbd,
})
})

View File

@@ -32,10 +32,10 @@ class IncrementalRemoteVmBackupRunner extends AbstractRemote {
useChain: false,
})
const differentialVhds = {}
const isVhdDifferencing = {}
await asyncEach(Object.entries(incrementalExport.streams), async ([key, stream]) => {
differentialVhds[key] = await isVhdDifferencingDisk(stream)
isVhdDifferencing[key] = await isVhdDifferencingDisk(stream)
})
incrementalExport.streams = mapValues(incrementalExport.streams, this._throttleStream)
@@ -43,7 +43,7 @@ class IncrementalRemoteVmBackupRunner extends AbstractRemote {
writer =>
writer.transfer({
deltaExport: forkDeltaExport(incrementalExport),
differentialVhds,
isVhdDifferencing,
timestamp: metadata.timestamp,
vm: metadata.vm,
vmSnapshot: metadata.vmSnapshot,

View File

@@ -41,6 +41,7 @@ export const IncrementalXapi = class IncrementalXapiVmBackupRunner extends Abstr
const deltaExport = await exportIncrementalVm(exportedVm, baseVm, {
fullVdisRequired,
nbdConcurrency: this._settings.nbdConcurrency,
preferNbd: this._settings.preferNbd,
})
// since NBD is network based, if one disk uses NBD, all the disks use it
@@ -49,11 +50,11 @@ export const IncrementalXapi = class IncrementalXapiVmBackupRunner extends Abstr
Task.info('Transfer data using NBD')
}
const differentialVhds = {}
const isVhdDifferencing = {}
// since isVhdDifferencingDisk is reading and unshifting data in stream
// it should be done BEFORE any other stream transform
await asyncEach(Object.entries(deltaExport.streams), async ([key, stream]) => {
differentialVhds[key] = await isVhdDifferencingDisk(stream)
isVhdDifferencing[key] = await isVhdDifferencingDisk(stream)
})
const sizeContainers = mapValues(deltaExport.streams, stream => watchStreamSize(stream))
@@ -68,7 +69,7 @@ export const IncrementalXapi = class IncrementalXapiVmBackupRunner extends Abstr
writer =>
writer.transfer({
deltaExport: forkDeltaExport(deltaExport),
differentialVhds,
isVhdDifferencing,
sizeContainers,
timestamp,
vm,

View File

@@ -133,7 +133,7 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
}
}
async _transfer($defer, { differentialVhds, timestamp, deltaExport, vm, vmSnapshot }) {
async _transfer($defer, { isVhdDifferencing, timestamp, deltaExport, vm, vmSnapshot }) {
const adapter = this._adapter
const job = this._job
const scheduleId = this._scheduleId
@@ -161,6 +161,7 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
)
metadataContent = {
isVhdDifferencing,
jobId,
mode: job.mode,
scheduleId,
@@ -180,9 +181,9 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
async ([id, vdi]) => {
const path = `${this._vmBackupDir}/${vhds[id]}`
const isDelta = differentialVhds[`${id}.vhd`]
const isDifferencing = isVhdDifferencing[`${id}.vhd`]
let parentPath
if (isDelta) {
if (isDifferencing) {
const vdiDir = dirname(path)
parentPath = (
await handler.list(vdiDir, {
@@ -204,16 +205,20 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
// TODO remove when this has been done before the export
await checkVhd(handler, parentPath)
}
transferSize += await adapter.writeVhd(path, deltaExport.streams[`${id}.vhd`], {
// don't write it as transferSize += await async function
// since i += await asyncFun leads to a race condition
// as explained: https://eslint.org/docs/latest/rules/require-atomic-updates
const transferSizeOneDisk = await adapter.writeVhd(path, deltaExport.streams[`${id}.vhd`], {
// no checksum for VHDs, because they will be invalidated by
// merges and chainings
checksum: false,
validator: tmpPath => checkVhd(handler, tmpPath),
writeBlockConcurrency: this._config.writeBlockConcurrency,
})
transferSize += transferSizeOneDisk
if (isDelta) {
if (isDifferencing) {
await chainVhd(handler, parentPath, handler, path)
}

View File

@@ -221,7 +221,7 @@ For multiple objects:
### Settings
Settings are described in [`@xen-orchestra/backups/Backup.js](https://github.com/vatesfr/xen-orchestra/blob/master/%40xen-orchestra/backups/Backup.js).
Settings are described in [`@xen-orchestra/backups/_runners/VmsXapi.mjs`](https://github.com/vatesfr/xen-orchestra/blob/master/%40xen-orchestra/backups/_runners/VmsXapi.mjs).
## Writer API

View File

@@ -25,6 +25,8 @@ function formatVmBackup(backup) {
name_description: backup.vm.name_description,
name_label: backup.vm.name_label,
},
differencingVhds: Object.values(backup.isVhdDifferencing).filter(t => t).length,
dynamicVhds: Object.values(backup.isVhdDifferencing).filter(t => !t).length,
}
}

View File

@@ -3,17 +3,17 @@ import pCatch from 'promise-toolbox/catch'
import pRetry from 'promise-toolbox/retry'
import { createLogger } from '@xen-orchestra/log'
import { decorateClass } from '@vates/decorate-with'
import { finished } from 'node:stream'
import { strict as assert } from 'node:assert'
import extractOpaqueRef from './_extractOpaqueRef.mjs'
import NbdClient from '@vates/nbd-client'
import { createNbdRawStream, createNbdVhdStream } from 'vhd-lib/createStreamNbd.js'
import MultiNbdClient from '@vates/nbd-client/multi.mjs'
import { createNbdVhdStream, createNbdRawStream } from 'vhd-lib/createStreamNbd.js'
import { VDI_FORMAT_RAW, VDI_FORMAT_VHD } from './index.mjs'
const { warn } = createLogger('xo:xapi:vdi')
const noop = Function.prototype
class Vdi {
async clone(vdiRef) {
return extractOpaqueRef(await this.callAsync('VDI.clone', vdiRef))
@@ -64,13 +64,13 @@ class Vdi {
})
}
async _getNbdClient(ref) {
async _getNbdClient(ref, { nbdConcurrency = 1 } = {}) {
const nbdInfos = await this.call('VDI.get_nbd_info', ref)
if (nbdInfos.length > 0) {
// a little bit of randomization to spread the load
const nbdInfo = nbdInfos[Math.floor(Math.random() * nbdInfos.length)]
try {
const nbdClient = new NbdClient(nbdInfo, this._nbdOptions)
const nbdClient = new MultiNbdClient(nbdInfos, { ...this._nbdOptions, nbdConcurrency })
await nbdClient.connect()
return nbdClient
} catch (err) {
@@ -83,7 +83,10 @@ class Vdi {
}
}
async exportContent(ref, { baseRef, cancelToken = CancelToken.none, format, preferNbd = this._preferNbd }) {
async exportContent(
ref,
{ baseRef, cancelToken = CancelToken.none, format, nbdConcurrency = 1, preferNbd = this._preferNbd }
) {
const query = {
format,
vdi: ref,
@@ -97,19 +100,23 @@ class Vdi {
let nbdClient, stream
try {
if (preferNbd) {
nbdClient = await this._getNbdClient(ref)
nbdClient = await this._getNbdClient(ref, { nbdConcurrency })
}
// the raw NBD export does not need to peek at the VHD source
if (nbdClient !== undefined && format === VDI_FORMAT_RAW) {
stream = createNbdRawStream(nbdClient)
} else {
// raw export without nbd or vhd exports needs a resource stream
const vdiName = await this.getField('VDI', ref, 'name_label')
stream = await this.getResource(cancelToken, '/export_raw_vdi/', {
query,
task: await this.task_create(`Exporting content of VDI ${await this.getField('VDI', ref, 'name_label')}`),
task: await this.task_create(`Exporting content of VDI ${vdiName}`),
})
if (nbdClient !== undefined && format === VDI_FORMAT_VHD) {
const taskRef = await this.task_create(`Exporting content of VDI ${vdiName} using NBD`)
stream = await createNbdVhdStream(nbdClient, stream)
stream.on('progress', progress => this.call('task.set_progress', taskRef, progress))
finished(stream, () => this.task_destroy(taskRef))
}
}
return stream

View File

@@ -11,15 +11,25 @@
- [REST API] `/backups` has been renamed to `/backup` (redirections are in place for compatibility)
- [REST API] _VM backup & Replication_ jobs have been moved from `/backup/jobs/:id` to `/backup/jobs/vm/:id` (redirections are in place for compatibility)
- [REST API] _XO config & Pool metadata Backup_ jobs are available at `/backup/jobs/metadata`
- [REST API] _Mirror Backup_ jobs are available at `/backup/jobs/metadata`
- [REST API] _Mirror Backup_ jobs are available at `/backup/jobs/mirror`
- [Plugin/auth-saml] Add _Force re-authentication_ setting [Forum#67764](https://xcp-ng.org/forum/post/67764) (PR [#7232](https://github.com/vatesfr/xen-orchestra/pull/7232))
- [HTTP] `http.useForwardedHeaders` setting can be enabled when XO is behind a reverse proxy to fetch clients IP addresses from `X-Forwarded-*` headers [Forum#67625](https://xcp-ng.org/forum/post/67625) (PR [#7233](https://github.com/vatesfr/xen-orchestra/pull/7233))
- [Backup] Use multiple links to speed up NBD backup (PR [#7216](https://github.com/vatesfr/xen-orchestra/pull/7216))
- [Backup] Show if disk is differential or full in incremental backups (PR [#7222](https://github.com/vatesfr/xen-orchestra/pull/7222))
- [VDI] Create XAPI task during NBD export (PR [#7228](https://github.com/vatesfr/xen-orchestra/pull/7228))
### Bug fixes
- [Backup] Reduce memory consumption when using NBD (PR [#7216](https://github.com/vatesfr/xen-orchestra/pull/7216))
> Users must be able to say: “I had this issue, happy to know it's fixed”
- [REST API] Returns a proper 404 _Not Found_ error when a job does not exist instead of _Internal Server Error_
- [Host/Smart reboot] Automatically retries up to a minute when `HOST_STILL_BOOTING` [#7194](https://github.com/vatesfr/xen-orchestra/issues/7194) (PR [#7231](https://github.com/vatesfr/xen-orchestra/pull/7231))
- [Plugin/transport-slack] Compatibility with other services like Mattermost or Discord [#7130](https://github.com/vatesfr/xen-orchestra/issues/7130) (PR [#7220](https://github.com/vatesfr/xen-orchestra/pull/7220))
- [Host/Network] Fix error "PIF_IS_PHYSICAL" when trying to remove a PIF that had already been physically disconnected [#7193](https://github.com/vatesfr/xen-orchestra/issues/7193) (PR [#7221](https://github.com/vatesfr/xen-orchestra/pull/7221))
- [Mirror backup] Fix backup reports not being sent (PR [#7235](https://github.com/vatesfr/xen-orchestra/pull/7235))
- [RPU] VMs are correctly migrated to their original host (PR [#7238](https://github.com/vatesfr/xen-orchestra/pull/7238))
### Packages to release
@@ -37,8 +47,14 @@
<!--packages-start-->
- @xen-orchestra/xapi patch
- @vates/nbd-client major
- @xen-orchestra/backups patch
- @xen-orchestra/xapi minor
- vhd-lib minor
- xo-server minor
- xo-server-auth-saml minor
- xo-server-transport-email major
- xo-server-transport-slack patch
- xo-web patch
<!--packages-end-->

[Binary image files added, not shown: two new docs assets (27 KiB and 12 KiB) and docs/assets/nbd-enable.png (6.2 KiB).]

View File

@@ -64,3 +64,24 @@ For example, with a value of 2, the first two backups will be a key and a delta,
This is important because on rare occasions a backup can be corrupted, and in the case of incremental backups, this corruption might impact all the following backups in the chain. Occasionally performing a full backup limits how far a corrupted delta backup can propagate.
The value to use depends on your storage constraints and the frequency of your backups, but a value of 20 is a good start.
## NBD-enabled Backups
You have the option to utilize the NBD network protocol for data transfer instead of the VHD handler generated by the XAPI. NBD-enabled backups generally show improved speed as the load on the Dom0 is reduced.
To enable NBD on the pool network, select the relevant pool, and navigate to the Network tab to modify the parameter:
![](./assets/nbd-connection.png)
This will securely transfer encrypted data from the host to the XOA.
When creating or editing an incremental (previously known as delta) backup and replication for this pool in the future, you have the option to enable NBD in the Advanced settings:
![](./assets/nbd-enable.png)
After the job is completed, you can verify whether NBD was used for the transfer in the backup log:
![](./assets/nbd-backup-log.png)
Note that NBD exports are not visible in the task list before XO 5.90 (December 2023).
To learn more about the evolution of this feature across various XO releases, check out our blog posts for versions [5.76](https://xen-orchestra.com/blog/xen-orchestra-5-76/), [5.81](https://xen-orchestra.com/blog/xen-orchestra-5-81/), [5.82](https://xen-orchestra.com/blog/xen-orchestra-5-82/), and [5.86](https://xen-orchestra.com/blog/xen-orchestra-5-86/).

View File

@@ -1,6 +1,6 @@
'use strict'
const { finished, Readable } = require('node:stream')
const { readChunkStrict, skipStrict } = require('@vates/read-chunk')
const { Readable } = require('node:stream')
const { unpackHeader } = require('./Vhd/_utils')
const {
FOOTER_SIZE,
@@ -14,18 +14,38 @@ const {
const { fuHeader, checksumStruct } = require('./_structs')
const assert = require('node:assert')
exports.createNbdRawStream = async function createRawStream(nbdClient) {
const stream = Readable.from(nbdClient.readBlocks())
const MAX_DURATION_BETWEEN_PROGRESS_EMIT = 5e3
const MIN_TRESHOLD_PERCENT_BETWEEN_PROGRESS_EMIT = 1
exports.createNbdRawStream = function createRawStream(nbdClient) {
const exportSize = Number(nbdClient.exportSize)
const chunkSize = 2 * 1024 * 1024
const indexGenerator = function* () {
const nbBlocks = Math.ceil(exportSize / chunkSize)
for (let index = 0; index < nbBlocks; index++) {
yield { index, size: chunkSize }
}
}
const stream = Readable.from(nbdClient.readBlocks(indexGenerator), { objectMode: false })
stream.on('error', () => nbdClient.disconnect())
stream.on('end', () => nbdClient.disconnect())
return stream
}
exports.createNbdVhdStream = async function createVhdStream(nbdClient, sourceStream) {
exports.createNbdVhdStream = async function createVhdStream(
nbdClient,
sourceStream,
{
maxDurationBetweenProgressEmit = MAX_DURATION_BETWEEN_PROGRESS_EMIT,
minTresholdPercentBetweenProgressEmit = MIN_TRESHOLD_PERCENT_BETWEEN_PROGRESS_EMIT,
} = {}
) {
const bufFooter = await readChunkStrict(sourceStream, FOOTER_SIZE)
const header = unpackHeader(await readChunkStrict(sourceStream, HEADER_SIZE))
header.tableOffset = FOOTER_SIZE + HEADER_SIZE
// compute BAT in order
const batSize = Math.ceil((header.maxTableEntries * 4) / SECTOR_SIZE) * SECTOR_SIZE
await skipStrict(sourceStream, header.tableOffset - (FOOTER_SIZE + HEADER_SIZE))
@@ -47,7 +67,6 @@ exports.createNbdVhdStream = async function createVhdStream(nbdClient, sourceStr
precLocator = offset
}
}
header.tableOffset = FOOTER_SIZE + HEADER_SIZE
const rawHeader = fuHeader.pack(header)
checksumStruct(rawHeader, fuHeader)
@@ -69,10 +88,35 @@ exports.createNbdVhdStream = async function createVhdStream(nbdClient, sourceStr
}
}
const totalLength = (offsetSector + blockSizeInSectors + 1) /* end footer */ * SECTOR_SIZE
let lengthRead = 0
let lastUpdate = 0
let lastLengthRead = 0
function throttleEmitProgress() {
const now = Date.now()
if (
lengthRead - lastLengthRead > (minTresholdPercentBetweenProgressEmit / 100) * totalLength ||
(now - lastUpdate > maxDurationBetweenProgressEmit && lengthRead !== lastLengthRead)
) {
stream.emit('progress', lengthRead / totalLength)
lastUpdate = now
lastLengthRead = lengthRead
}
}
function trackAndGet(buffer) {
lengthRead += buffer.length
throttleEmitProgress()
return buffer
}
async function* iterator() {
yield bufFooter
yield rawHeader
yield bat
yield trackAndGet(bufFooter)
yield trackAndGet(rawHeader)
yield trackAndGet(bat)
let precBlocOffset = FOOTER_SIZE + HEADER_SIZE + batSize
for (let i = 0; i < PARENT_LOCATOR_ENTRIES; i++) {
@@ -82,7 +126,7 @@ exports.createNbdVhdStream = async function createVhdStream(nbdClient, sourceStr
await skipStrict(sourceStream, parentLocatorOffset - precBlocOffset)
const data = await readChunkStrict(sourceStream, space)
precBlocOffset = parentLocatorOffset + space
yield data
yield trackAndGet(data)
}
}
@@ -97,16 +141,20 @@ exports.createNbdVhdStream = async function createVhdStream(nbdClient, sourceStr
})
const bitmap = Buffer.alloc(SECTOR_SIZE, 255)
for await (const block of nbdIterator) {
yield bitmap // don't forget the bitmap before the block
yield block
yield trackAndGet(bitmap) // don't forget the bitmap before the block
yield trackAndGet(block)
}
yield bufFooter
yield trackAndGet(bufFooter)
}
const stream = Readable.from(iterator())
stream.length = (offsetSector + blockSizeInSectors + 1) /* end footer */ * SECTOR_SIZE
const stream = Readable.from(iterator(), { objectMode: false })
stream.length = totalLength
stream._nbd = true
stream.on('error', () => nbdClient.disconnect())
stream.on('end', () => nbdClient.disconnect())
finished(stream, () => {
clearInterval(interval)
nbdClient.disconnect()
})
const interval = setInterval(throttleEmitProgress, maxDurationBetweenProgressEmit)
return stream
}

View File

@@ -223,6 +223,15 @@ class Merger {
)
// ensure data size is correct
await this.#writeState()
try {
// vhd file
this.#state.vhdSize = await parentVhd.getSize()
} catch (err) {
// vhd directory
// parentVhd has its bat already loaded
this.#state.vhdSize = parentVhd.streamSize()
}
this.#onProgress({ total: nBlocks, done: nBlocks })
}
@@ -294,9 +303,10 @@ class Merger {
}
async #cleanup() {
const mergedSize = this.#state?.mergedDataSize ?? 0
const finalVhdSize = this.#state?.vhdSize ?? 0
const mergedDataSize = this.#state?.mergedDataSize ?? 0
await this.#handler.unlink(this.#statePath).catch(warn)
return mergedSize
return { mergedDataSize, finalVhdSize }
}
}

View File

@@ -5,7 +5,6 @@ import ms from 'ms'
import httpRequest from 'http-request-plus'
import map from 'lodash/map.js'
import noop from 'lodash/noop.js'
import ProxyAgent from 'proxy-agent'
import { coalesceCalls } from '@vates/coalesce-calls'
import { Collection } from 'xo-collection'
import { EventEmitter } from 'events'
@@ -13,6 +12,7 @@ import { Index } from 'xo-collection/index.js'
import { cancelable, defer, fromCallback, ignoreErrors, pDelay, pRetry, pTimeout } from 'promise-toolbox'
import { limitConcurrency } from 'limit-concurrency-decorator'
import { decorateClass } from '@vates/decorate-with'
import { ProxyAgent } from 'proxy-agent'
import debug from './_debug.mjs'
import getTaskResult from './_getTaskResult.mjs'
@@ -135,13 +135,12 @@ export class Xapi extends EventEmitter {
}
this._allowUnauthorized = opts.allowUnauthorized
let { httpProxy } = opts
const { httpProxy } = opts
if (httpProxy !== undefined) {
if (httpProxy.startsWith('https:')) {
httpProxy = parseUrl(httpProxy)
httpProxy.rejectUnauthorized = !opts.allowUnauthorized
}
this._httpAgent = new ProxyAgent(httpProxy)
this._httpAgent = new ProxyAgent({
getProxyForUrl: () => httpProxy,
rejectUnauthorized: !opts.allowUnauthorized,
})
}
this._setUrl(url)

View File

@@ -46,7 +46,7 @@
"minimist": "^1.2.0",
"ms": "^2.1.1",
"promise-toolbox": "^0.21.0",
"proxy-agent": "^5.0.0",
"proxy-agent": "^6.3.1",
"pw": "0.0.4",
"xmlrpc": "^1.3.2",
"xo-collection": "^0.5.0"

View File

@@ -25,11 +25,11 @@
"preferGlobal": false,
"main": "dist/",
"engines": {
"node": ">=6"
"node": ">=18"
},
"dependencies": {
"marked": "^11.1.0",
"nodemailer": "^6.1.0",
"nodemailer-markdown": "^1.0.1",
"promise-toolbox": "^0.21.0"
},
"devDependencies": {

View File

@@ -1,5 +1,5 @@
import { createTransport } from 'nodemailer'
import { markdown as nodemailerMarkdown } from 'nodemailer-markdown'
import { Marked } from 'marked'
import { promisify } from 'promise-toolbox'
// ===================================================================
@@ -13,8 +13,6 @@ const removeUndefined = obj => {
return obj
}
const markdownCompiler = nodemailerMarkdown()
const logAndRethrow = error => {
console.error('[WARN] plugin transport-email:', (error && error.stack) || error)
@@ -138,6 +136,7 @@ export const testSchema = {
class TransportEmailPlugin {
constructor({ staticConfig, xo }) {
this._marked = new Marked()
this._staticConfig = staticConfig
this._xo = xo
this._unset = null
@@ -168,7 +167,26 @@ class TransportEmailPlugin {
}
const transport = createTransport({ ...transportConf, ...this._staticConfig.transport }, { from })
transport.use('compile', markdownCompiler)
transport.use('compile', (mail, cb) => {
const data = mail?.data
if (data == null || data.markdown == null || data.html !== undefined) {
return cb()
}
mail.resolveContent(data, 'markdown', (error, markdown) => {
if (error != null) {
return cb(error)
}
markdown = String(markdown)
data.html = this._marked.parse(markdown)
if (data.text === undefined) {
data.text = markdown
}
cb()
})
})
this._send = promisify(transport.sendMail, transport)
}
@@ -187,7 +205,7 @@ class TransportEmailPlugin {
subject: '[Xen Orchestra] Test of transport-email plugin',
markdown: `Hi there,
The transport-email plugin for Xen Orchestra server seems to be working fine, nicely done :)
The \`transport-email\` plugin for *Xen Orchestra* server seems to be working fine, nicely done :)
`,
attachments: [
{

View File

@@ -29,8 +29,7 @@
"node": ">=6"
},
"dependencies": {
"promise-toolbox": "^0.21.0",
"slack-node": "^0.1.8"
"@slack/webhook": "^7.0.1"
},
"devDependencies": {
"@babel/cli": "^7.0.0",

View File

@@ -1,5 +1,4 @@
import Slack from 'slack-node'
import { promisify } from 'promise-toolbox'
import { IncomingWebhook } from '@slack/webhook'
// ===================================================================
@@ -49,10 +48,9 @@ class XoServerTransportSlack {
}
configure({ webhookUri, ...conf }) {
const slack = new Slack()
slack.setWebhook(webhookUri)
const slack = new IncomingWebhook(webhookUri)
this._conf = conf
this._send = promisify(slack.webhook)
this._send = slack.send.bind(slack)
}
load() {

View File

@@ -107,6 +107,9 @@ writeBlockConcurrency = 16
enabled = false
threshold = 1000
[http]
useForwardedHeaders = false
# Helmet handles HTTP security via headers
#
# https://helmetjs.github.io/docs/

View File

@@ -107,7 +107,8 @@
"passport": "^0.6.0",
"passport-local": "^1.0.0",
"promise-toolbox": "^0.21.0",
"proxy-agent": "^5.0.0",
"proxy-addr": "^2.0.7",
"proxy-agent": "^6.3.1",
"pug": "^3.0.0",
"pumpify": "^2.0.0",
"pw": "^0.0.4",

View File

@@ -72,6 +72,20 @@
# good enough (e.g. a domain name must be used or there is a reverse proxy).
#publicUrl = 'https://xoa.company.lan'
# Uncomment this if xo-server is behind a reverse proxy to make xo-server use
# X-Forwarded-* headers to determine the IP address of clients.
#
# Accepted values for this setting:
# - false (default): do not use the headers
# - true: always use the headers
# - a list of trusted addresses: the headers will be used only if the connection
# is coming from one of these addresses
#
# More info about the accepted values: https://www.npmjs.com/package/proxy-addr?activeTab=readme#proxyaddrreq-trust
#
# > Note: X-Forwarded-* headers are easily spoofed and the detected IP addresses are unreliable.
#useForwardedHeaders = true
# Settings applied to cookies created by xo-server's embedded HTTP server.
#
# See https://www.npmjs.com/package/cookie#options-1

View File

@@ -22,6 +22,15 @@ const SCHEMA_SETTINGS = {
minimum: 0,
optional: true,
},
nbdConcurrency: {
type: 'number',
minimum: 1,
optional: true,
},
preferNbd: {
type: 'boolean',
optional: true,
},
},
additionalProperties: true,
},
@@ -279,8 +288,8 @@ importVmBackup.params = {
},
useDifferentialRestore: {
type: 'boolean',
optional: true
}
optional: true,
},
}
export function checkBackup({ id, settings, sr }) {

View File

@@ -28,7 +28,7 @@ async function delete_({ pif }) {
return
}
await xapi.callAsync('PIF.destroy', pif._xapiRef)
await xapi.callAsync('PIF.forget', pif._xapiRef)
}
export { delete_ as delete }

View File

@@ -12,7 +12,7 @@ import { format } from 'json-rpc-peer'
import { FAIL_ON_QUEUE } from 'limit-concurrency-decorator'
import { getStreamAsBuffer } from 'get-stream'
import { ignoreErrors } from 'promise-toolbox'
import { invalidParameters, noSuchObject, operationFailed, unauthorized } from 'xo-common/api-errors.js'
import { invalidParameters, noSuchObject, unauthorized } from 'xo-common/api-errors.js'
import { Ref } from 'xen-api'
import { forEach, map, mapFilter, parseSize, safeDateFormat } from '../utils.mjs'
@@ -561,24 +561,14 @@ export async function migrate({
await this.checkPermissions(permissions)
await this.getXapi(vm)
.migrateVm(vm._xapiId, this.getXapi(host), host._xapiId, {
sr: sr && this.getObject(sr, 'SR')._xapiId,
migrationNetworkId: migrationNetwork != null ? migrationNetwork._xapiId : undefined,
mapVifsNetworks: mapVifsNetworksXapi,
mapVdisSrs: mapVdisSrsXapi,
force,
bypassAssert,
})
.catch(error => {
if (error?.code !== undefined) {
// make sure we log the original error
log.warn('vm.migrate', { error })
throw operationFailed({ objectId: vm.id, code: error.code })
}
throw error
})
await this.getXapi(vm).migrateVm(vm._xapiId, this.getXapi(host), host._xapiId, {
sr: sr && this.getObject(sr, 'SR')._xapiId,
migrationNetworkId: migrationNetwork != null ? migrationNetwork._xapiId : undefined,
mapVifsNetworks: mapVifsNetworksXapi,
mapVdisSrs: mapVdisSrsXapi,
force,
bypassAssert,
})
}
migrate.params = {

View File

@@ -147,10 +147,13 @@ export default class Redis extends Collection {
model = this._serialize(model) ?? model
// Generate a new identifier if necessary.
if (model.id === undefined) {
model.id = generateUuid()
let { id } = model
if (id === undefined) {
id = generateUuid()
} else {
// identifier is not stored as value in the database, it's already part of the key
delete model.id
}
const { id } = model
const newEntry = await redis.sAdd(prefix + '_ids', id)
@@ -172,16 +175,7 @@ export default class Redis extends Collection {
}
const key = `${prefix}:${id}`
const props = {}
for (const name of Object.keys(model)) {
if (name !== 'id') {
const value = model[name]
if (value !== undefined) {
props[name] = String(value)
}
}
}
const promises = [redis.del(key), redis.hSet(key, props)]
const promises = [redis.del(key), redis.set(key, JSON.stringify(model))]
// Update indexes.
forEach(indexes, index => {
@@ -206,12 +200,13 @@ export default class Redis extends Collection {
let model
try {
model = await redis.hGetAll(key)
model = await redis.get(key).then(JSON.parse)
} catch (error) {
if (!error.message.startsWith('WRONGTYPE')) {
throw error
}
model = await redis.get(key).then(JSON.parse)
model = await redis.hGetAll(key)
}
return model

View File

@@ -13,6 +13,7 @@ import memoryStoreFactory from 'memorystore'
import merge from 'lodash/merge.js'
import ms from 'ms'
import once from 'lodash/once.js'
import proxyAddr from 'proxy-addr'
import proxyConsole from './proxy-console.mjs'
import pw from 'pw'
import serveStatic from 'serve-static'
@@ -584,7 +585,7 @@ const setUpStaticFiles = (express, opts) => {
// ===================================================================
const setUpApi = (webServer, xo, config) => {
const setUpApi = (webServer, xo, config, useForwardedHeaders) => {
const webSocketServer = new WebSocketServer({
...config.apiWebSocketOptions,
@@ -593,7 +594,7 @@ const setUpApi = (webServer, xo, config) => {
xo.hooks.on('stop', () => fromCallback.call(webSocketServer, 'close'))
const onConnection = (socket, upgradeReq) => {
const { remoteAddress } = upgradeReq.socket
const remoteAddress = proxyAddr(upgradeReq, useForwardedHeaders)
// Create the abstract XO object for this connection.
const connection = xo.createApiConnection(remoteAddress)
@@ -653,7 +654,7 @@ const setUpApi = (webServer, xo, config) => {
const CONSOLE_PROXY_PATH_RE = /^\/api\/consoles\/(.*)$/
const setUpConsoleProxy = (webServer, xo) => {
const setUpConsoleProxy = (webServer, xo, useForwardedHeaders) => {
const webSocketServer = new WebSocketServer({
noServer: true,
})
@@ -678,7 +679,7 @@ const setUpConsoleProxy = (webServer, xo) => {
throw invalidCredentials()
}
const { remoteAddress } = socket
const remoteAddress = proxyAddr(req, useForwardedHeaders)
log.info(`+ Console proxy (${user.name} - ${remoteAddress})`)
const data = {
@@ -836,8 +837,20 @@ export default async function main(args) {
// Trigger a clean job.
await xo.hooks.clean()
const useForwardedHeaders = (() => {
// recompile the function when the setting changes
let useForwardedHeaders
xo.config.watch('http.useForwardedHeaders', val => {
useForwardedHeaders = typeof val === 'boolean' ? () => val : proxyAddr.compile(val)
})
return (...args) => useForwardedHeaders(...args)
})()
express.set('trust proxy', useForwardedHeaders)
// Must be set up before the API.
setUpConsoleProxy(webServer, xo)
setUpConsoleProxy(webServer, xo, useForwardedHeaders)
// Must be set up before the API.
express.use(xo._handleHttpRequest.bind(xo))
@@ -847,7 +860,7 @@ export default async function main(args) {
await setUpPassport(express, xo, config)
// Must be set up before the static files.
setUpApi(webServer, xo, config)
setUpApi(webServer, xo, config, useForwardedHeaders)
setUpProxies(express, config.http.proxies, xo)

View File

@@ -20,7 +20,10 @@ export class Remotes extends Collection {
_unserialize(remote) {
remote.benchmarks = parseProp('remote', remote, 'benchmarks')
remote.enabled = remote.enabled === 'true'
const { enabled } = remote
remote.enabled = typeof enabled === 'boolean' ? enabled : enabled === 'true'
remote.error = parseProp('remote', remote, 'error', remote.error)
}
}

View File

@@ -534,7 +534,7 @@ export default {
}
// Remember on which hosts the running VMs are
const vmsByHost = mapValues(
const vmRefsByHost = mapValues(
groupBy(
filter(this.objects.all, {
$type: 'VM',
@@ -551,7 +551,7 @@ export default {
return hostId
}
),
vms => vms.map(vm => vm.$id)
vms => vms.map(vm => vm.$ref)
)
// Put master in first position to restart it first
@@ -623,20 +623,23 @@ export default {
continue
}
const vmIds = vmsByHost[hostId]
const vmRefs = vmRefsByHost[hostId]
if (vmIds === undefined) {
if (vmRefs === undefined) {
continue
}
const residentVms = host.$resident_VMs.map(vm => vm.uuid)
// host.$resident_VMs is outdated and returns resident VMs before the host.evacuate.
// this.getField is used in order not to get cached data.
const residentVmRefs = await this.getField('host', host.$ref, 'resident_VMs')
for (const vmId of vmIds) {
if (residentVms.includes(vmId)) {
for (const vmRef of vmRefs) {
if (residentVmRefs.includes(vmRef)) {
continue
}
try {
const vmId = await this.getField('VM', vmRef, 'uuid')
await this.migrateVm(vmId, this, hostId)
} catch (err) {
log.error(err)

View File

@@ -1,5 +1,5 @@
import hrp from 'http-request-plus'
import ProxyAgent from 'proxy-agent'
import { ProxyAgent } from 'proxy-agent'
const { env } = process

View File

@@ -586,6 +586,7 @@ const messages = {
editJobNotFound: "The job you're trying to edit wasn't found",
preferNbd: 'Use NBD protocol to transfer disk if available',
preferNbdInformation: 'A network accessible by XO or the proxy must have NBD enabled',
nbdConcurrency: 'Number of NBD connections per disk',
// ------ New Remote -----
newRemote: 'New file system remote',
@@ -1789,6 +1790,9 @@ const messages = {
availableBackupsColumn: 'Available Backups',
backupRestoreErrorTitle: 'Missing parameters',
backupRestoreErrorMessage: 'Choose a SR and a backup',
backupisKey: 'key',
backupIsIncremental: 'incremental',
backupIsDifferencing: 'differencing',
vmsToBackup: 'VMs to backup',
refreshBackupList: 'Refresh backup list',
restoreVmBackups: 'Restore',

View File

@@ -641,24 +641,39 @@ const xoItemToRender = {
<span>{group.name_label.startsWith('Group of ') ? group.name_label.slice(9) : group.name_label}</span>
),
backup: backup => (
<span>
<span className='tag tag-info' style={{ textTransform: 'capitalize' }}>
{backup.mode}
</span>{' '}
<span className='tag tag-warning'>{backup.remote.name}</span>{' '}
{backup.size !== undefined && <span className='tag tag-info'>{formatSize(backup.size)}</span>}{' '}
<FormattedDate
value={new Date(backup.timestamp)}
month='long'
day='numeric'
year='numeric'
hour='2-digit'
minute='2-digit'
second='2-digit'
/>
</span>
),
backup: backup => {
const nbDifferencings = backup.differencingVhds ?? 0
const nbKeys = backup.dynamicVhds ?? 0
const nbDisks = backup.disks.length
return (
<span>
<span className='tag tag-info' style={{ textTransform: 'capitalize' }}>
{backup.mode === 'delta' ? _('backupIsIncremental') : backup.mode}
</span>{' '}
<span className='tag tag-warning'>{backup.remote.name}</span>{' '}
{nbDifferencings > 0 && (
<span className='tag tag-info'>
{nbDifferencings < nbDisks && `${nbDifferencings}/${nbDisks}`} {_('backupIsDifferencing')}{' '}
</span>
)}
{nbKeys > 0 && (
<span className='tag tag-info'>
{nbKeys < nbDisks && `${nbKeys}/${nbDisks}`} {_('backupisKey')}{' '}
</span>
)}
{backup.size !== undefined && <span className='tag tag-info'>{formatSize(backup.size)}</span>}{' '}
<FormattedDate
value={new Date(backup.timestamp)}
month='long'
day='numeric'
year='numeric'
hour='2-digit'
minute='2-digit'
second='2-digit'
/>
</span>
)
},
}
const renderXoItem = (item, { className, type: xoType, ...props } = {}) => {

View File

@@ -188,6 +188,7 @@ const getInitialState = ({ preSelectedVmIds, setHomeVmIdsSelection, suggestedExc
deltaMode: false,
drMode: false,
name: '',
nbdConcurrency: 1,
preferNbd: false,
remotes: [],
schedules: {},
@@ -629,6 +630,11 @@ const New = decorate([
preferNbd,
})
},
setNbdConcurrency({ setGlobalSettings }, nbdConcurrency) {
setGlobalSettings({
nbdConcurrency,
})
},
},
computed: {
compressionId: generateId,
@@ -637,6 +643,7 @@ const New = decorate([
inputFullIntervalId: generateId,
inputMaxExportRate: generateId,
inputPreferNbd: generateId,
inputNbdConcurrency: generateId,
inputTimeoutId: generateId,
// In order to keep the user preference, the offline backup is kept in the DB
@@ -748,6 +755,7 @@ const New = decorate([
concurrency,
fullInterval,
maxExportRate,
nbdConcurrency = 1,
offlineBackup,
offlineSnapshot,
preferNbd,
@@ -1040,6 +1048,20 @@ const New = decorate([
/>
</FormGroup>
)}
{state.isDelta && (
<FormGroup>
<label htmlFor={state.inputNbdConcurrency}>
<strong>{_('nbdConcurrency')}</strong>
</label>
<Number
id={state.inputNbdConcurrency}
min={1}
onChange={effects.setNbdConcurrency}
value={nbdConcurrency}
disabled={!state.inputPreferNbd}
/>
</FormGroup>
)}
<FormGroup>
<label htmlFor={state.inputMaxExportRate}>
<strong>{_('speedLimit')}</strong>

View File

@@ -68,7 +68,7 @@ const normalize = state => {
schedules = mapValues(schedules, ({ id, ...schedule }) => schedule)
settings[''] = {
...advancedSettings,
reportWhen: reportWhen.value,
reportWhen: reportWhen.value ?? reportWhen,
reportRecipients: reportRecipients.length !== 0 ? reportRecipients : undefined,
}
return {

yarn.lock (646): file diff suppressed because it is too large.