Compare commits
56 Commits
xo-lite-v0
...
feat_shown
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
42ddadac7b | ||
|
|
41ed5625be | ||
|
|
e66bcf2a5c | ||
|
|
c40e71ed49 | ||
|
|
439c721472 | ||
|
|
99429edf23 | ||
|
|
cec8237a47 | ||
|
|
e13d55bfa9 | ||
|
|
141c141516 | ||
|
|
7a47d23191 | ||
|
|
7a8bf671fb | ||
|
|
7f83a3e55e | ||
|
|
7f8ab07692 | ||
|
|
2634008a6a | ||
|
|
4c652a457f | ||
|
|
89dc40a1c5 | ||
|
|
04a7982801 | ||
|
|
df9b59f980 | ||
|
|
fe215a53af | ||
|
|
0559c843c4 | ||
|
|
79967e0eec | ||
|
|
847ad63c09 | ||
|
|
fc1357db93 | ||
|
|
b644cbe28d | ||
|
|
7ddfb2a684 | ||
|
|
5a0cfd86c7 | ||
|
|
70e3ba17af | ||
|
|
4784bbfb99 | ||
|
|
ceddddd7f2 | ||
|
|
32afd5c463 | ||
|
|
ac391f6a0f | ||
|
|
a0b50b47ef | ||
|
|
e3618416bf | ||
|
|
37fd6d13db | ||
|
|
eb56666f98 | ||
|
|
b7daee81c0 | ||
|
|
bee0eb9091 | ||
|
|
59a9a63971 | ||
|
|
a2e8b999da | ||
|
|
489ad51b4d | ||
|
|
7db2516a38 | ||
|
|
1141ef524f | ||
|
|
f449258ed3 | ||
|
|
bb3b83c690 | ||
|
|
2b973275c0 | ||
|
|
037e1c1dfa | ||
|
|
f0da94081b | ||
|
|
cd44a6e28c | ||
|
|
70b09839c7 | ||
|
|
12140143d2 | ||
|
|
e68236c9f2 | ||
|
|
8a1a0d76f7 | ||
|
|
4a5bc5dccc | ||
|
|
0ccdfbd6f4 | ||
|
|
75af7668b5 | ||
|
|
0b454fa670 |
@@ -68,6 +68,11 @@ module.exports = {
|
||||
|
||||
'no-console': ['error', { allow: ['warn', 'error'] }],
|
||||
|
||||
// this rule can prevent race condition bugs like parallel `a += await foo()`
|
||||
//
|
||||
// as it has a lot of false positives, it is only enabled as a warning for now
|
||||
'require-atomic-updates': 'warn',
|
||||
|
||||
strict: 'error',
|
||||
},
|
||||
}
|
||||
|
||||
@@ -4,7 +4,6 @@ import { connect } from 'node:tls'
|
||||
import { fromCallback, pRetry, pDelay, pTimeout, pFromCallback } from 'promise-toolbox'
|
||||
import { readChunkStrict } from '@vates/read-chunk'
|
||||
import { createLogger } from '@xen-orchestra/log'
|
||||
|
||||
import {
|
||||
INIT_PASSWD,
|
||||
NBD_CMD_READ,
|
||||
@@ -21,8 +20,6 @@ import {
|
||||
OPTS_MAGIC,
|
||||
NBD_CMD_DISC,
|
||||
} from './constants.mjs'
|
||||
import { Readable } from 'node:stream'
|
||||
|
||||
const { warn } = createLogger('vates:nbd-client')
|
||||
|
||||
// documentation is here : https://github.com/NetworkBlockDevice/nbd/blob/master/doc/proto.md
|
||||
@@ -125,6 +122,8 @@ export default class NbdClient {
|
||||
if (!this.#connected) {
|
||||
return
|
||||
}
|
||||
this.#connected = false
|
||||
const socket = this.#serverSocket
|
||||
|
||||
const queryId = this.#nextCommandQueryId
|
||||
this.#nextCommandQueryId++
|
||||
@@ -137,12 +136,12 @@ export default class NbdClient {
|
||||
buffer.writeBigUInt64BE(0n, 16)
|
||||
buffer.writeInt32BE(0, 24)
|
||||
const promise = pFromCallback(cb => {
|
||||
this.#serverSocket.end(buffer, 'utf8', cb)
|
||||
socket.end(buffer, 'utf8', cb)
|
||||
})
|
||||
try {
|
||||
await pTimeout.call(promise, this.#messageTimeout)
|
||||
} catch (error) {
|
||||
this.#serverSocket.destroy()
|
||||
socket.destroy()
|
||||
}
|
||||
this.#serverSocket = undefined
|
||||
this.#connected = false
|
||||
@@ -290,7 +289,7 @@ export default class NbdClient {
|
||||
}
|
||||
}
|
||||
|
||||
async readBlock(index, size = NBD_DEFAULT_BLOCK_SIZE) {
|
||||
async #readBlock(index, size) {
|
||||
// we don't want to add anything in backlog while reconnecting
|
||||
if (this.#reconnectingPromise) {
|
||||
await this.#reconnectingPromise
|
||||
@@ -338,57 +337,13 @@ export default class NbdClient {
|
||||
})
|
||||
}
|
||||
|
||||
async *readBlocks(indexGenerator = 2 * 1024 * 1024) {
|
||||
// default : read all blocks
|
||||
if (typeof indexGenerator === 'number') {
|
||||
const exportSize = Number(this.#exportSize)
|
||||
const chunkSize = indexGenerator
|
||||
|
||||
indexGenerator = function* () {
|
||||
const nbBlocks = Math.ceil(exportSize / chunkSize)
|
||||
for (let index = 0; index < nbBlocks; index++) {
|
||||
yield { index, size: chunkSize }
|
||||
}
|
||||
}
|
||||
}
|
||||
const readAhead = []
|
||||
const readAheadMaxLength = this.#readAhead
|
||||
const makeReadBlockPromise = (index, size) => {
|
||||
const promise = pRetry(() => this.readBlock(index, size), {
|
||||
tries: this.#readBlockRetries,
|
||||
onRetry: async err => {
|
||||
warn('will retry reading block ', index, err)
|
||||
await this.reconnect()
|
||||
},
|
||||
})
|
||||
// error is handled during unshift
|
||||
promise.catch(() => {})
|
||||
return promise
|
||||
}
|
||||
|
||||
// read all blocks, but try to keep readAheadMaxLength promise waiting ahead
|
||||
for (const { index, size } of indexGenerator()) {
|
||||
// stack readAheadMaxLength promises before starting to handle the results
|
||||
if (readAhead.length === readAheadMaxLength) {
|
||||
// any error will stop reading blocks
|
||||
yield readAhead.shift()
|
||||
}
|
||||
|
||||
readAhead.push(makeReadBlockPromise(index, size))
|
||||
}
|
||||
while (readAhead.length > 0) {
|
||||
yield readAhead.shift()
|
||||
}
|
||||
}
|
||||
|
||||
stream(chunkSize) {
|
||||
async function* iterator() {
|
||||
for await (const chunk of this.readBlocks(chunkSize)) {
|
||||
yield chunk
|
||||
}
|
||||
}
|
||||
// create a readable stream instead of returning the iterator
|
||||
// since iterators don't like unshift and partial reading
|
||||
return Readable.from(iterator())
|
||||
async readBlock(index, size = NBD_DEFAULT_BLOCK_SIZE) {
|
||||
return pRetry(() => this.#readBlock(index, size), {
|
||||
tries: this.#readBlockRetries,
|
||||
onRetry: async err => {
|
||||
warn('will retry reading block ', index, err)
|
||||
await this.reconnect()
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
87
@vates/nbd-client/multi.mjs
Normal file
87
@vates/nbd-client/multi.mjs
Normal file
@@ -0,0 +1,87 @@
|
||||
import { asyncEach } from '@vates/async-each'
|
||||
import { NBD_DEFAULT_BLOCK_SIZE } from './constants.mjs'
|
||||
import NbdClient from './index.mjs'
|
||||
import { createLogger } from '@xen-orchestra/log'
|
||||
|
||||
const { warn } = createLogger('vates:nbd-client:multi')
|
||||
/**
 * NBD client that spreads block reads over multiple underlying connections.
 *
 * Exposes the same surface as NbdClient (connect/disconnect/readBlock/readBlocks)
 * and dispatches each block read to a connection chosen by `index % nbClients`.
 */
export default class MultiNbdClient {
  #clients = []
  #readAhead

  get exportSize() {
    // all clients are connected to the same export: use the first one
    return this.#clients[0].exportSize
  }

  /**
   * @param {object|object[]} settings - connection settings, or a list of them
   *   (underlying clients are assigned round-robin over the list)
   * @param {object} [options]
   * @param {number} [options.nbdConcurrency=8] - number of underlying NBD connections
   * @param {number} [options.readAhead=16] - total number of in-flight block reads,
   *   split between the underlying clients
   */
  constructor(settings, { nbdConcurrency = 8, readAhead = 16, ...options } = {}) {
    this.#readAhead = readAhead
    if (!Array.isArray(settings)) {
      settings = [settings]
    }
    for (let i = 0; i < nbdConcurrency; i++) {
      this.#clients.push(
        new NbdClient(settings[i % settings.length], { ...options, readAhead: Math.ceil(readAhead / nbdConcurrency) })
      )
    }
  }

  /**
   * Connect every underlying client, sequentially.
   *
   * A partial connection is tolerated (logged as a warning, and the failed
   * clients are dropped from the pool); throws only if no client at all
   * could connect.
   */
  async connect() {
    const connectedClients = []
    // `for…of` (not `for…in`): iterate values of the array, not index strings
    for (const client of this.#clients) {
      try {
        await client.connect()
        connectedClients.push(client)
      } catch (err) {
        // best effort cleanup of the half-open connection
        client.disconnect().catch(() => {})
        warn(`can't connect to one nbd client`, { err })
      }
    }
    if (connectedClients.length === 0) {
      throw new Error(`Fail to connect to any Nbd client`)
    }
    if (connectedClients.length < this.#clients.length) {
      warn(
        `incomplete connection by multi Nbd, only ${connectedClients.length} over ${
          this.#clients.length
        } expected clients`
      )
      this.#clients = connectedClients
    }
  }

  /**
   * Disconnect every underlying client, even if some of them fail.
   */
  async disconnect() {
    await asyncEach(this.#clients, client => client.disconnect(), {
      stopOnError: false,
    })
  }

  /**
   * Read one block, delegating to a client chosen from the block index.
   *
   * @param {number} index - block index
   * @param {number} [size=NBD_DEFAULT_BLOCK_SIZE] - block size in bytes
   * @returns {Promise<Buffer>}
   */
  async readBlock(index, size = NBD_DEFAULT_BLOCK_SIZE) {
    const clientId = index % this.#clients.length
    return this.#clients[clientId].readBlock(index, size)
  }

  /**
   * Iterate over blocks, keeping up to `readAhead` reads in flight.
   *
   * @param {(function(): Iterable<{index: number, size: number}>)|number} [indexGenerator]
   *   generator of {index, size} descriptors; a number (or no argument) means
   *   "read the whole export by chunks of that size" (default 2 MiB),
   *   mirroring NbdClient#readBlocks
   */
  async *readBlocks(indexGenerator = 2 * 1024 * 1024) {
    // default : read all blocks
    // (previously, calling readBlocks() without a generator threw a TypeError)
    if (typeof indexGenerator === 'number') {
      const exportSize = Number(this.exportSize)
      const chunkSize = indexGenerator

      indexGenerator = function* () {
        const nbBlocks = Math.ceil(exportSize / chunkSize)
        for (let index = 0; index < nbBlocks; index++) {
          yield { index, size: chunkSize }
        }
      }
    }
    const readAhead = []
    const makeReadBlockPromise = (index, size) => {
      const promise = this.readBlock(index, size)
      // error is handled during unshift
      promise.catch(() => {})
      return promise
    }

    // read all blocks, but try to keep readAheadMaxLength promise waiting ahead
    for (const { index, size } of indexGenerator()) {
      // stack readAheadMaxLength promises before starting to handle the results
      if (readAhead.length === this.#readAhead) {
        // any error will stop reading blocks
        yield readAhead.shift()
      }

      readAhead.push(makeReadBlockPromise(index, size))
    }
    while (readAhead.length > 0) {
      yield readAhead.shift()
    }
  }
}
|
||||
@@ -1,4 +1,3 @@
|
||||
import NbdClient from '../index.mjs'
|
||||
import { spawn, exec } from 'node:child_process'
|
||||
import fs from 'node:fs/promises'
|
||||
import { test } from 'tap'
|
||||
@@ -7,8 +6,10 @@ import { pFromCallback } from 'promise-toolbox'
|
||||
import { Socket } from 'node:net'
|
||||
import { NBD_DEFAULT_PORT } from '../constants.mjs'
|
||||
import assert from 'node:assert'
|
||||
import MultiNbdClient from '../multi.mjs'
|
||||
|
||||
const FILE_SIZE = 10 * 1024 * 1024
|
||||
const CHUNK_SIZE = 1024 * 1024 // non default size
|
||||
const FILE_SIZE = 1024 * 1024 * 9.5 // non aligned file size
|
||||
|
||||
async function createTempFile(size) {
|
||||
const tmpPath = await pFromCallback(cb => tmp.file(cb))
|
||||
@@ -81,7 +82,7 @@ test('it works with unsecured network', async tap => {
|
||||
const path = await createTempFile(FILE_SIZE)
|
||||
|
||||
let nbdServer = await spawnNbdKit(path)
|
||||
const client = new NbdClient(
|
||||
const client = new MultiNbdClient(
|
||||
{
|
||||
address: '127.0.0.1',
|
||||
exportname: 'MY_SECRET_EXPORT',
|
||||
@@ -109,13 +110,13 @@ CYu1Xn/FVPx1HoRgWc7E8wFhDcA/P3SJtfIQWHB9FzSaBflKGR4t8WCE2eE8+cTB
|
||||
`,
|
||||
},
|
||||
{
|
||||
nbdConcurrency: 1,
|
||||
readAhead: 2,
|
||||
}
|
||||
)
|
||||
|
||||
await client.connect()
|
||||
tap.equal(client.exportSize, BigInt(FILE_SIZE))
|
||||
const CHUNK_SIZE = 1024 * 1024 // non default size
|
||||
const indexes = []
|
||||
for (let i = 0; i < FILE_SIZE / CHUNK_SIZE; i++) {
|
||||
indexes.push(i)
|
||||
@@ -127,9 +128,9 @@ CYu1Xn/FVPx1HoRgWc7E8wFhDcA/P3SJtfIQWHB9FzSaBflKGR4t8WCE2eE8+cTB
|
||||
})
|
||||
let i = 0
|
||||
for await (const block of nbdIterator) {
|
||||
let blockOk = true
|
||||
let blockOk = block.length === Math.min(CHUNK_SIZE, FILE_SIZE - CHUNK_SIZE * i)
|
||||
let firstFail
|
||||
for (let j = 0; j < CHUNK_SIZE; j += 4) {
|
||||
for (let j = 0; j < block.length; j += 4) {
|
||||
const wanted = i * CHUNK_SIZE + j
|
||||
const found = block.readUInt32BE(j)
|
||||
blockOk = blockOk && found === wanted
|
||||
@@ -137,7 +138,7 @@ CYu1Xn/FVPx1HoRgWc7E8wFhDcA/P3SJtfIQWHB9FzSaBflKGR4t8WCE2eE8+cTB
|
||||
firstFail = j
|
||||
}
|
||||
}
|
||||
tap.ok(blockOk, `check block ${i} content`)
|
||||
tap.ok(blockOk, `check block ${i} content ${block.length}`)
|
||||
i++
|
||||
|
||||
// flaky server is flaky
|
||||
@@ -147,17 +148,6 @@ CYu1Xn/FVPx1HoRgWc7E8wFhDcA/P3SJtfIQWHB9FzSaBflKGR4t8WCE2eE8+cTB
|
||||
nbdServer = await spawnNbdKit(path)
|
||||
}
|
||||
}
|
||||
|
||||
// we can reuse the connection to read other blocks
|
||||
// default iterator
|
||||
const nbdIteratorWithDefaultBlockIterator = client.readBlocks()
|
||||
let nb = 0
|
||||
for await (const block of nbdIteratorWithDefaultBlockIterator) {
|
||||
nb++
|
||||
tap.equal(block.length, 2 * 1024 * 1024)
|
||||
}
|
||||
|
||||
tap.equal(nb, 5)
|
||||
assert.rejects(() => client.readBlock(100, CHUNK_SIZE))
|
||||
|
||||
await client.disconnect()
|
||||
|
||||
@@ -67,6 +67,11 @@ async function generateVhd(path, opts = {}) {
|
||||
await VhdAbstract.createAlias(handler, path + '.alias.vhd', dataPath)
|
||||
}
|
||||
|
||||
if (opts.blocks) {
|
||||
for (const blockId of opts.blocks) {
|
||||
await vhd.writeEntireBlock({ id: blockId, buffer: Buffer.alloc(2 * 1024 * 1024 + 512, blockId) })
|
||||
}
|
||||
}
|
||||
await vhd.writeBlockAllocationTable()
|
||||
await vhd.writeHeader()
|
||||
await vhd.writeFooter()
|
||||
@@ -230,7 +235,7 @@ test('it merges delta of non destroyed chain', async () => {
|
||||
|
||||
const metadata = JSON.parse(await handler.readFile(`${rootPath}/metadata.json`))
|
||||
// size should be the size of children + grand children after the merge
|
||||
assert.equal(metadata.size, 209920)
|
||||
assert.equal(metadata.size, 104960)
|
||||
|
||||
// merging is already tested in vhd-lib, don't retest it here (and these VHDs are as empty as my stomach at 12h12)
|
||||
// only check deletion
|
||||
@@ -320,6 +325,7 @@ describe('tests multiple combination ', () => {
|
||||
const ancestor = await generateVhd(`${basePath}/ancestor.vhd`, {
|
||||
useAlias,
|
||||
mode: vhdMode,
|
||||
blocks: [1, 3],
|
||||
})
|
||||
const child = await generateVhd(`${basePath}/child.vhd`, {
|
||||
useAlias,
|
||||
@@ -328,6 +334,7 @@ describe('tests multiple combination ', () => {
|
||||
parentUnicodeName: 'ancestor.vhd' + (useAlias ? '.alias.vhd' : ''),
|
||||
parentUuid: ancestor.footer.uuid,
|
||||
},
|
||||
blocks: [1, 2],
|
||||
})
|
||||
// a grand child vhd in metadata
|
||||
await generateVhd(`${basePath}/grandchild.vhd`, {
|
||||
@@ -337,6 +344,7 @@ describe('tests multiple combination ', () => {
|
||||
parentUnicodeName: 'child.vhd' + (useAlias ? '.alias.vhd' : ''),
|
||||
parentUuid: child.footer.uuid,
|
||||
},
|
||||
blocks: [2, 3],
|
||||
})
|
||||
|
||||
// an older parent that was merging in clean
|
||||
@@ -395,7 +403,7 @@ describe('tests multiple combination ', () => {
|
||||
|
||||
const metadata = JSON.parse(await handler.readFile(`${rootPath}/metadata.json`))
|
||||
// size should be the size of children + grand children + clean after the merge
|
||||
assert.deepEqual(metadata.size, vhdMode === 'file' ? 314880 : undefined)
|
||||
assert.deepEqual(metadata.size, vhdMode === 'file' ? 6502400 : 6501888)
|
||||
|
||||
// broken, non-referenced or abandoned vhds should be deleted (alias and data)
|
||||
// ancestor and child should be merged
|
||||
|
||||
@@ -36,34 +36,32 @@ const computeVhdsSize = (handler, vhdPaths) =>
|
||||
)
|
||||
|
||||
// chain is [ ancestor, child_1, ..., child_n ]
|
||||
async function _mergeVhdChain(handler, chain, { logInfo, remove, merge, mergeBlockConcurrency }) {
|
||||
if (merge) {
|
||||
logInfo(`merging VHD chain`, { chain })
|
||||
async function _mergeVhdChain(handler, chain, { logInfo, remove, mergeBlockConcurrency }) {
|
||||
logInfo(`merging VHD chain`, { chain })
|
||||
|
||||
let done, total
|
||||
const handle = setInterval(() => {
|
||||
if (done !== undefined) {
|
||||
logInfo('merge in progress', {
|
||||
done,
|
||||
parent: chain[0],
|
||||
progress: Math.round((100 * done) / total),
|
||||
total,
|
||||
})
|
||||
}
|
||||
}, 10e3)
|
||||
try {
|
||||
return await mergeVhdChain(handler, chain, {
|
||||
logInfo,
|
||||
mergeBlockConcurrency,
|
||||
onProgress({ done: d, total: t }) {
|
||||
done = d
|
||||
total = t
|
||||
},
|
||||
removeUnused: remove,
|
||||
let done, total
|
||||
const handle = setInterval(() => {
|
||||
if (done !== undefined) {
|
||||
logInfo('merge in progress', {
|
||||
done,
|
||||
parent: chain[0],
|
||||
progress: Math.round((100 * done) / total),
|
||||
total,
|
||||
})
|
||||
} finally {
|
||||
clearInterval(handle)
|
||||
}
|
||||
}, 10e3)
|
||||
try {
|
||||
return await mergeVhdChain(handler, chain, {
|
||||
logInfo,
|
||||
mergeBlockConcurrency,
|
||||
onProgress({ done: d, total: t }) {
|
||||
done = d
|
||||
total = t
|
||||
},
|
||||
removeUnused: remove,
|
||||
})
|
||||
} finally {
|
||||
clearInterval(handle)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -471,23 +469,20 @@ export async function cleanVm(
|
||||
const metadataWithMergedVhd = {}
|
||||
const doMerge = async () => {
|
||||
await asyncMap(toMerge, async chain => {
|
||||
const merged = await limitedMergeVhdChain(handler, chain, {
|
||||
const { finalVhdSize } = await limitedMergeVhdChain(handler, chain, {
|
||||
logInfo,
|
||||
logWarn,
|
||||
remove,
|
||||
merge,
|
||||
mergeBlockConcurrency,
|
||||
})
|
||||
if (merged !== undefined) {
|
||||
const metadataPath = vhdsToJSons[chain[chain.length - 1]] // all the chain should have the same metadata file
|
||||
metadataWithMergedVhd[metadataPath] = true
|
||||
}
|
||||
const metadataPath = vhdsToJSons[chain[chain.length - 1]] // all the chain should have the same metadata file
|
||||
metadataWithMergedVhd[metadataPath] = (metadataWithMergedVhd[metadataPath] ?? 0) + finalVhdSize
|
||||
})
|
||||
}
|
||||
|
||||
await Promise.all([
|
||||
...unusedVhdsDeletion,
|
||||
toMerge.length !== 0 && (merge ? Task.run({ name: 'merge' }, doMerge) : doMerge()),
|
||||
toMerge.length !== 0 && (merge ? Task.run({ name: 'merge' }, doMerge) : () => Promise.resolve()),
|
||||
asyncMap(unusedXvas, path => {
|
||||
logWarn('unused XVA', { path })
|
||||
if (remove) {
|
||||
@@ -509,12 +504,11 @@ export async function cleanVm(
|
||||
|
||||
// update size for delta metadata with merged VHD
|
||||
// check for the other that the size is the same as the real file size
|
||||
|
||||
await asyncMap(jsons, async metadataPath => {
|
||||
const metadata = backups.get(metadataPath)
|
||||
|
||||
let fileSystemSize
|
||||
const merged = metadataWithMergedVhd[metadataPath] !== undefined
|
||||
const mergedSize = metadataWithMergedVhd[metadataPath]
|
||||
|
||||
const { mode, size, vhds, xva } = metadata
|
||||
|
||||
@@ -524,26 +518,29 @@ export async function cleanVm(
|
||||
const linkedXva = resolve('/', vmDir, xva)
|
||||
try {
|
||||
fileSystemSize = await handler.getSize(linkedXva)
|
||||
if (fileSystemSize !== size && fileSystemSize !== undefined) {
|
||||
logWarn('cleanVm: incorrect backup size in metadata', {
|
||||
path: metadataPath,
|
||||
actual: size ?? 'none',
|
||||
expected: fileSystemSize,
|
||||
})
|
||||
}
|
||||
} catch (error) {
|
||||
// can fail with encrypted remote
|
||||
}
|
||||
} else if (mode === 'delta') {
|
||||
const linkedVhds = Object.keys(vhds).map(key => resolve('/', vmDir, vhds[key]))
|
||||
fileSystemSize = await computeVhdsSize(handler, linkedVhds)
|
||||
|
||||
// the size is not computed in some cases (e.g. VhdDirectory)
|
||||
if (fileSystemSize === undefined) {
|
||||
return
|
||||
}
|
||||
|
||||
// don't warn if the size has changed after a merge
|
||||
if (!merged && fileSystemSize !== size) {
|
||||
// FIXME: figure out why it occurs so often and, once fixed, log the real problems with `logWarn`
|
||||
console.warn('cleanVm: incorrect backup size in metadata', {
|
||||
path: metadataPath,
|
||||
actual: size ?? 'none',
|
||||
expected: fileSystemSize,
|
||||
})
|
||||
if (mergedSize === undefined) {
|
||||
const linkedVhds = Object.keys(vhds).map(key => resolve('/', vmDir, vhds[key]))
|
||||
fileSystemSize = await computeVhdsSize(handler, linkedVhds)
|
||||
// the size is not computed in some cases (e.g. VhdDirectory)
|
||||
if (fileSystemSize !== undefined && fileSystemSize !== size) {
|
||||
logWarn('cleanVm: incorrect backup size in metadata', {
|
||||
path: metadataPath,
|
||||
actual: size ?? 'none',
|
||||
expected: fileSystemSize,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
@@ -551,9 +548,19 @@ export async function cleanVm(
|
||||
return
|
||||
}
|
||||
|
||||
// systematically update size after a merge
|
||||
if ((merged || fixMetadata) && size !== fileSystemSize) {
|
||||
metadata.size = fileSystemSize
|
||||
// systematically update size and differentials after a merge
|
||||
|
||||
// @todo : after 2024-04-01 remove the fixmetadata options since the size computation is fixed
|
||||
if (mergedSize || (fixMetadata && fileSystemSize !== size)) {
|
||||
metadata.size = mergedSize ?? fileSystemSize ?? size
|
||||
|
||||
if (mergedSize) {
|
||||
// all disks are now key disk
|
||||
metadata.isVhdDifferencing = {}
|
||||
for (const id of Object.values(metadata.vdis ?? {})) {
|
||||
metadata.isVhdDifferencing[`${id}.vhd`] = false
|
||||
}
|
||||
}
|
||||
mustRegenerateCache = true
|
||||
try {
|
||||
await handler.writeFile(metadataPath, JSON.stringify(metadata), { flags: 'w' })
|
||||
|
||||
@@ -34,6 +34,7 @@ export async function exportIncrementalVm(
|
||||
fullVdisRequired = new Set(),
|
||||
|
||||
disableBaseTags = false,
|
||||
nbdConcurrency = 1,
|
||||
preferNbd,
|
||||
} = {}
|
||||
) {
|
||||
@@ -82,6 +83,7 @@ export async function exportIncrementalVm(
|
||||
baseRef: baseVdi?.$ref,
|
||||
cancelToken,
|
||||
format: 'vhd',
|
||||
nbdConcurrency,
|
||||
preferNbd,
|
||||
})
|
||||
})
|
||||
|
||||
@@ -32,10 +32,10 @@ class IncrementalRemoteVmBackupRunner extends AbstractRemote {
|
||||
useChain: false,
|
||||
})
|
||||
|
||||
const differentialVhds = {}
|
||||
const isVhdDifferencing = {}
|
||||
|
||||
await asyncEach(Object.entries(incrementalExport.streams), async ([key, stream]) => {
|
||||
differentialVhds[key] = await isVhdDifferencingDisk(stream)
|
||||
isVhdDifferencing[key] = await isVhdDifferencingDisk(stream)
|
||||
})
|
||||
|
||||
incrementalExport.streams = mapValues(incrementalExport.streams, this._throttleStream)
|
||||
@@ -43,7 +43,7 @@ class IncrementalRemoteVmBackupRunner extends AbstractRemote {
|
||||
writer =>
|
||||
writer.transfer({
|
||||
deltaExport: forkDeltaExport(incrementalExport),
|
||||
differentialVhds,
|
||||
isVhdDifferencing,
|
||||
timestamp: metadata.timestamp,
|
||||
vm: metadata.vm,
|
||||
vmSnapshot: metadata.vmSnapshot,
|
||||
|
||||
@@ -41,6 +41,7 @@ export const IncrementalXapi = class IncrementalXapiVmBackupRunner extends Abstr
|
||||
|
||||
const deltaExport = await exportIncrementalVm(exportedVm, baseVm, {
|
||||
fullVdisRequired,
|
||||
nbdConcurrency: this._settings.nbdConcurrency,
|
||||
preferNbd: this._settings.preferNbd,
|
||||
})
|
||||
// since NBD is network based, if one disk uses NBD, all the disks use it
|
||||
@@ -49,11 +50,11 @@ export const IncrementalXapi = class IncrementalXapiVmBackupRunner extends Abstr
|
||||
Task.info('Transfer data using NBD')
|
||||
}
|
||||
|
||||
const differentialVhds = {}
|
||||
const isVhdDifferencing = {}
|
||||
// since isVhdDifferencingDisk is reading and unshifting data in stream
|
||||
// it should be done BEFORE any other stream transform
|
||||
await asyncEach(Object.entries(deltaExport.streams), async ([key, stream]) => {
|
||||
differentialVhds[key] = await isVhdDifferencingDisk(stream)
|
||||
isVhdDifferencing[key] = await isVhdDifferencingDisk(stream)
|
||||
})
|
||||
const sizeContainers = mapValues(deltaExport.streams, stream => watchStreamSize(stream))
|
||||
|
||||
@@ -68,7 +69,7 @@ export const IncrementalXapi = class IncrementalXapiVmBackupRunner extends Abstr
|
||||
writer =>
|
||||
writer.transfer({
|
||||
deltaExport: forkDeltaExport(deltaExport),
|
||||
differentialVhds,
|
||||
isVhdDifferencing,
|
||||
sizeContainers,
|
||||
timestamp,
|
||||
vm,
|
||||
|
||||
@@ -133,7 +133,7 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
|
||||
}
|
||||
}
|
||||
|
||||
async _transfer($defer, { differentialVhds, timestamp, deltaExport, vm, vmSnapshot }) {
|
||||
async _transfer($defer, { isVhdDifferencing, timestamp, deltaExport, vm, vmSnapshot }) {
|
||||
const adapter = this._adapter
|
||||
const job = this._job
|
||||
const scheduleId = this._scheduleId
|
||||
@@ -161,6 +161,7 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
|
||||
)
|
||||
|
||||
metadataContent = {
|
||||
isVhdDifferencing,
|
||||
jobId,
|
||||
mode: job.mode,
|
||||
scheduleId,
|
||||
@@ -180,9 +181,9 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
|
||||
async ([id, vdi]) => {
|
||||
const path = `${this._vmBackupDir}/${vhds[id]}`
|
||||
|
||||
const isDelta = differentialVhds[`${id}.vhd`]
|
||||
const isDifferencing = isVhdDifferencing[`${id}.vhd`]
|
||||
let parentPath
|
||||
if (isDelta) {
|
||||
if (isDifferencing) {
|
||||
const vdiDir = dirname(path)
|
||||
parentPath = (
|
||||
await handler.list(vdiDir, {
|
||||
@@ -204,16 +205,20 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
|
||||
// TODO remove when this has been done before the export
|
||||
await checkVhd(handler, parentPath)
|
||||
}
|
||||
|
||||
transferSize += await adapter.writeVhd(path, deltaExport.streams[`${id}.vhd`], {
|
||||
|
||||
// don't write it as transferSize += await async function
|
||||
// since i += await asyncFun leads to a race condition
|
||||
// as explained : https://eslint.org/docs/latest/rules/require-atomic-updates
|
||||
const transferSizeOneDisk = await adapter.writeVhd(path, deltaExport.streams[`${id}.vhd`], {
|
||||
// no checksum for VHDs, because they will be invalidated by
|
||||
// merges and chainings
|
||||
checksum: false,
|
||||
validator: tmpPath => checkVhd(handler, tmpPath),
|
||||
writeBlockConcurrency: this._config.writeBlockConcurrency,
|
||||
})
|
||||
transferSize += transferSizeOneDisk
|
||||
|
||||
if (isDelta) {
|
||||
if (isDifferencing) {
|
||||
await chainVhd(handler, parentPath, handler, path)
|
||||
}
|
||||
|
||||
|
||||
@@ -221,7 +221,7 @@ For multiple objects:
|
||||
|
||||
### Settings
|
||||
|
||||
Settings are described in [`@xen-orchestra/backups/Backup.js](https://github.com/vatesfr/xen-orchestra/blob/master/%40xen-orchestra/backups/Backup.js).
|
||||
Settings are described in [`@xen-orchestra/backups/_runners/VmsXapi.mjs`](https://github.com/vatesfr/xen-orchestra/blob/master/%40xen-orchestra/backups/_runners/VmsXapi.mjs).
|
||||
|
||||
## Writer API
|
||||
|
||||
|
||||
@@ -25,6 +25,8 @@ function formatVmBackup(backup) {
|
||||
name_description: backup.vm.name_description,
|
||||
name_label: backup.vm.name_label,
|
||||
},
|
||||
differencingVhds: Object.values(backup.isVhdDifferencing).filter(t => t).length,
|
||||
dynamicVhds: Object.values(backup.isVhdDifferencing).filter(t => !t).length,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -2,6 +2,12 @@
|
||||
|
||||
## **next**
|
||||
|
||||
- [VM/Action] Ability to migrate a VM from its view (PR [#7164](https://github.com/vatesfr/xen-orchestra/pull/7164))
|
||||
- Ability to override host address with `master` URL query param (PR [#7187](https://github.com/vatesfr/xen-orchestra/pull/7187))
|
||||
- Added tooltip on CPU provisioning warning icon (PR [#7223](https://github.com/vatesfr/xen-orchestra/pull/7223))
|
||||
- Add indeterminate state on FormToggle component (PR [#7230](https://github.com/vatesfr/xen-orchestra/pull/7230))
|
||||
- Add new UiStatusPanel component (PR [#7227](https://github.com/vatesfr/xen-orchestra/pull/7227))
|
||||
|
||||
## **0.1.6** (2023-11-30)
|
||||
|
||||
- Explicit error if users attempt to connect from a slave host (PR [#7110](https://github.com/vatesfr/xen-orchestra/pull/7110))
|
||||
|
||||
@@ -12,6 +12,7 @@
|
||||
</RouterLink>
|
||||
<slot />
|
||||
<div class="right">
|
||||
<PoolOverrideWarning as-tooltip />
|
||||
<AccountButton />
|
||||
</div>
|
||||
</header>
|
||||
@@ -19,6 +20,7 @@
|
||||
|
||||
<script lang="ts" setup>
|
||||
import AccountButton from "@/components/AccountButton.vue";
|
||||
import PoolOverrideWarning from "@/components/PoolOverrideWarning.vue";
|
||||
import TextLogo from "@/components/TextLogo.vue";
|
||||
import UiIcon from "@/components/ui/icon/UiIcon.vue";
|
||||
import { useNavigationStore } from "@/stores/navigation.store";
|
||||
@@ -51,6 +53,10 @@ const { trigger: navigationTrigger } = storeToRefs(navigationStore);
|
||||
margin-left: 1rem;
|
||||
vertical-align: middle;
|
||||
}
|
||||
|
||||
.warning-not-current-pool {
|
||||
font-size: 2.4rem;
|
||||
}
|
||||
}
|
||||
|
||||
.right {
|
||||
|
||||
@@ -2,6 +2,7 @@
|
||||
<div class="app-login form-container">
|
||||
<form @submit.prevent="handleSubmit">
|
||||
<img alt="XO Lite" src="../assets/logo-title.svg" />
|
||||
<PoolOverrideWarning />
|
||||
<p v-if="isHostIsSlaveErr(error)" class="error">
|
||||
<UiIcon :icon="faExclamationCircle" />
|
||||
{{ $t("login-only-on-master") }}
|
||||
@@ -45,6 +46,7 @@ import FormCheckbox from "@/components/form/FormCheckbox.vue";
|
||||
import FormInput from "@/components/form/FormInput.vue";
|
||||
import FormInputWrapper from "@/components/form/FormInputWrapper.vue";
|
||||
import LoginError from "@/components/LoginError.vue";
|
||||
import PoolOverrideWarning from "@/components/PoolOverrideWarning.vue";
|
||||
import UiButton from "@/components/ui/UiButton.vue";
|
||||
import UiIcon from "@/components/ui/icon/UiIcon.vue";
|
||||
import type { XenApiError } from "@/libs/xen-api/xen-api.types";
|
||||
|
||||
@@ -1,49 +1,28 @@
|
||||
<template>
|
||||
<div class="page-under-construction">
|
||||
<img alt="Under construction" src="@/assets/under-construction.svg" />
|
||||
<p class="title">{{ $t("xo-lite-under-construction") }}</p>
|
||||
<p class="subtitle">{{ $t("new-features-are-coming") }}</p>
|
||||
<UiStatusPanel
|
||||
:image-source="underConstruction"
|
||||
:subtitle="$t('new-features-are-coming')"
|
||||
:title="$t('xo-lite-under-construction')"
|
||||
>
|
||||
<p class="contact">
|
||||
{{ $t("do-you-have-needs") }}
|
||||
<a
|
||||
href="https://xcp-ng.org/forum/topic/5018/xo-lite-building-an-embedded-ui-in-xcp-ng"
|
||||
target="_blank"
|
||||
rel="noopener noreferrer"
|
||||
target="_blank"
|
||||
>
|
||||
{{ $t("here") }} →
|
||||
</a>
|
||||
</p>
|
||||
</div>
|
||||
</UiStatusPanel>
|
||||
</template>
|
||||
|
||||
<script lang="ts" setup>
|
||||
import underConstruction from "@/assets/under-construction.svg";
|
||||
import UiStatusPanel from "@/components/ui/UiStatusPanel.vue";
|
||||
</script>
|
||||
|
||||
<style lang="postcss" scoped>
|
||||
.page-under-construction {
|
||||
width: 100%;
|
||||
min-height: 76.5vh;
|
||||
height: 100%;
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
color: var(--color-extra-blue-base);
|
||||
}
|
||||
|
||||
img {
|
||||
margin-bottom: 40px;
|
||||
width: 30%;
|
||||
}
|
||||
.title {
|
||||
font-weight: 400;
|
||||
font-size: 36px;
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
.subtitle {
|
||||
font-weight: 500;
|
||||
font-size: 24px;
|
||||
margin: 21px 0;
|
||||
text-align: center;
|
||||
}
|
||||
.contact {
|
||||
font-weight: 400;
|
||||
font-size: 20px;
|
||||
|
||||
59
@xen-orchestra/lite/src/components/PoolOverrideWarning.vue
Normal file
59
@xen-orchestra/lite/src/components/PoolOverrideWarning.vue
Normal file
@@ -0,0 +1,59 @@
|
||||
<template>
  <!--
    Warning shown when the pool master address has been overridden (via the
    `master` session-storage key). Clicking it resets to the default pool.
    With `as-tooltip`, the explanation is moved into a tooltip and only the
    icon is displayed.
  -->
  <div
    v-if="xenApi.isPoolOverridden"
    class="warning-not-current-pool"
    @click="xenApi.resetPoolMasterIp"
    v-tooltip="
      asTooltip && {
        placement: 'right',
        content: `
          ${$t('you-are-currently-on', [masterSessionStorage])}.
          ${$t('click-to-return-default-pool')}
        `,
      }
    "
  >
    <div class="wrapper">
      <UiIcon :icon="faWarning" />
      <!-- full text version, hidden when the tooltip carries the message -->
      <p v-if="!asTooltip">
        <i18n-t keypath="you-are-currently-on">
          <strong>{{ masterSessionStorage }}</strong>
        </i18n-t>
        <br />
        {{ $t("click-to-return-default-pool") }}
      </p>
    </div>
  </div>
</template>

<script lang="ts" setup>
import { faWarning } from "@fortawesome/free-solid-svg-icons";
import { useSessionStorage } from "@vueuse/core";

import UiIcon from "@/components/ui/icon/UiIcon.vue";
import { useXenApiStore } from "@/stores/xen-api.store";
import { vTooltip } from "@/directives/tooltip.directive";

// asTooltip: when true, render only the icon and put the text in a tooltip
defineProps<{
  asTooltip?: boolean;
}>();

const xenApi = useXenApiStore();
// overridden master address, kept in session storage under the "master" key
const masterSessionStorage = useSessionStorage("master", null);
</script>

<style lang="postcss" scoped>
.warning-not-current-pool {
  color: var(--color-orange-world-base);
  cursor: pointer;

  .wrapper {
    display: flex;
    justify-content: center;

    svg {
      margin: auto 1rem;
    }
  }
}
</style>
|
||||
@@ -6,7 +6,7 @@
|
||||
>
|
||||
<input
|
||||
v-model="value"
|
||||
:class="{ indeterminate: type === 'checkbox' && value === undefined }"
|
||||
:class="{ indeterminate: isIndeterminate }"
|
||||
:disabled="isDisabled"
|
||||
:type="type === 'radio' ? 'radio' : 'checkbox'"
|
||||
class="input"
|
||||
@@ -60,6 +60,10 @@ const icon = computed(() => {
|
||||
|
||||
return faCheck;
|
||||
});
|
||||
|
||||
const isIndeterminate = computed(
|
||||
() => (type === "checkbox" || type === "toggle") && value.value === undefined
|
||||
);
|
||||
</script>
|
||||
|
||||
<style lang="postcss" scoped>
|
||||
@@ -127,6 +131,12 @@ const icon = computed(() => {
|
||||
.input:checked + .fake-checkbox > .icon {
|
||||
transform: translateX(0.7em);
|
||||
}
|
||||
|
||||
.input.indeterminate + .fake-checkbox > .icon {
|
||||
opacity: 1;
|
||||
color: var(--color-blue-scale-300);
|
||||
transform: translateX(0);
|
||||
}
|
||||
}
|
||||
|
||||
.input {
|
||||
|
||||
@@ -3,8 +3,14 @@
|
||||
<UiCardTitle>
|
||||
{{ $t("cpu-provisioning") }}
|
||||
<template v-if="!hasError" #right>
|
||||
<!-- TODO: add a tooltip for the warning icon -->
|
||||
<UiStatusIcon v-if="state !== 'success'" :state="state" />
|
||||
<UiStatusIcon
|
||||
v-if="state !== 'success'"
|
||||
v-tooltip="{
|
||||
content: $t('cpu-provisioning-warning'),
|
||||
placement: 'left',
|
||||
}"
|
||||
:state="state"
|
||||
/>
|
||||
</template>
|
||||
</UiCardTitle>
|
||||
<NoDataError v-if="hasError" />
|
||||
@@ -37,11 +43,12 @@ import UiCard from "@/components/ui/UiCard.vue";
|
||||
import UiCardFooter from "@/components/ui/UiCardFooter.vue";
|
||||
import UiCardSpinner from "@/components/ui/UiCardSpinner.vue";
|
||||
import UiCardTitle from "@/components/ui/UiCardTitle.vue";
|
||||
import { useHostCollection } from "@/stores/xen-api/host.store";
|
||||
import { useVmCollection } from "@/stores/xen-api/vm.store";
|
||||
import { useVmMetricsCollection } from "@/stores/xen-api/vm-metrics.store";
|
||||
import { vTooltip } from "@/directives/tooltip.directive";
|
||||
import { percent } from "@/libs/utils";
|
||||
import { VM_POWER_STATE } from "@/libs/xen-api/xen-api.enums";
|
||||
import { useHostCollection } from "@/stores/xen-api/host.store";
|
||||
import { useVmMetricsCollection } from "@/stores/xen-api/vm-metrics.store";
|
||||
import { useVmCollection } from "@/stores/xen-api/vm.store";
|
||||
import { logicAnd } from "@vueuse/math";
|
||||
import { computed } from "vue";
|
||||
|
||||
|
||||
47
@xen-orchestra/lite/src/components/ui/UiStatusPanel.vue
Normal file
47
@xen-orchestra/lite/src/components/ui/UiStatusPanel.vue
Normal file
@@ -0,0 +1,47 @@
|
||||
<template>
|
||||
<div class="ui-status-panel">
|
||||
<img :src="imageSource" alt="" class="image" />
|
||||
<p v-if="title !== undefined" class="title">{{ title }}</p>
|
||||
<p v-if="subtitle !== undefined" class="subtitle">{{ subtitle }}</p>
|
||||
<slot />
|
||||
</div>
|
||||
</template>
|
||||
|
||||
<script lang="ts" setup>
|
||||
defineProps<{
|
||||
imageSource: string;
|
||||
title?: string;
|
||||
subtitle?: string;
|
||||
}>();
|
||||
</script>
|
||||
|
||||
<style lang="postcss" scoped>
|
||||
.ui-status-panel {
|
||||
width: 100%;
|
||||
min-height: 76.5vh;
|
||||
height: 100%;
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
color: var(--color-extra-blue-base);
|
||||
}
|
||||
|
||||
.title {
|
||||
font-weight: 400;
|
||||
font-size: 36px;
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
.subtitle {
|
||||
font-weight: 500;
|
||||
font-size: 24px;
|
||||
margin: 21px 0;
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
.image {
|
||||
margin-bottom: 40px;
|
||||
width: 30%;
|
||||
}
|
||||
</style>
|
||||
@@ -3,7 +3,11 @@
|
||||
v-tooltip="
|
||||
selectedRefs.length > 0 &&
|
||||
!isMigratable &&
|
||||
$t('no-selected-vm-can-be-migrated')
|
||||
$t(
|
||||
isSingleAction
|
||||
? 'this-vm-cant-be-migrated'
|
||||
: 'no-selected-vm-can-be-migrated'
|
||||
)
|
||||
"
|
||||
:busy="isMigrating"
|
||||
:disabled="isParentDisabled || !isMigratable"
|
||||
@@ -28,6 +32,7 @@ import { computed } from "vue";
|
||||
|
||||
const props = defineProps<{
|
||||
selectedRefs: XenApiVm["$ref"][];
|
||||
isSingleAction?: boolean;
|
||||
}>();
|
||||
|
||||
const { getByOpaqueRefs, isOperationPending, areSomeOperationAllowed } =
|
||||
|
||||
@@ -28,6 +28,7 @@
|
||||
<VmActionCopyItem :selected-refs="[vm.$ref]" is-single-action />
|
||||
<VmActionExportItem :vm-refs="[vm.$ref]" is-single-action />
|
||||
<VmActionSnapshotItem :vm-refs="[vm.$ref]" />
|
||||
<VmActionMigrateItem :selected-refs="[vm.$ref]" is-single-action />
|
||||
</AppMenu>
|
||||
</template>
|
||||
</TitleBar>
|
||||
@@ -38,6 +39,7 @@ import AppMenu from "@/components/menu/AppMenu.vue";
|
||||
import TitleBar from "@/components/TitleBar.vue";
|
||||
import UiIcon from "@/components/ui/icon/UiIcon.vue";
|
||||
import UiButton from "@/components/ui/UiButton.vue";
|
||||
import VmActionMigrateItem from "@/components/vm/VmActionItems/VmActionMigrateItem.vue";
|
||||
import VmActionPowerStateItems from "@/components/vm/VmActionItems/VmActionPowerStateItems.vue";
|
||||
import VmActionSnapshotItem from "@/components/vm/VmActionItems/VmActionSnapshotItem.vue";
|
||||
import VmActionCopyItem from "@/components/vm/VmActionItems/VmActionCopyItem.vue";
|
||||
|
||||
@@ -27,6 +27,7 @@
|
||||
"cancel": "Cancel",
|
||||
"change-state": "Change state",
|
||||
"click-to-display-alarms": "Click to display alarms:",
|
||||
"click-to-return-default-pool": "Click here to return to the default pool",
|
||||
"close": "Close",
|
||||
"coming-soon": "Coming soon!",
|
||||
"community": "Community",
|
||||
@@ -37,6 +38,7 @@
|
||||
"console-unavailable": "Console unavailable",
|
||||
"copy": "Copy",
|
||||
"cpu-provisioning": "CPU provisioning",
|
||||
"cpu-provisioning-warning": "The number of vCPUs allocated exceeds the number of physical CPUs available. System performance could be affected",
|
||||
"cpu-usage": "CPU usage",
|
||||
"dashboard": "Dashboard",
|
||||
"delete": "Delete",
|
||||
@@ -177,6 +179,7 @@
|
||||
"theme-auto": "Auto",
|
||||
"theme-dark": "Dark",
|
||||
"theme-light": "Light",
|
||||
"this-vm-cant-be-migrated": "This VM can't be migrated",
|
||||
"top-#": "Top {n}",
|
||||
"total-cpus": "Total CPUs",
|
||||
"total-free": "Total free",
|
||||
@@ -189,5 +192,6 @@
|
||||
"vm-is-running": "The VM is running",
|
||||
"vms": "VMs",
|
||||
"xo-lite-under-construction": "XOLite is under construction",
|
||||
"you-are-currently-on": "You are currently on: {0}",
|
||||
"zstd": "zstd"
|
||||
}
|
||||
|
||||
@@ -27,6 +27,7 @@
|
||||
"cancel": "Annuler",
|
||||
"change-state": "Changer l'état",
|
||||
"click-to-display-alarms": "Cliquer pour afficher les alarmes :",
|
||||
"click-to-return-default-pool": "Cliquer ici pour revenir au pool par défaut",
|
||||
"close": "Fermer",
|
||||
"coming-soon": "Bientôt disponible !",
|
||||
"community": "Communauté",
|
||||
@@ -37,6 +38,7 @@
|
||||
"console-unavailable": "Console indisponible",
|
||||
"copy": "Copier",
|
||||
"cpu-provisioning": "Provisionnement CPU",
|
||||
"cpu-provisioning-warning": "Le nombre de vCPU alloués dépasse le nombre de CPU physique disponible. Les performances du système pourraient être affectées",
|
||||
"cpu-usage": "Utilisation CPU",
|
||||
"dashboard": "Tableau de bord",
|
||||
"delete": "Supprimer",
|
||||
@@ -177,6 +179,7 @@
|
||||
"theme-auto": "Auto",
|
||||
"theme-dark": "Sombre",
|
||||
"theme-light": "Clair",
|
||||
"this-vm-cant-be-migrated": "Cette VM ne peut pas être migrée",
|
||||
"top-#": "Top {n}",
|
||||
"total-cpus": "Total CPUs",
|
||||
"total-free": "Total libre",
|
||||
@@ -189,5 +192,6 @@
|
||||
"vm-is-running": "La VM est en cours d'exécution",
|
||||
"vms": "VMs",
|
||||
"xo-lite-under-construction": "XOLite est en construction",
|
||||
"you-are-currently-on": "Vous êtes actuellement sur : {0}",
|
||||
"zstd": "zstd"
|
||||
}
|
||||
|
||||
@@ -1,8 +1,10 @@
|
||||
import XapiStats from "@/libs/xapi-stats";
|
||||
import XenApi from "@/libs/xen-api/xen-api";
|
||||
import { useLocalStorage } from "@vueuse/core";
|
||||
import { useLocalStorage, useSessionStorage, whenever } from "@vueuse/core";
|
||||
import { defineStore } from "pinia";
|
||||
import { computed, ref, watchEffect } from "vue";
|
||||
import { useRouter } from "vue-router";
|
||||
import { useRoute } from "vue-router";
|
||||
|
||||
const HOST_URL = import.meta.env.PROD
|
||||
? window.origin
|
||||
@@ -15,7 +17,27 @@ enum STATUS {
|
||||
}
|
||||
|
||||
export const useXenApiStore = defineStore("xen-api", () => {
|
||||
const xenApi = new XenApi(HOST_URL);
|
||||
// undefined not correctly handled. See https://github.com/vueuse/vueuse/issues/3595
|
||||
const masterSessionStorage = useSessionStorage<null | string>("master", null);
|
||||
const router = useRouter();
|
||||
const route = useRoute();
|
||||
|
||||
whenever(
|
||||
() => route.query.master,
|
||||
async (newMaster) => {
|
||||
masterSessionStorage.value = newMaster as string;
|
||||
await router.replace({ query: { ...route.query, master: undefined } });
|
||||
window.location.reload();
|
||||
}
|
||||
);
|
||||
|
||||
const hostUrl = new URL(HOST_URL);
|
||||
if (masterSessionStorage.value !== null) {
|
||||
hostUrl.hostname = masterSessionStorage.value;
|
||||
}
|
||||
|
||||
const isPoolOverridden = hostUrl.origin !== new URL(HOST_URL).origin;
|
||||
const xenApi = new XenApi(hostUrl.origin);
|
||||
const xapiStats = new XapiStats(xenApi);
|
||||
const storedSessionId = useLocalStorage<string | undefined>(
|
||||
"sessionId",
|
||||
@@ -75,14 +97,21 @@ export const useXenApiStore = defineStore("xen-api", () => {
|
||||
status.value = STATUS.DISCONNECTED;
|
||||
}
|
||||
|
||||
function resetPoolMasterIp() {
|
||||
masterSessionStorage.value = null;
|
||||
window.location.reload();
|
||||
}
|
||||
|
||||
return {
|
||||
isConnected,
|
||||
isConnecting,
|
||||
isPoolOverridden,
|
||||
connect,
|
||||
reconnect,
|
||||
disconnect,
|
||||
getXapi,
|
||||
getXapiStats,
|
||||
currentSessionId,
|
||||
resetPoolMasterIp,
|
||||
};
|
||||
});
|
||||
|
||||
@@ -2,16 +2,17 @@
|
||||
<div :class="{ 'no-ui': !uiStore.hasUi }" class="vm-console-view">
|
||||
<div v-if="hasError">{{ $t("error-occurred") }}</div>
|
||||
<UiSpinner v-else-if="!isReady" class="spinner" />
|
||||
<div v-else-if="!isVmRunning" class="not-running">
|
||||
<div><img alt="" src="@/assets/monitor.svg" /></div>
|
||||
{{ $t("power-on-for-console") }}
|
||||
</div>
|
||||
<UiStatusPanel
|
||||
v-else-if="!isVmRunning"
|
||||
:image-source="monitor"
|
||||
:title="$t('power-on-for-console')"
|
||||
/>
|
||||
<template v-else-if="vm && vmConsole">
|
||||
<AppMenu horizontal>
|
||||
<MenuItem
|
||||
v-if="uiStore.hasUi"
|
||||
:icon="faArrowUpRightFromSquare"
|
||||
@click="openInNewTab"
|
||||
v-if="uiStore.hasUi"
|
||||
>
|
||||
{{ $t("open-console-in-new-tab") }}
|
||||
</MenuItem>
|
||||
@@ -44,10 +45,12 @@
|
||||
</template>
|
||||
|
||||
<script lang="ts" setup>
|
||||
import monitor from "@/assets/monitor.svg";
|
||||
import AppMenu from "@/components/menu/AppMenu.vue";
|
||||
import MenuItem from "@/components/menu/MenuItem.vue";
|
||||
import RemoteConsole from "@/components/RemoteConsole.vue";
|
||||
import UiSpinner from "@/components/ui/UiSpinner.vue";
|
||||
import UiStatusPanel from "@/components/ui/UiStatusPanel.vue";
|
||||
import { VM_OPERATION, VM_POWER_STATE } from "@/libs/xen-api/xen-api.enums";
|
||||
import type { XenApiVm } from "@/libs/xen-api/xen-api.types";
|
||||
import { usePageTitleStore } from "@/stores/page-title.store";
|
||||
@@ -158,7 +161,6 @@ const openInNewTab = () => {
|
||||
height: 100%;
|
||||
}
|
||||
|
||||
.not-running,
|
||||
.not-available {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
|
||||
@@ -3,6 +3,7 @@ import { asyncMap } from '@xen-orchestra/async-map'
|
||||
import { decorateClass } from '@vates/decorate-with'
|
||||
import { defer } from 'golike-defer'
|
||||
import { incorrectState, operationFailed } from 'xo-common/api-errors.js'
|
||||
import pRetry from 'promise-toolbox/retry'
|
||||
|
||||
import { getCurrentVmUuid } from './_XenStore.mjs'
|
||||
|
||||
@@ -69,7 +70,12 @@ class Host {
|
||||
if (await this.getField('host', ref, 'enabled')) {
|
||||
await this.callAsync('host.disable', ref)
|
||||
$defer(async () => {
|
||||
await this.callAsync('host.enable', ref)
|
||||
await pRetry(() => this.callAsync('host.enable', ref), {
|
||||
delay: 10e3,
|
||||
retries: 6,
|
||||
when: { code: 'HOST_STILL_BOOTING' },
|
||||
})
|
||||
|
||||
// Resuming VMs should occur after host enabling to avoid triggering a 'NO_HOSTS_AVAILABLE' error
|
||||
return asyncEach(suspendedVms, vmRef => this.callAsync('VM.resume', vmRef, false, false))
|
||||
})
|
||||
|
||||
@@ -3,17 +3,17 @@ import pCatch from 'promise-toolbox/catch'
|
||||
import pRetry from 'promise-toolbox/retry'
|
||||
import { createLogger } from '@xen-orchestra/log'
|
||||
import { decorateClass } from '@vates/decorate-with'
|
||||
import { finished } from 'node:stream'
|
||||
import { strict as assert } from 'node:assert'
|
||||
|
||||
import extractOpaqueRef from './_extractOpaqueRef.mjs'
|
||||
import NbdClient from '@vates/nbd-client'
|
||||
import { createNbdRawStream, createNbdVhdStream } from 'vhd-lib/createStreamNbd.js'
|
||||
import MultiNbdClient from '@vates/nbd-client/multi.mjs'
|
||||
import { createNbdVhdStream, createNbdRawStream } from 'vhd-lib/createStreamNbd.js'
|
||||
import { VDI_FORMAT_RAW, VDI_FORMAT_VHD } from './index.mjs'
|
||||
|
||||
const { warn } = createLogger('xo:xapi:vdi')
|
||||
|
||||
const noop = Function.prototype
|
||||
|
||||
class Vdi {
|
||||
async clone(vdiRef) {
|
||||
return extractOpaqueRef(await this.callAsync('VDI.clone', vdiRef))
|
||||
@@ -64,13 +64,13 @@ class Vdi {
|
||||
})
|
||||
}
|
||||
|
||||
async _getNbdClient(ref) {
|
||||
async _getNbdClient(ref, { nbdConcurrency = 1 } = {}) {
|
||||
const nbdInfos = await this.call('VDI.get_nbd_info', ref)
|
||||
if (nbdInfos.length > 0) {
|
||||
// a little bit of randomization to spread the load
|
||||
const nbdInfo = nbdInfos[Math.floor(Math.random() * nbdInfos.length)]
|
||||
try {
|
||||
const nbdClient = new NbdClient(nbdInfo, this._nbdOptions)
|
||||
const nbdClient = new MultiNbdClient(nbdInfos, { ...this._nbdOptions, nbdConcurrency })
|
||||
await nbdClient.connect()
|
||||
return nbdClient
|
||||
} catch (err) {
|
||||
@@ -83,7 +83,10 @@ class Vdi {
|
||||
}
|
||||
}
|
||||
|
||||
async exportContent(ref, { baseRef, cancelToken = CancelToken.none, format, preferNbd = this._preferNbd }) {
|
||||
async exportContent(
|
||||
ref,
|
||||
{ baseRef, cancelToken = CancelToken.none, format, nbdConcurrency = 1, preferNbd = this._preferNbd }
|
||||
) {
|
||||
const query = {
|
||||
format,
|
||||
vdi: ref,
|
||||
@@ -97,19 +100,23 @@ class Vdi {
|
||||
let nbdClient, stream
|
||||
try {
|
||||
if (preferNbd) {
|
||||
nbdClient = await this._getNbdClient(ref)
|
||||
nbdClient = await this._getNbdClient(ref, { nbdConcurrency })
|
||||
}
|
||||
// the raw nbd export does not need to peek ath the vhd source
|
||||
if (nbdClient !== undefined && format === VDI_FORMAT_RAW) {
|
||||
stream = createNbdRawStream(nbdClient)
|
||||
} else {
|
||||
// raw export without nbd or vhd exports needs a resource stream
|
||||
const vdiName = await this.getField('VDI', ref, 'name_label')
|
||||
stream = await this.getResource(cancelToken, '/export_raw_vdi/', {
|
||||
query,
|
||||
task: await this.task_create(`Exporting content of VDI ${await this.getField('VDI', ref, 'name_label')}`),
|
||||
task: await this.task_create(`Exporting content of VDI ${vdiName}`),
|
||||
})
|
||||
if (nbdClient !== undefined && format === VDI_FORMAT_VHD) {
|
||||
const taskRef = await this.task_create(`Exporting content of VDI ${vdiName} using NBD`)
|
||||
stream = await createNbdVhdStream(nbdClient, stream)
|
||||
stream.on('progress', progress => this.call('task.set_progress', taskRef, progress))
|
||||
finished(stream, () => this.task_destroy(taskRef))
|
||||
}
|
||||
}
|
||||
return stream
|
||||
|
||||
@@ -7,10 +7,30 @@
|
||||
|
||||
> Users must be able to say: “Nice enhancement, I'm eager to test it”
|
||||
|
||||
- [Forget SR] Changed the modal message and added a confirmation text to be sure the action is understood by the user [#7148](https://github.com/vatesfr/xen-orchestra/issues/7148) (PR [#7155](https://github.com/vatesfr/xen-orchestra/pull/7155))
|
||||
- [REST API] `/backups` has been renamed to `/backup` (redirections are in place for compatibility)
|
||||
- [REST API] _VM backup & Replication_ jobs have been moved from `/backup/jobs/:id` to `/backup/jobs/vm/:id` (redirections are in place for compatibility)
|
||||
- [REST API] _XO config & Pool metadata Backup_ jobs are available at `/backup/jobs/metadata`
|
||||
- [REST API] _Mirror Backup_ jobs are available at `/backup/jobs/mirror`
|
||||
- [Plugin/auth-saml] Add _Force re-authentication_ setting [Forum#67764](https://xcp-ng.org/forum/post/67764) (PR [#7232](https://github.com/vatesfr/xen-orchestra/pull/7232))
|
||||
- [HTTP] `http.useForwardedHeaders` setting can be enabled when XO is behind a reverse proxy to fetch clients IP addresses from `X-Forwarded-*` headers [Forum#67625](https://xcp-ng.org/forum/post/67625) (PR [#7233](https://github.com/vatesfr/xen-orchestra/pull/7233))
|
||||
- [Backup]Use multiple link to speedup NBD backup (PR [#7216](https://github.com/vatesfr/xen-orchestra/pull/7216))
|
||||
- [Backup] Show if disk is differential or full in incremental backups (PR [#7222](https://github.com/vatesfr/xen-orchestra/pull/7222))
|
||||
- [VDI] Create XAPI task during NBD export (PR [#7228](https://github.com/vatesfr/xen-orchestra/pull/7228))
|
||||
|
||||
### Bug fixes
|
||||
|
||||
- [Backup] Reduce memory consumption when using NBD (PR [#7216](https://github.com/vatesfr/xen-orchestra/pull/7216))
|
||||
|
||||
> Users must be able to say: “I had this issue, happy to know it's fixed”
|
||||
|
||||
- [REST API] Returns a proper 404 _Not Found_ error when a job does not exist instead of _Internal Server Error_
|
||||
- [Host/Smart reboot] Automatically retries up to a minute when `HOST_STILL_BOOTING` [#7194](https://github.com/vatesfr/xen-orchestra/issues/7194) (PR [#7231](https://github.com/vatesfr/xen-orchestra/pull/7231))
|
||||
- [Plugin/transport-slack] Compatibility with other services like Mattermost or Discord [#7130](https://github.com/vatesfr/xen-orchestra/issues/7130) (PR [#7220](https://github.com/vatesfr/xen-orchestra/pull/7220))
|
||||
- [Host/Network] Fix error "PIF_IS_PHYSICAL" when trying to remove a PIF that had already been physically disconnected [#7193](https://github.com/vatesfr/xen-orchestra/issues/7193) (PR [#7221](https://github.com/vatesfr/xen-orchestra/pull/7221))
|
||||
- [Mirror backup] Fix backup reports not being sent (PR [#7235](https://github.com/vatesfr/xen-orchestra/pull/7235))
|
||||
- [RPU] VMs are correctly migrated to their original host (PR [#7238](https://github.com/vatesfr/xen-orchestra/pull/7238))
|
||||
|
||||
### Packages to release
|
||||
|
||||
> When modifying a package, add it here with its release type.
|
||||
@@ -27,4 +47,14 @@
|
||||
|
||||
<!--packages-start-->
|
||||
|
||||
- @vates/nbd-client major
|
||||
- @xen-orchestra/backups patch
|
||||
- @xen-orchestra/xapi minor
|
||||
- vhd-lib minor
|
||||
- xo-server minor
|
||||
- xo-server-auth-saml minor
|
||||
- xo-server-transport-email major
|
||||
- xo-server-transport-slack patch
|
||||
- xo-web patch
|
||||
|
||||
<!--packages-end-->
|
||||
|
||||
BIN
docs/assets/nbd-backup-log.png
Normal file
BIN
docs/assets/nbd-backup-log.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 27 KiB |
BIN
docs/assets/nbd-connection.png
Normal file
BIN
docs/assets/nbd-connection.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 12 KiB |
BIN
docs/assets/nbd-enable.png
Normal file
BIN
docs/assets/nbd-enable.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 6.2 KiB |
@@ -64,3 +64,24 @@ For example, with a value of 2, the first two backups will be a key and a delta,
|
||||
This is important because on rare occasions a backup can be corrupted, and in the case of incremetnal backups, this corruption might impact all the following backups in the chain. Occasionally performing a full backup limits how far a corrupted delta backup can propagate.
|
||||
|
||||
The value to use depends on your storage constraints and the frequency of your backups, but a value of 20 is a good start.
|
||||
|
||||
## NBD-enabled Backups
|
||||
|
||||
You have the option to utilize the NBD network protocol for data transfer instead of the VHD handler generated by the XAPI. NBD-enabled backups generally show improved speed as the load on the Dom0 is reduced.
|
||||
|
||||
To enable NBD on the pool network, select the relevant pool, and navigate to the Network tab to modify the parameter:
|
||||
|
||||

|
||||
|
||||
This will securely transfer encrypted data from the host to the XOA.
|
||||
When creating or editing an incremental (previously known as delta) backup and replication for this pool in the future, you have the option to enable NBD in the Advanced settings:
|
||||
|
||||

|
||||
|
||||
After the job is completed, you can verify whether NBD was used for the transfer in the backup log:
|
||||
|
||||

|
||||
|
||||
It's important to note that NBD exports are not currently visible in the task list before 5.90 (december 2023).
|
||||
|
||||
To learn more about the evolution of this feature across various XO releases, check out our blog posts for versions [5.76](https://xen-orchestra.com/blog/xen-orchestra-5-76/), [5.81](https://xen-orchestra.comblog/xen-orchestra-5-81/), [5.82](https://xen-orchestra.com/blog/xen-orchestra-5-82/), and [5.86](https://xen-orchestra.com/blog/xen-orchestra-5-86/).
|
||||
|
||||
@@ -106,13 +106,13 @@ XO needs the following packages to be installed. Redis is used as a database by
|
||||
For example, on Debian/Ubuntu:
|
||||
|
||||
```sh
|
||||
apt-get install build-essential redis-server libpng-dev git python3-minimal libvhdi-utils lvm2 cifs-utils nfs-common
|
||||
apt-get install build-essential redis-server libpng-dev git python3-minimal libvhdi-utils lvm2 cifs-utils nfs-common ntfs-3g
|
||||
```
|
||||
|
||||
On Fedora/CentOS like:
|
||||
|
||||
```sh
|
||||
dnf install redis libpng-devel git libvhdi-tools lvm2 cifs-utils make automake gcc gcc-c++
|
||||
dnf install redis libpng-devel git lvm2 cifs-utils make automake gcc gcc-c++ nfs-utils ntfs-3g
|
||||
```
|
||||
|
||||
### Make sure Redis is running
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
'use strict'
|
||||
const { finished, Readable } = require('node:stream')
|
||||
const { readChunkStrict, skipStrict } = require('@vates/read-chunk')
|
||||
const { Readable } = require('node:stream')
|
||||
const { unpackHeader } = require('./Vhd/_utils')
|
||||
const {
|
||||
FOOTER_SIZE,
|
||||
@@ -14,18 +14,38 @@ const {
|
||||
const { fuHeader, checksumStruct } = require('./_structs')
|
||||
const assert = require('node:assert')
|
||||
|
||||
exports.createNbdRawStream = async function createRawStream(nbdClient) {
|
||||
const stream = Readable.from(nbdClient.readBlocks())
|
||||
const MAX_DURATION_BETWEEN_PROGRESS_EMIT = 5e3
|
||||
const MIN_TRESHOLD_PERCENT_BETWEEN_PROGRESS_EMIT = 1
|
||||
|
||||
exports.createNbdRawStream = function createRawStream(nbdClient) {
|
||||
const exportSize = Number(nbdClient.exportSize)
|
||||
const chunkSize = 2 * 1024 * 1024
|
||||
|
||||
const indexGenerator = function* () {
|
||||
const nbBlocks = Math.ceil(exportSize / chunkSize)
|
||||
for (let index = 0; index < nbBlocks; index++) {
|
||||
yield { index, size: chunkSize }
|
||||
}
|
||||
}
|
||||
const stream = Readable.from(nbdClient.readBlocks(indexGenerator), { objectMode: false })
|
||||
|
||||
stream.on('error', () => nbdClient.disconnect())
|
||||
stream.on('end', () => nbdClient.disconnect())
|
||||
return stream
|
||||
}
|
||||
|
||||
exports.createNbdVhdStream = async function createVhdStream(nbdClient, sourceStream) {
|
||||
exports.createNbdVhdStream = async function createVhdStream(
|
||||
nbdClient,
|
||||
sourceStream,
|
||||
{
|
||||
maxDurationBetweenProgressEmit = MAX_DURATION_BETWEEN_PROGRESS_EMIT,
|
||||
minTresholdPercentBetweenProgressEmit = MIN_TRESHOLD_PERCENT_BETWEEN_PROGRESS_EMIT,
|
||||
} = {}
|
||||
) {
|
||||
const bufFooter = await readChunkStrict(sourceStream, FOOTER_SIZE)
|
||||
|
||||
const header = unpackHeader(await readChunkStrict(sourceStream, HEADER_SIZE))
|
||||
header.tableOffset = FOOTER_SIZE + HEADER_SIZE
|
||||
// compute BAT in order
|
||||
const batSize = Math.ceil((header.maxTableEntries * 4) / SECTOR_SIZE) * SECTOR_SIZE
|
||||
await skipStrict(sourceStream, header.tableOffset - (FOOTER_SIZE + HEADER_SIZE))
|
||||
@@ -47,7 +67,6 @@ exports.createNbdVhdStream = async function createVhdStream(nbdClient, sourceStr
|
||||
precLocator = offset
|
||||
}
|
||||
}
|
||||
header.tableOffset = FOOTER_SIZE + HEADER_SIZE
|
||||
const rawHeader = fuHeader.pack(header)
|
||||
checksumStruct(rawHeader, fuHeader)
|
||||
|
||||
@@ -69,10 +88,35 @@ exports.createNbdVhdStream = async function createVhdStream(nbdClient, sourceStr
|
||||
}
|
||||
}
|
||||
|
||||
const totalLength = (offsetSector + blockSizeInSectors + 1) /* end footer */ * SECTOR_SIZE
|
||||
|
||||
let lengthRead = 0
|
||||
let lastUpdate = 0
|
||||
let lastLengthRead = 0
|
||||
|
||||
function throttleEmitProgress() {
|
||||
const now = Date.now()
|
||||
|
||||
if (
|
||||
lengthRead - lastLengthRead > (minTresholdPercentBetweenProgressEmit / 100) * totalLength ||
|
||||
(now - lastUpdate > maxDurationBetweenProgressEmit && lengthRead !== lastLengthRead)
|
||||
) {
|
||||
stream.emit('progress', lengthRead / totalLength)
|
||||
lastUpdate = now
|
||||
lastLengthRead = lengthRead
|
||||
}
|
||||
}
|
||||
|
||||
function trackAndGet(buffer) {
|
||||
lengthRead += buffer.length
|
||||
throttleEmitProgress()
|
||||
return buffer
|
||||
}
|
||||
|
||||
async function* iterator() {
|
||||
yield bufFooter
|
||||
yield rawHeader
|
||||
yield bat
|
||||
yield trackAndGet(bufFooter)
|
||||
yield trackAndGet(rawHeader)
|
||||
yield trackAndGet(bat)
|
||||
|
||||
let precBlocOffset = FOOTER_SIZE + HEADER_SIZE + batSize
|
||||
for (let i = 0; i < PARENT_LOCATOR_ENTRIES; i++) {
|
||||
@@ -82,7 +126,7 @@ exports.createNbdVhdStream = async function createVhdStream(nbdClient, sourceStr
|
||||
await skipStrict(sourceStream, parentLocatorOffset - precBlocOffset)
|
||||
const data = await readChunkStrict(sourceStream, space)
|
||||
precBlocOffset = parentLocatorOffset + space
|
||||
yield data
|
||||
yield trackAndGet(data)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -97,16 +141,20 @@ exports.createNbdVhdStream = async function createVhdStream(nbdClient, sourceStr
|
||||
})
|
||||
const bitmap = Buffer.alloc(SECTOR_SIZE, 255)
|
||||
for await (const block of nbdIterator) {
|
||||
yield bitmap // don't forget the bitmap before the block
|
||||
yield block
|
||||
yield trackAndGet(bitmap) // don't forget the bitmap before the block
|
||||
yield trackAndGet(block)
|
||||
}
|
||||
yield bufFooter
|
||||
yield trackAndGet(bufFooter)
|
||||
}
|
||||
|
||||
const stream = Readable.from(iterator())
|
||||
stream.length = (offsetSector + blockSizeInSectors + 1) /* end footer */ * SECTOR_SIZE
|
||||
const stream = Readable.from(iterator(), { objectMode: false })
|
||||
stream.length = totalLength
|
||||
stream._nbd = true
|
||||
stream.on('error', () => nbdClient.disconnect())
|
||||
stream.on('end', () => nbdClient.disconnect())
|
||||
finished(stream, () => {
|
||||
clearInterval(interval)
|
||||
nbdClient.disconnect()
|
||||
})
|
||||
const interval = setInterval(throttleEmitProgress, maxDurationBetweenProgressEmit)
|
||||
|
||||
return stream
|
||||
}
|
||||
|
||||
@@ -223,6 +223,15 @@ class Merger {
|
||||
)
|
||||
// ensure data size is correct
|
||||
await this.#writeState()
|
||||
try {
|
||||
// vhd file
|
||||
this.#state.vhdSize = await parentVhd.getSize()
|
||||
} catch (err) {
|
||||
// vhd directory
|
||||
// parentVhd has its bat already loaded
|
||||
this.#state.vhdSize = parentVhd.streamSize()
|
||||
}
|
||||
|
||||
this.#onProgress({ total: nBlocks, done: nBlocks })
|
||||
}
|
||||
|
||||
@@ -294,9 +303,10 @@ class Merger {
|
||||
}
|
||||
|
||||
async #cleanup() {
|
||||
const mergedSize = this.#state?.mergedDataSize ?? 0
|
||||
const finalVhdSize = this.#state?.vhdSize ?? 0
|
||||
const mergedDataSize = this.#state?.mergedDataSize ?? 0
|
||||
await this.#handler.unlink(this.#statePath).catch(warn)
|
||||
return mergedSize
|
||||
return { mergedDataSize, finalVhdSize}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -5,7 +5,6 @@ import ms from 'ms'
|
||||
import httpRequest from 'http-request-plus'
|
||||
import map from 'lodash/map.js'
|
||||
import noop from 'lodash/noop.js'
|
||||
import ProxyAgent from 'proxy-agent'
|
||||
import { coalesceCalls } from '@vates/coalesce-calls'
|
||||
import { Collection } from 'xo-collection'
|
||||
import { EventEmitter } from 'events'
|
||||
@@ -13,6 +12,7 @@ import { Index } from 'xo-collection/index.js'
|
||||
import { cancelable, defer, fromCallback, ignoreErrors, pDelay, pRetry, pTimeout } from 'promise-toolbox'
|
||||
import { limitConcurrency } from 'limit-concurrency-decorator'
|
||||
import { decorateClass } from '@vates/decorate-with'
|
||||
import { ProxyAgent } from 'proxy-agent'
|
||||
|
||||
import debug from './_debug.mjs'
|
||||
import getTaskResult from './_getTaskResult.mjs'
|
||||
@@ -135,13 +135,12 @@ export class Xapi extends EventEmitter {
|
||||
}
|
||||
|
||||
this._allowUnauthorized = opts.allowUnauthorized
|
||||
let { httpProxy } = opts
|
||||
const { httpProxy } = opts
|
||||
if (httpProxy !== undefined) {
|
||||
if (httpProxy.startsWith('https:')) {
|
||||
httpProxy = parseUrl(httpProxy)
|
||||
httpProxy.rejectUnauthorized = !opts.allowUnauthorized
|
||||
}
|
||||
this._httpAgent = new ProxyAgent(httpProxy)
|
||||
this._httpAgent = new ProxyAgent({
|
||||
getProxyForUrl: () => httpProxy,
|
||||
rejectUnauthorized: !opts.allowUnauthorized,
|
||||
})
|
||||
}
|
||||
this._setUrl(url)
|
||||
|
||||
|
||||
@@ -46,7 +46,7 @@
|
||||
"minimist": "^1.2.0",
|
||||
"ms": "^2.1.1",
|
||||
"promise-toolbox": "^0.21.0",
|
||||
"proxy-agent": "^5.0.0",
|
||||
"proxy-agent": "^6.3.1",
|
||||
"pw": "0.0.4",
|
||||
"xmlrpc": "^1.3.2",
|
||||
"xo-collection": "^0.5.0"
|
||||
|
||||
@@ -46,6 +46,12 @@ You should try \`http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddr
|
||||
default: DEFAULTS.disableRequestedAuthnContext,
|
||||
type: 'boolean',
|
||||
},
|
||||
forceAuthn: {
|
||||
title: 'Force re-authentication',
|
||||
description: 'Request the identity provider to authenticate the user, even if they possess a valid session.',
|
||||
default: false,
|
||||
type: 'boolean',
|
||||
},
|
||||
},
|
||||
required: ['cert', 'entryPoint', 'issuer', 'usernameField'],
|
||||
}
|
||||
|
||||
@@ -25,11 +25,11 @@
|
||||
"preferGlobal": false,
|
||||
"main": "dist/",
|
||||
"engines": {
|
||||
"node": ">=6"
|
||||
"node": ">=18"
|
||||
},
|
||||
"dependencies": {
|
||||
"marked": "^11.1.0",
|
||||
"nodemailer": "^6.1.0",
|
||||
"nodemailer-markdown": "^1.0.1",
|
||||
"promise-toolbox": "^0.21.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import { createTransport } from 'nodemailer'
|
||||
import { markdown as nodemailerMarkdown } from 'nodemailer-markdown'
|
||||
import { Marked } from 'marked'
|
||||
import { promisify } from 'promise-toolbox'
|
||||
|
||||
// ===================================================================
|
||||
@@ -13,8 +13,6 @@ const removeUndefined = obj => {
|
||||
return obj
|
||||
}
|
||||
|
||||
const markdownCompiler = nodemailerMarkdown()
|
||||
|
||||
const logAndRethrow = error => {
|
||||
console.error('[WARN] plugin transport-email:', (error && error.stack) || error)
|
||||
|
||||
@@ -138,6 +136,7 @@ export const testSchema = {
|
||||
|
||||
class TransportEmailPlugin {
|
||||
constructor({ staticConfig, xo }) {
|
||||
this._marked = new Marked()
|
||||
this._staticConfig = staticConfig
|
||||
this._xo = xo
|
||||
this._unset = null
|
||||
@@ -168,7 +167,26 @@ class TransportEmailPlugin {
|
||||
}
|
||||
|
||||
const transport = createTransport({ ...transportConf, ...this._staticConfig.transport }, { from })
|
||||
transport.use('compile', markdownCompiler)
|
||||
transport.use('compile', (mail, cb) => {
|
||||
const data = mail?.data
|
||||
if (data == null || data.markdown == null || data.html !== undefined) {
|
||||
return cb()
|
||||
}
|
||||
|
||||
mail.resolveContent(data, 'markdown', (error, markdown) => {
|
||||
if (error != null) {
|
||||
return cb(error)
|
||||
}
|
||||
|
||||
markdown = String(markdown)
|
||||
|
||||
data.html = this._marked.parse(markdown)
|
||||
if (data.text === undefined) {
|
||||
data.text = markdown
|
||||
}
|
||||
cb()
|
||||
})
|
||||
})
|
||||
|
||||
this._send = promisify(transport.sendMail, transport)
|
||||
}
|
||||
@@ -187,7 +205,7 @@ class TransportEmailPlugin {
|
||||
subject: '[Xen Orchestra] Test of transport-email plugin',
|
||||
markdown: `Hi there,
|
||||
|
||||
The transport-email plugin for Xen Orchestra server seems to be working fine, nicely done :)
|
||||
The \`transport-email\` plugin for *Xen Orchestra* server seems to be working fine, nicely done :)
|
||||
`,
|
||||
attachments: [
|
||||
{
|
||||
|
||||
@@ -29,8 +29,7 @@
|
||||
"node": ">=6"
|
||||
},
|
||||
"dependencies": {
|
||||
"promise-toolbox": "^0.21.0",
|
||||
"slack-node": "^0.1.8"
|
||||
"@slack/webhook": "^7.0.1"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/cli": "^7.0.0",
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
import Slack from 'slack-node'
|
||||
import { promisify } from 'promise-toolbox'
|
||||
import { IncomingWebhook } from '@slack/webhook'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
@@ -49,10 +48,9 @@ class XoServerTransportSlack {
|
||||
}
|
||||
|
||||
configure({ webhookUri, ...conf }) {
|
||||
const slack = new Slack()
|
||||
slack.setWebhook(webhookUri)
|
||||
const slack = new IncomingWebhook(webhookUri)
|
||||
this._conf = conf
|
||||
this._send = promisify(slack.webhook)
|
||||
this._send = slack.send.bind(slack)
|
||||
}
|
||||
|
||||
load() {
|
||||
|
||||
@@ -107,6 +107,9 @@ writeBlockConcurrency = 16
|
||||
enabled = false
|
||||
threshold = 1000
|
||||
|
||||
[http]
|
||||
useForwardedHeaders = false
|
||||
|
||||
# Helmet handles HTTP security via headers
|
||||
#
|
||||
# https://helmetjs.github.io/docs/
|
||||
|
||||
@@ -107,7 +107,8 @@
|
||||
"passport": "^0.6.0",
|
||||
"passport-local": "^1.0.0",
|
||||
"promise-toolbox": "^0.21.0",
|
||||
"proxy-agent": "^5.0.0",
|
||||
"proxy-addr": "^2.0.7",
|
||||
"proxy-agent": "^6.3.1",
|
||||
"pug": "^3.0.0",
|
||||
"pumpify": "^2.0.0",
|
||||
"pw": "^0.0.4",
|
||||
|
||||
@@ -72,6 +72,20 @@
|
||||
# good enough (e.g. a domain name must be used or there is a reverse proxy).
|
||||
#publicUrl = 'https://xoa.company.lan'
|
||||
|
||||
# Uncomment this if xo-server is behind a reverse proxy to make xo-server use
|
||||
# X-Forwarded-* headers to determine the IP address of clients.
|
||||
#
|
||||
# Accepted values for this setting:
|
||||
# - false (default): do not use the headers
|
||||
# - true: always use the headers
|
||||
# - a list of trusted addresses: the headers will be used only if the connection
|
||||
# is coming from one of these addresses
|
||||
#
|
||||
# More info about the accepted values: https://www.npmjs.com/package/proxy-addr?activeTab=readme#proxyaddrreq-trust
|
||||
#
|
||||
# > Note: X-Forwarded-* headers are easily spoofed and the detected IP addresses are unreliable.
|
||||
#useForwardedHeaders = true
|
||||
|
||||
# Settings applied to cookies created by xo-server's embedded HTTP server.
|
||||
#
|
||||
# See https://www.npmjs.com/package/cookie#options-1
|
||||
|
||||
@@ -22,6 +22,15 @@ const SCHEMA_SETTINGS = {
|
||||
minimum: 0,
|
||||
optional: true,
|
||||
},
|
||||
nbdConcurrency: {
|
||||
type: 'number',
|
||||
minimum: 1,
|
||||
optional: true,
|
||||
},
|
||||
preferNbd: {
|
||||
type: 'boolean',
|
||||
optional: true,
|
||||
},
|
||||
},
|
||||
additionalProperties: true,
|
||||
},
|
||||
@@ -279,8 +288,8 @@ importVmBackup.params = {
|
||||
},
|
||||
useDifferentialRestore: {
|
||||
type: 'boolean',
|
||||
optional: true
|
||||
}
|
||||
optional: true,
|
||||
},
|
||||
}
|
||||
|
||||
export function checkBackup({ id, settings, sr }) {
|
||||
|
||||
@@ -28,7 +28,7 @@ async function delete_({ pif }) {
|
||||
return
|
||||
}
|
||||
|
||||
await xapi.callAsync('PIF.destroy', pif._xapiRef)
|
||||
await xapi.callAsync('PIF.forget', pif._xapiRef)
|
||||
}
|
||||
export { delete_ as delete }
|
||||
|
||||
|
||||
@@ -63,7 +63,7 @@ export async function copyVm({ vm, sr }) {
|
||||
const input = await srcXapi.VM_export(vm._xapiRef)
|
||||
// eslint-disable-next-line no-console
|
||||
console.log('import full VM...')
|
||||
await tgtXapi.VM_destroy((await tgtXapi.importVm(input, { srId: sr })).$ref)
|
||||
await tgtXapi.VM_destroy(await tgtXapi.VM_import(input, sr._xapiRef))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -12,7 +12,7 @@ import { format } from 'json-rpc-peer'
|
||||
import { FAIL_ON_QUEUE } from 'limit-concurrency-decorator'
|
||||
import { getStreamAsBuffer } from 'get-stream'
|
||||
import { ignoreErrors } from 'promise-toolbox'
|
||||
import { invalidParameters, noSuchObject, operationFailed, unauthorized } from 'xo-common/api-errors.js'
|
||||
import { invalidParameters, noSuchObject, unauthorized } from 'xo-common/api-errors.js'
|
||||
import { Ref } from 'xen-api'
|
||||
|
||||
import { forEach, map, mapFilter, parseSize, safeDateFormat } from '../utils.mjs'
|
||||
@@ -561,24 +561,14 @@ export async function migrate({
|
||||
|
||||
await this.checkPermissions(permissions)
|
||||
|
||||
await this.getXapi(vm)
|
||||
.migrateVm(vm._xapiId, this.getXapi(host), host._xapiId, {
|
||||
sr: sr && this.getObject(sr, 'SR')._xapiId,
|
||||
migrationNetworkId: migrationNetwork != null ? migrationNetwork._xapiId : undefined,
|
||||
mapVifsNetworks: mapVifsNetworksXapi,
|
||||
mapVdisSrs: mapVdisSrsXapi,
|
||||
force,
|
||||
bypassAssert,
|
||||
})
|
||||
.catch(error => {
|
||||
if (error?.code !== undefined) {
|
||||
// make sure we log the original error
|
||||
log.warn('vm.migrate', { error })
|
||||
|
||||
throw operationFailed({ objectId: vm.id, code: error.code })
|
||||
}
|
||||
throw error
|
||||
})
|
||||
await this.getXapi(vm).migrateVm(vm._xapiId, this.getXapi(host), host._xapiId, {
|
||||
sr: sr && this.getObject(sr, 'SR')._xapiId,
|
||||
migrationNetworkId: migrationNetwork != null ? migrationNetwork._xapiId : undefined,
|
||||
mapVifsNetworks: mapVifsNetworksXapi,
|
||||
mapVdisSrs: mapVdisSrsXapi,
|
||||
force,
|
||||
bypassAssert,
|
||||
})
|
||||
}
|
||||
|
||||
migrate.params = {
|
||||
@@ -1297,7 +1287,8 @@ async function import_({ data, sr, type = 'xva', url }) {
|
||||
throw invalidParameters('URL import is only compatible with XVA')
|
||||
}
|
||||
|
||||
return (await xapi.importVm(await hrp(url), { srId, type })).$id
|
||||
const ref = await xapi.VM_import(await hrp(url), sr._xapiRef)
|
||||
return xapi.call('VM.get_by_uuid', ref)
|
||||
}
|
||||
|
||||
return {
|
||||
|
||||
@@ -965,10 +965,7 @@ async function _importGlusterVM(xapi, template, lvmsrId) {
|
||||
namespace: 'xosan',
|
||||
version: template.version,
|
||||
})
|
||||
const newVM = await xapi.importVm(templateStream, {
|
||||
srId: lvmsrId,
|
||||
type: 'xva',
|
||||
})
|
||||
const newVM = await xapi.VM_import(templateStream, this.getObject(lvmsrId, 'SR')._xapiRef)
|
||||
await xapi.editVm(newVM, {
|
||||
autoPoweron: true,
|
||||
name_label: 'XOSAN imported VM',
|
||||
|
||||
@@ -147,10 +147,13 @@ export default class Redis extends Collection {
|
||||
model = this._serialize(model) ?? model
|
||||
|
||||
// Generate a new identifier if necessary.
|
||||
if (model.id === undefined) {
|
||||
model.id = generateUuid()
|
||||
let { id } = model
|
||||
if (id === undefined) {
|
||||
id = generateUuid()
|
||||
} else {
|
||||
// identifier is not stored as value in the database, it's already part of the key
|
||||
delete model.id
|
||||
}
|
||||
const { id } = model
|
||||
|
||||
const newEntry = await redis.sAdd(prefix + '_ids', id)
|
||||
|
||||
@@ -172,16 +175,7 @@ export default class Redis extends Collection {
|
||||
}
|
||||
|
||||
const key = `${prefix}:${id}`
|
||||
const props = {}
|
||||
for (const name of Object.keys(model)) {
|
||||
if (name !== 'id') {
|
||||
const value = model[name]
|
||||
if (value !== undefined) {
|
||||
props[name] = String(value)
|
||||
}
|
||||
}
|
||||
}
|
||||
const promises = [redis.del(key), redis.hSet(key, props)]
|
||||
const promises = [redis.del(key), redis.set(key, JSON.stringify(model))]
|
||||
|
||||
// Update indexes.
|
||||
forEach(indexes, index => {
|
||||
@@ -206,12 +200,13 @@ export default class Redis extends Collection {
|
||||
|
||||
let model
|
||||
try {
|
||||
model = await redis.hGetAll(key)
|
||||
model = await redis.get(key).then(JSON.parse)
|
||||
} catch (error) {
|
||||
if (!error.message.startsWith('WRONGTYPE')) {
|
||||
throw error
|
||||
}
|
||||
model = await redis.get(key).then(JSON.parse)
|
||||
|
||||
model = await redis.hGetAll(key)
|
||||
}
|
||||
|
||||
return model
|
||||
|
||||
@@ -13,6 +13,7 @@ import memoryStoreFactory from 'memorystore'
|
||||
import merge from 'lodash/merge.js'
|
||||
import ms from 'ms'
|
||||
import once from 'lodash/once.js'
|
||||
import proxyAddr from 'proxy-addr'
|
||||
import proxyConsole from './proxy-console.mjs'
|
||||
import pw from 'pw'
|
||||
import serveStatic from 'serve-static'
|
||||
@@ -584,7 +585,7 @@ const setUpStaticFiles = (express, opts) => {
|
||||
|
||||
// ===================================================================
|
||||
|
||||
const setUpApi = (webServer, xo, config) => {
|
||||
const setUpApi = (webServer, xo, config, useForwardedHeaders) => {
|
||||
const webSocketServer = new WebSocketServer({
|
||||
...config.apiWebSocketOptions,
|
||||
|
||||
@@ -593,7 +594,7 @@ const setUpApi = (webServer, xo, config) => {
|
||||
xo.hooks.on('stop', () => fromCallback.call(webSocketServer, 'close'))
|
||||
|
||||
const onConnection = (socket, upgradeReq) => {
|
||||
const { remoteAddress } = upgradeReq.socket
|
||||
const remoteAddress = proxyAddr(upgradeReq, useForwardedHeaders)
|
||||
|
||||
// Create the abstract XO object for this connection.
|
||||
const connection = xo.createApiConnection(remoteAddress)
|
||||
@@ -653,7 +654,7 @@ const setUpApi = (webServer, xo, config) => {
|
||||
|
||||
const CONSOLE_PROXY_PATH_RE = /^\/api\/consoles\/(.*)$/
|
||||
|
||||
const setUpConsoleProxy = (webServer, xo) => {
|
||||
const setUpConsoleProxy = (webServer, xo, useForwardedHeaders) => {
|
||||
const webSocketServer = new WebSocketServer({
|
||||
noServer: true,
|
||||
})
|
||||
@@ -678,7 +679,7 @@ const setUpConsoleProxy = (webServer, xo) => {
|
||||
throw invalidCredentials()
|
||||
}
|
||||
|
||||
const { remoteAddress } = socket
|
||||
const remoteAddress = proxyAddr(req, useForwardedHeaders)
|
||||
log.info(`+ Console proxy (${user.name} - ${remoteAddress})`)
|
||||
|
||||
const data = {
|
||||
@@ -836,8 +837,20 @@ export default async function main(args) {
|
||||
// Trigger a clean job.
|
||||
await xo.hooks.clean()
|
||||
|
||||
const useForwardedHeaders = (() => {
|
||||
// recompile the fonction when the setting change
|
||||
let useForwardedHeaders
|
||||
xo.config.watch('http.useForwardedHeaders', val => {
|
||||
useForwardedHeaders = typeof val === 'boolean' ? () => val : proxyAddr.compile(val)
|
||||
})
|
||||
|
||||
return (...args) => useForwardedHeaders(...args)
|
||||
})()
|
||||
|
||||
express.set('trust proxy', useForwardedHeaders)
|
||||
|
||||
// Must be set up before the API.
|
||||
setUpConsoleProxy(webServer, xo)
|
||||
setUpConsoleProxy(webServer, xo, useForwardedHeaders)
|
||||
|
||||
// Must be set up before the API.
|
||||
express.use(xo._handleHttpRequest.bind(xo))
|
||||
@@ -847,7 +860,7 @@ export default async function main(args) {
|
||||
await setUpPassport(express, xo, config)
|
||||
|
||||
// Must be set up before the static files.
|
||||
setUpApi(webServer, xo, config)
|
||||
setUpApi(webServer, xo, config, useForwardedHeaders)
|
||||
|
||||
setUpProxies(express, config.http.proxies, xo)
|
||||
|
||||
|
||||
@@ -20,7 +20,10 @@ export class Remotes extends Collection {
|
||||
|
||||
_unserialize(remote) {
|
||||
remote.benchmarks = parseProp('remote', remote, 'benchmarks')
|
||||
remote.enabled = remote.enabled === 'true'
|
||||
|
||||
const { enabled } = remote
|
||||
remote.enabled = typeof enabled === 'boolean' ? enabled : enabled === 'true'
|
||||
|
||||
remote.error = parseProp('remote', remote, 'error', remote.error)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -389,7 +389,7 @@ export default class Xapi extends XapiBase {
|
||||
|
||||
const onVmCreation = nameLabel !== undefined ? vm => vm.set_name_label(nameLabel) : null
|
||||
|
||||
const vm = await targetXapi._getOrWaitObject(await targetXapi._importVm(stream, sr, onVmCreation))
|
||||
const vm = await targetXapi._getOrWaitObject(await targetXapi.VM_import(stream, sr.$ref, onVmCreation))
|
||||
|
||||
return {
|
||||
vm,
|
||||
@@ -674,36 +674,6 @@ export default class Xapi extends XapiBase {
|
||||
)
|
||||
}
|
||||
|
||||
@cancelable
|
||||
async _importVm($cancelToken, stream, sr, onVmCreation = undefined) {
|
||||
const taskRef = await this.task_create('VM import')
|
||||
const query = {}
|
||||
|
||||
if (sr != null) {
|
||||
query.sr_id = sr.$ref
|
||||
}
|
||||
|
||||
if (onVmCreation != null) {
|
||||
this.waitObject(
|
||||
obj => obj != null && obj.current_operations != null && taskRef in obj.current_operations,
|
||||
onVmCreation
|
||||
)
|
||||
}
|
||||
|
||||
const vmRef = await this.putResource($cancelToken, stream, '/import/', {
|
||||
query,
|
||||
task: taskRef,
|
||||
}).then(extractOpaqueRef, error => {
|
||||
// augment the error with as much relevant info as possible
|
||||
error.pool_master = this.pool.$master
|
||||
error.SR = sr
|
||||
|
||||
throw error
|
||||
})
|
||||
|
||||
return vmRef
|
||||
}
|
||||
|
||||
@decorateWith(deferrable)
|
||||
async _importOvaVm($defer, stream, { descriptionLabel, disks, memory, nameLabel, networks, nCpus, tables }, sr) {
|
||||
// 1. Create VM.
|
||||
@@ -812,7 +782,7 @@ export default class Xapi extends XapiBase {
|
||||
const sr = srId && this.getObject(srId)
|
||||
|
||||
if (type === 'xva') {
|
||||
return /* await */ this._getOrWaitObject(await this._importVm(stream, sr))
|
||||
return /* await */ this._getOrWaitObject(await this.VM_import(stream, sr?.$ref))
|
||||
}
|
||||
|
||||
if (type === 'ova') {
|
||||
|
||||
@@ -534,7 +534,7 @@ export default {
|
||||
}
|
||||
|
||||
// Remember on which hosts the running VMs are
|
||||
const vmsByHost = mapValues(
|
||||
const vmRefsByHost = mapValues(
|
||||
groupBy(
|
||||
filter(this.objects.all, {
|
||||
$type: 'VM',
|
||||
@@ -551,7 +551,7 @@ export default {
|
||||
return hostId
|
||||
}
|
||||
),
|
||||
vms => vms.map(vm => vm.$id)
|
||||
vms => vms.map(vm => vm.$ref)
|
||||
)
|
||||
|
||||
// Put master in first position to restart it first
|
||||
@@ -623,20 +623,23 @@ export default {
|
||||
continue
|
||||
}
|
||||
|
||||
const vmIds = vmsByHost[hostId]
|
||||
const vmRefs = vmRefsByHost[hostId]
|
||||
|
||||
if (vmIds === undefined) {
|
||||
if (vmRefs === undefined) {
|
||||
continue
|
||||
}
|
||||
|
||||
const residentVms = host.$resident_VMs.map(vm => vm.uuid)
|
||||
// host.$resident_VMs is outdated and returns resident VMs before the host.evacuate.
|
||||
// this.getField is used in order not to get cached data.
|
||||
const residentVmRefs = await this.getField('host', host.$ref, 'resident_VMs')
|
||||
|
||||
for (const vmId of vmIds) {
|
||||
if (residentVms.includes(vmId)) {
|
||||
for (const vmRef of vmRefs) {
|
||||
if (residentVmRefs.includes(vmRef)) {
|
||||
continue
|
||||
}
|
||||
|
||||
try {
|
||||
const vmId = await this.getField('VM', vmRef, 'uuid')
|
||||
await this.migrateVm(vmId, this, hostId)
|
||||
} catch (err) {
|
||||
log.error(err)
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import hrp from 'http-request-plus'
|
||||
import ProxyAgent from 'proxy-agent'
|
||||
import { ProxyAgent } from 'proxy-agent'
|
||||
|
||||
const { env } = process
|
||||
|
||||
|
||||
@@ -271,13 +271,13 @@ export default class Proxy {
|
||||
[namespace]: { xva },
|
||||
} = await app.getResourceCatalog()
|
||||
const xapi = app.getXapi(srId)
|
||||
const vm = await xapi.importVm(
|
||||
const vm = await xapi.VM_import(
|
||||
await app.requestResource({
|
||||
id: xva.id,
|
||||
namespace,
|
||||
version: xva.version,
|
||||
}),
|
||||
{ srId }
|
||||
srId && this.getObject(srId, 'SR')._xapiRef
|
||||
)
|
||||
$defer.onFailure(() => xapi.VM_destroy(vm.$ref))
|
||||
|
||||
|
||||
@@ -175,7 +175,7 @@ export default class RestApi {
|
||||
})
|
||||
)
|
||||
|
||||
collections.backups = { id: 'backups' }
|
||||
collections.backup = { id: 'backup' }
|
||||
collections.restore = { id: 'restore' }
|
||||
collections.tasks = { id: 'tasks' }
|
||||
collections.users = { id: 'users' }
|
||||
@@ -280,23 +280,26 @@ export default class RestApi {
|
||||
wrap((req, res) => sendObjects(collections, req, res))
|
||||
)
|
||||
|
||||
// For compatibility redirect from /backups* to /backup
|
||||
api.get('/backups*', (req, res) => {
|
||||
res.redirect(308, req.baseUrl + '/backup' + req.params[0])
|
||||
})
|
||||
|
||||
const backupTypes = {
|
||||
__proto__: null,
|
||||
|
||||
metadata: 'metadataBackup',
|
||||
mirror: 'mirrorBackup',
|
||||
vm: 'backup',
|
||||
}
|
||||
|
||||
api
|
||||
.get(
|
||||
'/backups',
|
||||
'/backup',
|
||||
wrap((req, res) => sendObjects([{ id: 'jobs' }, { id: 'logs' }], req, res))
|
||||
)
|
||||
.get(
|
||||
'/backups/jobs',
|
||||
wrap(async (req, res) => sendObjects(await app.getAllJobs('backup'), req, res))
|
||||
)
|
||||
.get(
|
||||
'/backups/jobs/:id',
|
||||
wrap(async (req, res) => {
|
||||
res.json(await app.getJob(req.params.id, 'backup'))
|
||||
})
|
||||
)
|
||||
.get(
|
||||
'/backups/logs',
|
||||
'/backup/logs',
|
||||
wrap(async (req, res) => {
|
||||
const { filter, limit } = req.query
|
||||
const logs = await app.getBackupNgLogsSorted({
|
||||
@@ -306,6 +309,37 @@ export default class RestApi {
|
||||
await sendObjects(logs, req, res)
|
||||
})
|
||||
)
|
||||
.get(
|
||||
'/backup/jobs',
|
||||
wrap((req, res) =>
|
||||
sendObjects(
|
||||
Object.keys(backupTypes).map(id => ({ id })),
|
||||
req,
|
||||
res
|
||||
)
|
||||
)
|
||||
)
|
||||
|
||||
for (const [collection, type] of Object.entries(backupTypes)) {
|
||||
api
|
||||
.get(
|
||||
'/backup/jobs/' + collection,
|
||||
wrap(async (req, res) => sendObjects(await app.getAllJobs(type), req, res))
|
||||
)
|
||||
.get(
|
||||
`/backup/jobs/${collection}/:id`,
|
||||
wrap(async (req, res) => {
|
||||
res.json(await app.getJob(req.params.id, type))
|
||||
}, true)
|
||||
)
|
||||
}
|
||||
|
||||
// For compatibility, redirect /backup/jobs/:id to /backup/jobs/vm/:id
|
||||
api.get('/backup/jobs/:id', (req, res) => {
|
||||
res.redirect(308, req.baseUrl + '/backup/jobs/vm/' + req.params.id)
|
||||
})
|
||||
|
||||
api
|
||||
.get(
|
||||
'/restore',
|
||||
wrap((req, res) => sendObjects([{ id: 'logs' }], req, res))
|
||||
|
||||
@@ -53,6 +53,7 @@ export default class {
|
||||
this._db = (async () => {
|
||||
await fse.ensureDir(dir)
|
||||
await fse.access(dir, fse.constants.R_OK | fse.constants.W_OK)
|
||||
await fse.chmod(dir, 0o700)
|
||||
return levelup(dir)
|
||||
})()
|
||||
}
|
||||
|
||||
@@ -586,6 +586,7 @@ const messages = {
|
||||
editJobNotFound: "The job you're trying to edit wasn't found",
|
||||
preferNbd: 'Use NBD protocol to transfer disk if available',
|
||||
preferNbdInformation: 'A network accessible by XO or the proxy must have NBD enabled',
|
||||
nbdConcurrency: 'Number of NBD connexion per disk',
|
||||
|
||||
// ------ New Remote -----
|
||||
newRemote: 'New file system remote',
|
||||
@@ -864,6 +865,7 @@ const messages = {
|
||||
srDisconnectAll: 'Disconnect from all hosts',
|
||||
srForget: 'Forget this SR',
|
||||
srsForget: 'Forget SRs',
|
||||
nSrsForget: 'Forget {nSrs, number} SR{nSrs, plural, one {} other{s}}',
|
||||
srRemoveButton: 'Remove this SR',
|
||||
srNoVdis: 'No VDIs in this storage',
|
||||
srReclaimSpace: 'Reclaim freed space',
|
||||
@@ -1788,6 +1790,9 @@ const messages = {
|
||||
availableBackupsColumn: 'Available Backups',
|
||||
backupRestoreErrorTitle: 'Missing parameters',
|
||||
backupRestoreErrorMessage: 'Choose a SR and a backup',
|
||||
backupisKey: 'key',
|
||||
backupIsIncremental: 'incremental',
|
||||
backupIsDifferencing: 'differencing',
|
||||
vmsToBackup: 'VMs to backup',
|
||||
refreshBackupList: 'Refresh backup list',
|
||||
restoreVmBackups: 'Restore',
|
||||
@@ -2375,11 +2380,9 @@ const messages = {
|
||||
srDisconnectAllModalMessage: 'This will disconnect this SR from all its hosts.',
|
||||
srsDisconnectAllModalMessage:
|
||||
'This will disconnect each selected SR from its host (local SR) or from every hosts of its pool (shared SR).',
|
||||
srForgetModalTitle: 'Forget SR',
|
||||
srsForgetModalTitle: 'Forget selected SRs',
|
||||
srForgetModalMessage: "Are you sure you want to forget this SR? VDIs on this storage won't be removed.",
|
||||
srsForgetModalMessage:
|
||||
"Are you sure you want to forget all the selected SRs? VDIs on these storages won't be removed.",
|
||||
forgetNSrsModalMessage: 'Are you sure you want to forget {nSrs, number} SR{nSrs, plural, one {} other{s}}?',
|
||||
srForgetModalWarning:
|
||||
'You will lose all the metadata, meaning all the links between the VDIs (disks) and their respective VMs. This operation cannot be undone.',
|
||||
srAllDisconnected: 'Disconnected',
|
||||
srSomeConnected: 'Partially connected',
|
||||
srAllConnected: 'Connected',
|
||||
|
||||
@@ -641,24 +641,39 @@ const xoItemToRender = {
|
||||
<span>{group.name_label.startsWith('Group of ') ? group.name_label.slice(9) : group.name_label}</span>
|
||||
),
|
||||
|
||||
backup: backup => (
|
||||
<span>
|
||||
<span className='tag tag-info' style={{ textTransform: 'capitalize' }}>
|
||||
{backup.mode}
|
||||
</span>{' '}
|
||||
<span className='tag tag-warning'>{backup.remote.name}</span>{' '}
|
||||
{backup.size !== undefined && <span className='tag tag-info'>{formatSize(backup.size)}</span>}{' '}
|
||||
<FormattedDate
|
||||
value={new Date(backup.timestamp)}
|
||||
month='long'
|
||||
day='numeric'
|
||||
year='numeric'
|
||||
hour='2-digit'
|
||||
minute='2-digit'
|
||||
second='2-digit'
|
||||
/>
|
||||
</span>
|
||||
),
|
||||
backup: backup => {
|
||||
const nbDifferencings = backup.differencingVhds ?? 0
|
||||
const nbKeys = backup.dynamicVhds ?? 0
|
||||
const nbDisks = backup.disks.length
|
||||
return (
|
||||
<span>
|
||||
<span className='tag tag-info' style={{ textTransform: 'capitalize' }}>
|
||||
{backup.mode === 'delta' ? _('backupIsIncremental') : backup.mode}
|
||||
</span>{' '}
|
||||
<span className='tag tag-warning'>{backup.remote.name}</span>{' '}
|
||||
{nbDifferencings > 0 && (
|
||||
<span className='tag tag-info'>
|
||||
{nbDifferencings < nbDisks && `${nbDifferencings}/${nbDisks}`} {_('backupIsDifferencing')}{' '}
|
||||
</span>
|
||||
)}
|
||||
{nbKeys > 0 && (
|
||||
<span className='tag tag-info'>
|
||||
{nbKeys < nbDisks && `${nbKeys}/${nbDisks}`} {_('backupisKey')}{' '}
|
||||
</span>
|
||||
)}
|
||||
{backup.size !== undefined && <span className='tag tag-info'>{formatSize(backup.size)}</span>}{' '}
|
||||
<FormattedDate
|
||||
value={new Date(backup.timestamp)}
|
||||
month='long'
|
||||
day='numeric'
|
||||
year='numeric'
|
||||
hour='2-digit'
|
||||
minute='2-digit'
|
||||
second='2-digit'
|
||||
/>
|
||||
</span>
|
||||
)
|
||||
},
|
||||
}
|
||||
|
||||
const renderXoItem = (item, { className, type: xoType, ...props } = {}) => {
|
||||
|
||||
@@ -2268,15 +2268,20 @@ export const deleteSr = sr =>
|
||||
|
||||
export const fetchSrStats = (sr, granularity) => _call('sr.stats', { id: resolveId(sr), granularity })
|
||||
|
||||
export const forgetSr = sr =>
|
||||
confirm({
|
||||
title: _('srForgetModalTitle'),
|
||||
body: _('srForgetModalMessage'),
|
||||
}).then(() => _call('sr.forget', { id: resolveId(sr) }), noop)
|
||||
export const forgetSr = sr => forgetSrs([sr])
|
||||
|
||||
export const forgetSrs = srs =>
|
||||
confirm({
|
||||
title: _('srsForgetModalTitle'),
|
||||
body: _('srsForgetModalMessage'),
|
||||
title: _('nSrsForget', { nSrs: srs.length }),
|
||||
body: (
|
||||
<p className='text-warning font-weight-bold'>
|
||||
{_('forgetNSrsModalMessage', { nSrs: srs.length })} {_('srForgetModalWarning')}
|
||||
</p>
|
||||
),
|
||||
strongConfirm: {
|
||||
messageId: 'nSrsForget',
|
||||
values: { nSrs: srs.length },
|
||||
},
|
||||
}).then(() => Promise.all(map(resolveIds(srs), id => _call('sr.forget', { id }))), noop)
|
||||
|
||||
export const reconnectAllHostsSr = sr =>
|
||||
|
||||
@@ -188,6 +188,7 @@ const getInitialState = ({ preSelectedVmIds, setHomeVmIdsSelection, suggestedExc
|
||||
deltaMode: false,
|
||||
drMode: false,
|
||||
name: '',
|
||||
nbdConcurrency: 1,
|
||||
preferNbd: false,
|
||||
remotes: [],
|
||||
schedules: {},
|
||||
@@ -629,6 +630,11 @@ const New = decorate([
|
||||
preferNbd,
|
||||
})
|
||||
},
|
||||
setNbdConcurrency({ setGlobalSettings }, nbdConcurrency) {
|
||||
setGlobalSettings({
|
||||
nbdConcurrency,
|
||||
})
|
||||
},
|
||||
},
|
||||
computed: {
|
||||
compressionId: generateId,
|
||||
@@ -637,6 +643,7 @@ const New = decorate([
|
||||
inputFullIntervalId: generateId,
|
||||
inputMaxExportRate: generateId,
|
||||
inputPreferNbd: generateId,
|
||||
inputNbdConcurrency: generateId,
|
||||
inputTimeoutId: generateId,
|
||||
|
||||
// In order to keep the user preference, the offline backup is kept in the DB
|
||||
@@ -748,6 +755,7 @@ const New = decorate([
|
||||
concurrency,
|
||||
fullInterval,
|
||||
maxExportRate,
|
||||
nbdConcurrency = 1,
|
||||
offlineBackup,
|
||||
offlineSnapshot,
|
||||
preferNbd,
|
||||
@@ -1040,6 +1048,20 @@ const New = decorate([
|
||||
/>
|
||||
</FormGroup>
|
||||
)}
|
||||
{state.isDelta && (
|
||||
<FormGroup>
|
||||
<label htmlFor={state.inputNbdConcurrency}>
|
||||
<strong>{_('nbdConcurrency')}</strong>
|
||||
</label>
|
||||
<Number
|
||||
id={state.inputNbdConcurrency}
|
||||
min={1}
|
||||
onChange={effects.setNbdConcurrency}
|
||||
value={nbdConcurrency}
|
||||
disabled={!state.inputPreferNbd}
|
||||
/>
|
||||
</FormGroup>
|
||||
)}
|
||||
<FormGroup>
|
||||
<label htmlFor={state.inputMaxExportRate}>
|
||||
<strong>{_('speedLimit')}</strong>
|
||||
|
||||
@@ -68,7 +68,7 @@ const normalize = state => {
|
||||
schedules = mapValues(schedules, ({ id, ...schedule }) => schedule)
|
||||
settings[''] = {
|
||||
...advancedSettings,
|
||||
reportWhen: reportWhen.value,
|
||||
reportWhen: reportWhen.value ?? reportWhen,
|
||||
reportRecipients: reportRecipients.length !== 0 ? reportRecipients : undefined,
|
||||
}
|
||||
return {
|
||||
|
||||
@@ -123,6 +123,10 @@ async function readPackagesFromChangelog(toRelease) {
|
||||
}
|
||||
|
||||
const { name, releaseType } = match.groups
|
||||
if (name in toRelease) {
|
||||
throw new Error('duplicate package to release in CHANGELOG.unreleased.md: ' + name)
|
||||
}
|
||||
|
||||
toRelease[name] = releaseType
|
||||
})
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user