Compare commits

..

1 Commits

Author SHA1 Message Date
Julien Fontanet
4b5d97f978 WiP: xapi-typegen 2022-09-02 11:10:15 +02:00
81 changed files with 8425 additions and 9720 deletions

View File

@@ -1,71 +0,0 @@
'use strict'
const LRU = require('lru-cache')
const Fuse = require('fuse-native')
const { VhdSynthetic } = require('vhd-lib')
const { Disposable, fromCallback } = require('promise-toolbox')
const { createLogger } = require('@xen-orchestra/log')
const { warn } = createLogger('vates:fuse-vhd')
// build a stat object for fuse-native, see
// https://github.com/fuse-friends/fuse-native/blob/master/test/fixtures/stat.js
const stat = st => {
  // translate the symbolic mode names into raw st_mode values
  let mode
  if (st.mode === 'dir') {
    mode = 16877 // drwxr-xr-x
  } else if (st.mode === 'file') {
    mode = 33188 // -rw-r--r--
  } else if (st.mode === 'link') {
    mode = 41453 // lrwxr-xr-x
  } else {
    mode = st.mode
  }
  return {
    // missing timestamps default to "now"
    mtime: st.mtime || new Date(),
    atime: st.atime || new Date(),
    ctime: st.ctime || new Date(),
    // explicit undefined checks: 0 is a valid size/uid/gid
    size: st.size !== undefined ? st.size : 0,
    mode,
    uid: st.uid !== undefined ? st.uid : process.getuid(),
    gid: st.gid !== undefined ? st.gid : process.getgid(),
  }
}
exports.mount = Disposable.factory(async function* mount(handler, diskPath, mountDir) {
const vhd = yield VhdSynthetic.fromVhdChain(handler, diskPath)
const cache = new LRU({
max: 16, // each cached block is 2MB in size
})
await vhd.readBlockAllocationTable()
const fuse = new Fuse(mountDir, {
async readdir(path, cb) {
if (path === '/') {
return cb(null, ['vhd0'])
}
cb(Fuse.ENOENT)
},
async getattr(path, cb) {
if (path === '/') {
return cb(
null,
stat({
mode: 'dir',
size: 4096,
})
)
}
if (path === '/vhd0') {
return cb(
null,
stat({
mode: 'file',
size: vhd.footer.currentSize,
})
)
}
cb(Fuse.ENOENT)
},
read(path, fd, buf, len, pos, cb) {
if (path === '/vhd0') {
return vhd
.readRawData(pos, len, cache, buf)
.then(cb)
}
throw new Error(`read file ${path} not exists`)
},
})
return new Disposable(
() => fromCallback(() => fuse.unmount()),
fromCallback(() => fuse.mount())
)
})

View File

@@ -1,30 +0,0 @@
{
"name": "@vates/fuse-vhd",
"version": "1.0.0",
"license": "ISC",
"private": false,
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/fuse-vhd",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "@vates/fuse-vhd",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"author": {
"name": "Vates SAS",
"url": "https://vates.fr"
},
"engines": {
"node": ">=10.0"
},
"dependencies": {
"@xen-orchestra/log": "^0.3.0",
"fuse-native": "^2.2.6",
"lru-cache": "^7.14.0",
"promise-toolbox": "^0.21.0",
"vhd-lib": "^4.1.0"
},
"scripts": {
"postversion": "npm publish --access public"
}
}

View File

@@ -1 +0,0 @@
../../scripts/npmignore

View File

@@ -1,41 +0,0 @@
// handshake / negotiation magic values
export const INIT_PASSWD = 'NBDMAGIC' // "NBDMAGIC" ensure we're connected to a nbd server
export const OPTS_MAGIC = 'IHAVEOPT' // "IHAVEOPT" start an option block
export const NBD_OPT_REPLY_MAGIC = 0x3e889045565a9 // magic received during negotiation
// option codes the client may send during negotiation
export const NBD_OPT_EXPORT_NAME = 1
export const NBD_OPT_ABORT = 2
export const NBD_OPT_LIST = 3
export const NBD_OPT_STARTTLS = 5
export const NBD_OPT_INFO = 6
export const NBD_OPT_GO = 7
// transmission flags sent by the server after the export is selected
export const NBD_FLAG_HAS_FLAGS = 1 << 0
export const NBD_FLAG_READ_ONLY = 1 << 1
export const NBD_FLAG_SEND_FLUSH = 1 << 2
export const NBD_FLAG_SEND_FUA = 1 << 3
export const NBD_FLAG_ROTATIONAL = 1 << 4
export const NBD_FLAG_SEND_TRIM = 1 << 5
// handshake flag (first 16 bits sent by the server)
export const NBD_FLAG_FIXED_NEWSTYLE = 1 << 0
// per-command flags
export const NBD_CMD_FLAG_FUA = 1 << 0
export const NBD_CMD_FLAG_NO_HOLE = 1 << 1
export const NBD_CMD_FLAG_DF = 1 << 2
export const NBD_CMD_FLAG_REQ_ONE = 1 << 3
export const NBD_CMD_FLAG_FAST_ZERO = 1 << 4
// command codes used during the transmission phase
export const NBD_CMD_READ = 0
export const NBD_CMD_WRITE = 1
export const NBD_CMD_DISC = 2
export const NBD_CMD_FLUSH = 3
export const NBD_CMD_TRIM = 4
export const NBD_CMD_CACHE = 5
export const NBD_CMD_WRITE_ZEROES = 6
export const NBD_CMD_BLOCK_STATUS = 7
export const NBD_CMD_RESIZE = 8
export const NBD_REQUEST_MAGIC = 0x25609513 // magic number to create a new NBD request to send to the server
export const NBD_REPLY_MAGIC = 0x67446698 // magic number received from the server when reading response to a nbd request
export const NBD_DEFAULT_PORT = 10809
export const NBD_DEFAULT_BLOCK_SIZE = 64 * 1024
// safety limit on the client-side reception buffer
export const MAX_BUFFER_LENGTH = 10 * 1024 * 1024

View File

@@ -1,272 +0,0 @@
import assert from 'node:assert'
import { Socket } from 'node:net'
import { connect } from 'node:tls'
import {
INIT_PASSWD,
MAX_BUFFER_LENGTH,
NBD_CMD_READ,
NBD_DEFAULT_BLOCK_SIZE,
NBD_DEFAULT_PORT,
NBD_FLAG_FIXED_NEWSTYLE,
NBD_FLAG_HAS_FLAGS,
NBD_OPT_EXPORT_NAME,
NBD_OPT_REPLY_MAGIC,
NBD_OPT_STARTTLS,
NBD_REPLY_MAGIC,
NBD_REQUEST_MAGIC,
OPTS_MAGIC,
} from './constants.mjs'
// documentation is here : https://github.com/NetworkBlockDevice/nbd/blob/master/doc/proto.md
// Minimal read-only NBD client: connects, performs the fixed-newstyle
// handshake (optionally upgrading to TLS), then reads blocks of the export.
export default class NbdClient {
  _serverAddress
  _serverCert
  _serverPort
  _serverSocket
  _useSecureConnection = false
  _exportname
  _nbDiskBlocks = 0

  _receptionBuffer = Buffer.alloc(0)
  _sendingBuffer = Buffer.alloc(0) // NOTE(review): currently unused

  // pending raw reads (handshake phase or block payloads), resolved in FIFO order
  _rawReadResolve = []
  _rawReadLength = []

  // AFAIK, there is no guaranty the server answers in the same order as the queries
  _nextCommandQueryId = BigInt(0)
  _commandQueries = {} // map of queries waiting for an answer

  constructor({ address, port = NBD_DEFAULT_PORT, exportname, cert, secure = true }) {
    // FIX: was `this._address = address`, which left `_serverAddress`
    // undefined and broke the TCP connection in _unsecureConnect()
    this._serverAddress = address
    this._serverPort = port
    this._exportname = exportname
    this._serverCert = cert
    this._useSecureConnection = secure
  }

  // number of NBD_DEFAULT_BLOCK_SIZE blocks in the export (set after handshake)
  get nbBlocks() {
    return this._nbDiskBlocks
  }

  // accumulate incoming bytes and dispatch them to pending readers
  _handleData(data) {
    if (data !== undefined) {
      this._receptionBuffer = Buffer.concat([this._receptionBuffer, Buffer.from(data)])
    }
    if (this._receptionBuffer.length > MAX_BUFFER_LENGTH) {
      throw new Error(
        `Buffer grown too much with a total size of ${this._receptionBuffer.length} bytes (last chunk is ${data.length})`
      )
    }
    // if we're waiting for a specific byte length (in the handshake for example or a block data)
    while (this._rawReadResolve.length > 0 && this._receptionBuffer.length >= this._rawReadLength[0]) {
      const resolve = this._rawReadResolve.shift()
      const waitingForLength = this._rawReadLength.shift()
      resolve(this._takeFromBuffer(waitingForLength))
    }
    // FIX: was `> 4`, which ignored a chunk containing exactly the 4-byte magic
    if (this._rawReadResolve.length === 0 && this._receptionBuffer.length >= 4) {
      if (this._receptionBuffer.readInt32BE(0) === NBD_REPLY_MAGIC) {
        // NOTE(review): fire-and-forget async call; a rejection here becomes
        // an unhandled promise rejection
        this._readBlockResponse()
      }
      // keep the received bits in the buffer for subsequent use
    }
  }

  _addListeners() {
    const serverSocket = this._serverSocket
    serverSocket.on('data', data => this._handleData(data))
    serverSocket.on('close', function () {
      console.log('Connection closed')
    })
    serverSocket.on('error', function (err) {
      // NOTE(review): throwing from an event listener yields an uncaught
      // exception; a proper error channel should be added
      throw err
    })
  }

  // upgrade the current plain socket to TLS (after NBD_OPT_STARTTLS)
  async _tlsConnect() {
    return new Promise(resolve => {
      this._serverSocket = connect(
        {
          socket: this._serverSocket,
          rejectUnauthorized: false,
          cert: this._serverCert,
        },
        resolve
      )
      this._addListeners()
    })
  }

  // open the initial plain TCP connection
  async _unsecureConnect() {
    this._serverSocket = new Socket()
    this._addListeners()
    return new Promise(resolve => {
      this._serverSocket.connect(this._serverPort, this._serverAddress, () => {
        resolve()
      })
    })
  }

  async connect() {
    // the connection always starts in plain TCP; the handshake upgrades it to
    // TLS when `secure` was requested
    await this._unsecureConnect()
    await this._handshake()
  }

  async disconnect() {
    this._serverSocket.destroy()
  }

  // send an option during the negotiation phase and check the server's ACK
  async _sendOption(option, buffer = Buffer.alloc(0)) {
    await this._writeToSocket(OPTS_MAGIC)
    await this._writeToSocketInt32(option)
    await this._writeToSocketInt32(buffer.length)
    await this._writeToSocket(buffer)
    // FIX: `assert(a, b)` treats b as a failure *message* and never compares;
    // use strictEqual so a bad reply actually fails
    assert.strictEqual(await this._readFromSocketInt64(), BigInt(NBD_OPT_REPLY_MAGIC)) // reply magic
    assert.strictEqual(await this._readFromSocketInt32(), option) // the option passed
    assert.strictEqual(await this._readFromSocketInt32(), 1) // NBD_REP_ACK
    const length = await this._readFromSocketInt32()
    assert.strictEqual(length, 0) // no payload expected on an ACK
  }

  async _handshake() {
    // FIX: these were `assert(a, b)` calls which never compared anything
    assert.strictEqual((await this._readFromSocket(8)).toString(), INIT_PASSWD)
    assert.strictEqual((await this._readFromSocket(8)).toString(), OPTS_MAGIC)
    const flagsBuffer = await this._readFromSocket(2)
    const flags = flagsBuffer.readInt16BE(0)
    // FIX: was `flags | NBD_FLAG_FIXED_NEWSTYLE`, which is always truthy
    assert.ok(flags & NBD_FLAG_FIXED_NEWSTYLE) // only FIXED_NEWSTYLE is supported from the server options
    await this._writeToSocketInt32(NBD_FLAG_FIXED_NEWSTYLE) // client also support NBD_FLAG_C_FIXED_NEWSTYLE
    if (this._useSecureConnection) {
      // upgrade socket to TLS
      await this._sendOption(NBD_OPT_STARTTLS)
      await this._tlsConnect()
    }
    // send export name required it also implicitly closes the negotiation phase
    await this._writeToSocket(Buffer.from(OPTS_MAGIC))
    await this._writeToSocketInt32(NBD_OPT_EXPORT_NAME)
    await this._writeToSocketInt32(this._exportname.length)
    await this._writeToSocket(Buffer.from(this._exportname))

    // export size (8) + transmission flags (2) + 124 bytes of zero padding
    const answer = await this._readFromSocket(134)
    const exportSize = answer.readBigUInt64BE(0)
    const transmissionFlags = answer.readInt16BE(8)
    assert.ok(transmissionFlags & NBD_FLAG_HAS_FLAGS, 'NBD_FLAG_HAS_FLAGS') // must always be 1 by the norm
    // xapi server always send NBD_FLAG_READ_ONLY (3) as a flag
    this._nbDiskBlocks = Number(exportSize / BigInt(NBD_DEFAULT_BLOCK_SIZE))
    this._exportSize = exportSize
  }

  // remove and return the first `length` bytes of the reception buffer
  _takeFromBuffer(length) {
    const res = Buffer.from(this._receptionBuffer.slice(0, length))
    this._receptionBuffer = this._receptionBuffer.slice(length)
    return res
  }

  // resolve with `length` bytes, waiting for more data if needed
  _readFromSocket(length) {
    if (this._receptionBuffer.length >= length) {
      return this._takeFromBuffer(length)
    }
    return new Promise(resolve => {
      this._rawReadResolve.push(resolve)
      this._rawReadLength.push(length)
    })
  }

  _writeToSocket(buffer) {
    return new Promise(resolve => {
      this._serverSocket.write(buffer, resolve)
    })
  }

  async _readFromSocketInt32() {
    const buffer = await this._readFromSocket(4)
    return buffer.readInt32BE(0)
  }

  // NOTE: returns a BigInt
  async _readFromSocketInt64() {
    const buffer = await this._readFromSocket(8)
    return buffer.readBigUInt64BE(0)
  }

  _writeToSocketUInt32(int) {
    const buffer = Buffer.alloc(4)
    buffer.writeUInt32BE(int)
    return this._writeToSocket(buffer)
  }

  _writeToSocketInt32(int) {
    const buffer = Buffer.alloc(4)
    buffer.writeInt32BE(int)
    return this._writeToSocket(buffer)
  }

  _writeToSocketInt16(int) {
    const buffer = Buffer.alloc(2)
    buffer.writeInt16BE(int)
    return this._writeToSocket(buffer)
  }

  _writeToSocketInt64(int) {
    const buffer = Buffer.alloc(8)
    buffer.writeBigUInt64BE(BigInt(int))
    return this._writeToSocket(buffer)
  }

  // consume one simple reply (magic already seen by _handleData) and resolve
  // the matching pending readBlock query
  async _readBlockResponse() {
    const magic = await this._readFromSocketInt32()
    if (magic !== NBD_REPLY_MAGIC) {
      throw new Error(`magic number for block answer is wrong : ${magic}`)
    }
    // error
    const error = await this._readFromSocketInt32()
    if (error !== 0) {
      throw new Error(`GOT ERROR CODE : ${error}`)
    }
    const blockQueryId = await this._readFromSocketInt64()
    const query = this._commandQueries[blockQueryId]
    if (!query) {
      throw new Error(` no query associated with id ${blockQueryId} ${Object.keys(this._commandQueries)}`)
    }
    delete this._commandQueries[blockQueryId]
    const data = await this._readFromSocket(query.size)
    assert.strictEqual(data.length, query.size)
    query.resolve(data)
    // there may be another reply already buffered
    this._handleData()
  }

  // read one block of `size` bytes at block `index` (offset = index * size)
  async readBlock(index, size = NBD_DEFAULT_BLOCK_SIZE) {
    const queryId = this._nextCommandQueryId
    this._nextCommandQueryId++

    const buffer = Buffer.alloc(28)
    buffer.writeInt32BE(NBD_REQUEST_MAGIC, 0)
    buffer.writeInt16BE(0, 4) // no command flags for a simple block read
    buffer.writeInt16BE(NBD_CMD_READ, 6)
    buffer.writeBigUInt64BE(queryId, 8)
    // byte offset in the raw disk
    const offset = BigInt(index) * BigInt(size)
    buffer.writeBigUInt64BE(offset, 16)
    // ensure we do not read after the end of the export (which immediately disconnect us)
    const maxSize = Math.min(Number(this._exportSize - offset), size)
    // size wanted
    buffer.writeInt32BE(maxSize, 24)
    return new Promise(resolve => {
      this._commandQueries[queryId] = {
        size: maxSize,
        resolve,
      }
      // write command at once to ensure no concurrency issue
      this._writeToSocket(buffer)
    })
  }
}

View File

@@ -1,75 +0,0 @@
import assert from 'assert'
import NbdClient from './index.mjs'
import { spawn } from 'node:child_process'
import fs from 'node:fs/promises'
import { test } from 'tap'
import tmp from 'tmp'
import { pFromCallback } from 'promise-toolbox'
import { asyncEach } from '@vates/async-each'
const FILE_SIZE = 2 * 1024 * 1024
/**
 * Create a temporary file of `size` bytes filled with a deterministic
 * pattern: every 4-byte big-endian word contains its own byte offset,
 * so any block read back can be verified against its position.
 *
 * @param {number} size - file size in bytes (expected to be a multiple of 4)
 * @returns {Promise<string>} path of the created file
 */
async function createTempFile(size) {
  const path = await pFromCallback(cb => tmp.file(cb))
  const pattern = Buffer.alloc(size)
  let offset = 0
  while (offset < size) {
    pattern.writeUInt32BE(offset, offset)
    offset += 4
  }
  await fs.writeFile(path, pattern)
  return path
}
// Integration test: requires the `nbdkit` binary to be installed on the host.
// It serves a generated fixture file over plain (non-TLS) NBD and checks that
// concurrent reads through NbdClient return the expected bytes.
test('it works with unsecured network', async tap => {
  const path = await createTempFile(FILE_SIZE)
  // spawn a local NBD server exposing the fixture file read-only
  const nbdServer = spawn(
    'nbdkit',
    [
      'file',
      path,
      '--newstyle', //
      '--exit-with-parent',
      '--read-only',
      '--export-name=MY_SECRET_EXPORT',
    ],
    {
      stdio: ['inherit', 'inherit', 'inherit'],
    }
  )
  const client = new NbdClient({
    address: 'localhost',
    exportname: 'MY_SECRET_EXPORT',
    secure: false,
  })
  await client.connect()
  const CHUNK_SIZE = 32 * 1024 // non default size
  const indexes = []
  for (let i = 0; i < FILE_SIZE / CHUNK_SIZE; i++) {
    indexes.push(i)
  }
  // read mutiple blocks in parallel
  await asyncEach(
    indexes,
    async i => {
      const block = await client.readBlock(i, CHUNK_SIZE)
      // the fixture stores each 4-byte word's absolute offset, so every word
      // of the block can be checked independently
      let blockOk = true
      let firstFail
      for (let j = 0; j < CHUNK_SIZE; j += 4) {
        const wanted = i * CHUNK_SIZE + j
        const found = block.readUInt32BE(j)
        blockOk = blockOk && found === wanted
        if (!blockOk && firstFail === undefined) {
          firstFail = j
        }
      }
      tap.ok(blockOk, `check block ${i} content`)
    },
    { concurrency: 8 }
  )
  await client.disconnect()
  nbdServer.kill()
  await fs.unlink(path)
})

View File

@@ -1,152 +0,0 @@
import NbdClient from '../index.js'
import { Xapi } from 'xen-api'
import readline from 'node:readline'
import { stdin as input, stdout as output } from 'node:process'
import { asyncMap } from '@xen-orchestra/async-map'
import { downloadVhd, getFullBlocks, getChangedNbdBlocks } from './utils.mjs'
// Interactive demo: compare the two most recent CBT-enabled snapshots of a
// VDI and benchmark reading the changed blocks through NBD.
// NOTE(review): credentials and URL are hard-coded lab values
const xapi = new Xapi({
  auth: {
    user: 'root',
    password: 'vateslab',
  },
  url: '172.16.210.11',
  allowUnauthorized: true,
})
await xapi.connect()
// look for a network with NBD enabled (with or without TLS)
const networks = await xapi.call('network.get_all_records')
const nbdNetworks = Object.values(networks).filter(
  network => network.purpose.includes('nbd') || network.purpose.includes('insecure_nbd')
)
let secure = false
if (!nbdNetworks.length) {
  console.log(`you don't have any nbd enabled network`)
  console.log(`please add a purpose of nbd (to use tls) or insecure_nbd to oneof the host network`)
  process.exit()
}
const network = nbdNetworks[0]
secure = network.purpose.includes('nbd')
console.log(`we will use network **${network.name_label}** ${secure ? 'with' : 'without'} TLS`)
const rl = readline.createInterface({ input, output })
// promisified readline question
const question = text => {
  return new Promise(resolve => {
    rl.question(text, resolve)
  })
}
let vmuuid, vmRef
do {
  vmuuid = await question('VM uuid ? ')
  try {
    vmRef = xapi.getObject(vmuuid).$ref
  } catch (e) {
    console.log('maybe the objects was not loaded, try again ')
    await new Promise(resolve => setTimeout(resolve, 1000))
  }
} while (!vmRef)
// list the VDIs attached to the VM, ignoring empty VBDs
const vdiRefs = (
  await asyncMap(await xapi.call('VM.get_VBDs', vmRef), async vbd => {
    const vdi = await xapi.call('VBD.get_VDI', vbd)
    return vdi
  })
).filter(vdiRef => vdiRef !== 'OpaqueRef:NULL')
const vdiRef = vdiRefs[0]
const vdi = xapi.getObject(vdiRef)
console.log('Will work on vdi [', vdi.name_label, ']')
const cbt_enabled = vdi.cbt_enabled
console.log('Change block tracking is [', cbt_enabled ? 'enabled' : 'disabled', ']')
if (!cbt_enabled) {
  const shouldEnable = await question('would you like to enable it ? Y/n ')
  if (shouldEnable === 'Y') {
    await xapi.call('VDI.enable_cbt', vdiRef)
    console.log('CBT is now enable for this VDI')
    console.log('You must make a snapshot, write some data and relaunch this script to backup changes')
  } else {
    console.warn('did nothing')
  }
  process.exit()
}
console.log('will search for suitable snapshots')
const snapshots = vdi.snapshots.map(snapshotRef => xapi.getObject(snapshotRef)).filter(({ cbt_enabled }) => cbt_enabled)
if (snapshots.length < 2) {
  throw new Error(`not enough snapshots with cbt enabled , found ${snapshots.length} and 2 are needed`)
}
console.log('found snapshots will compare last two snapshots with cbt_enabled')
const snapshotRef = xapi.getObject(snapshots[snapshots.length - 1].uuid).$ref
const snapshotTarget = xapi.getObject(snapshots[snapshots.length - 2].uuid).$ref
console.log('older snapshot is ', xapi.getObject(snapshotRef).snapshot_time)
console.log('newer one is ', xapi.getObject(snapshotTarget).snapshot_time)
console.log('## will get bitmap of changed blocks')
const cbt = Buffer.from(await xapi.call('VDI.list_changed_blocks', snapshotRef, snapshotTarget), 'base64')
console.log('got changes')
console.log('will connect to NBD server')
const nbd = (await xapi.call('VDI.get_nbd_info', snapshotTarget))[0]
if (!nbd) {
  console.error('Nbd is not enabled on the host')
  console.error('you should add `insecure_nbd` as the `purpose` of a network of this host')
  process.exit()
}
// FIX: was hard-coded to `true`, which broke the connection when only an
// insecure_nbd network is available; use the TLS capability detected above
nbd.secure = secure
const client = new NbdClient(nbd)
await client.connect()
// @todo : should also handle last blocks that could be incomplete
const stats = {}
for (const nbBlocksRead of [32, 16, 8, 4, 2, 1]) {
  const blockSize = nbBlocksRead * 64 * 1024
  stats[blockSize] = {}
  // test whether bit `bit` is set in the CBT bitmap
  const MASK = 0x80
  const test = (map, bit) => ((map[bit >> 3] << (bit & 7)) & MASK) !== 0
  // coalesce the per-64KB bitmap into blocks of `nbBlocksRead` * 64KB
  const changed = []
  for (let i = 0; i < (cbt.length * 8) / nbBlocksRead; i++) {
    let blockChanged = false
    for (let j = 0; j < nbBlocksRead; j++) {
      blockChanged = blockChanged || test(cbt, i * nbBlocksRead + j)
    }
    if (blockChanged) {
      changed.push(i)
    }
  }
  console.log(changed.length, 'block changed')
  for (const concurrency of [32, 16, 8, 4, 2]) {
    const { speed } = await getChangedNbdBlocks(client, changed, concurrency, blockSize)
    stats[blockSize][concurrency] = speed
  }
}
console.log('speed summary')
console.table(stats)
console.log('## will check full download of the base vdi ')
// FIX: getFullBlocks takes a single options object; the previous positional
// call (client, 16, 512 * 1024) left every option undefined
await getFullBlocks({ nbdClient: client, concurrency: 16, nbBlocksRead: 8 /* 512 kB blocks, a good sweet spot */ })
console.log('## will check vhd delta export size and speed')
console.log('## will check full vhd export size and speed')
// FIX: downloadVhd takes a single options object ({ xapi, query })
await downloadVhd({
  xapi,
  query: {
    format: 'vhd',
    vdi: snapshotTarget,
  },
})

View File

@@ -1,97 +0,0 @@
import NbdClient from '../index.js'
import { Xapi } from 'xen-api'
import { asyncMap } from '@xen-orchestra/async-map'
import { downloadVhd, getFullBlocks } from './utils.mjs'
import fs from 'fs/promises'
// Demo: dump a snapshot of a VM disk to local files, once through NBD and
// twice through the XAPI export (raw and VHD), to compare the results.
// NOTE(review): credentials, URL and VM uuid are hard-coded lab values
const xapi = new Xapi({
  auth: {
    user: 'root',
    password: 'vateslab',
  },
  url: '172.16.210.11',
  allowUnauthorized: true,
})
await xapi.connect()
const vmuuid = '123e4f2b-498e-d0af-15ae-f835a1e9f59f'
let vmRef
do {
  try {
    vmRef = xapi.getObject(vmuuid).$ref
  } catch (e) {
    console.log('maybe the objects was not loaded, try again ')
    await new Promise(resolve => setTimeout(resolve, 1000))
  }
} while (!vmRef)
// list the VDIs attached to the VM, ignoring empty VBDs
const vdiRefs = (
  await asyncMap(await xapi.call('VM.get_VBDs', vmRef), async vbd => {
    const vdi = await xapi.call('VBD.get_VDI', vbd)
    return vdi
  })
).filter(vdiRef => vdiRef !== 'OpaqueRef:NULL')
const vdiRef = vdiRefs[0]
const vdi = xapi.getObject(vdiRef)
console.log('Will work on vdi [', vdi.name_label, ']')
console.log('will search for suitable snapshots')
const snapshots = vdi.snapshots.map(snapshotRef => xapi.getObject(snapshotRef))
console.log('found snapshots will use the last one for tests')
const snapshotRef = xapi.getObject(snapshots[snapshots.length - 1].uuid).$ref
console.log('will connect to NBD server')
const nbd = (await xapi.call('VDI.get_nbd_info', snapshotRef))[0]
// FIX: this check was duplicated twice in a row; keep a single occurrence
if (!nbd) {
  console.error('Nbd is not enabled on the host')
  console.error('you should add `insecure_nbd` as the `purpose` of a network of this host')
  process.exit()
}
const nbdClient = new NbdClient(nbd)
await nbdClient.connect()
// full dump through NBD
let fd = await fs.open('/tmp/nbd.raw', 'w')
await getFullBlocks({
  nbdClient,
  concurrency: 8,
  nbBlocksRead: 16 /* 1MB block */,
  fd,
})
console.log(' done nbd ')
await fd.close()
// full dump through the raw XAPI export
fd = await fs.open('/tmp/export.raw', 'w')
await downloadVhd({
  xapi,
  query: {
    format: 'raw',
    vdi: snapshotRef,
  },
  fd,
})
// FIX: close was not awaited, the file could still be open/unflushed when
// the next step (or process exit) happened
await fd.close()
// full dump through the VHD XAPI export
fd = await fs.open('/tmp/export.vhd', 'w')
await downloadVhd({
  xapi,
  query: {
    format: 'vhd',
    vdi: snapshotRef,
  },
  fd,
})
await fd.close()

View File

@@ -1,117 +0,0 @@
import NbdClient from '../index.js'
import { Xapi } from 'xen-api'
import readline from 'node:readline'
import { stdin as input, stdout as output } from 'node:process'
import { asyncMap } from '@xen-orchestra/async-map'
import { downloadVhd, getFullBlocks } from './utils.mjs'
// Interactive benchmark: measure NBD full-read throughput for various block
// sizes and concurrency levels, then compare with the XAPI VHD/raw exports.
// NOTE(review): credentials, URL and VM uuid are hard-coded lab values
const xapi = new Xapi({
  auth: {
    user: 'root',
    password: 'vateslab',
  },
  url: '172.16.210.11',
  allowUnauthorized: true,
})
await xapi.connect()
const networks = await xapi.call('network.get_all_records')
console.log({ networks })
// look for a network with NBD enabled (with or without TLS)
const nbdNetworks = Object.values(networks).filter(
  network => network.purpose.includes('nbd') || network.purpose.includes('insecure_nbd')
)
let secure = false
if (!nbdNetworks.length) {
  console.log(`you don't have any nbd enabled network`)
  console.log(`please add a purpose of nbd (to use tls) or insecure_nbd to oneof the host network`)
  process.exit()
}
const network = nbdNetworks[0]
secure = network.purpose.includes('nbd')
console.log(`we will use network **${network.name_label}** ${secure ? 'with' : 'without'} TLS`)
const rl = readline.createInterface({ input, output })
// promisified readline question
const question = text => {
  return new Promise(resolve => {
    rl.question(text, resolve)
  })
}
let vmuuid, vmRef
do {
  vmuuid = '123e4f2b-498e-d0af-15ae-f835a1e9f59f' // await question('VM uuid ? ')
  try {
    vmRef = xapi.getObject(vmuuid).$ref
  } catch (e) {
    console.log(e)
    console.log('maybe the objects was not loaded, try again ')
    // FIX: wait before retrying like the sibling scripts do, instead of
    // busy-looping on a cold object cache
    await new Promise(resolve => setTimeout(resolve, 1000))
  }
} while (!vmRef)
// list the VDIs attached to the VM, ignoring empty VBDs
const vdiRefs = (
  await asyncMap(await xapi.call('VM.get_VBDs', vmRef), async vbd => {
    const vdi = await xapi.call('VBD.get_VDI', vbd)
    return vdi
  })
).filter(vdiRef => vdiRef !== 'OpaqueRef:NULL')
const vdiRef = vdiRefs[0]
const vdi = xapi.getObject(vdiRef)
console.log('Will work on vdi [', vdi.name_label, ']')
console.log('will search for suitable snapshots')
const snapshots = vdi.snapshots.map(snapshotRef => xapi.getObject(snapshotRef))
console.log('found snapshots will use the last one for tests')
const snapshotRef = xapi.getObject(snapshots[snapshots.length - 1].uuid).$ref
console.log('will connect to NBD server')
const nbd = (await xapi.call('VDI.get_nbd_info', snapshotRef))[0]
if (!nbd) {
  console.error('Nbd is not enabled on the host')
  console.error('you should add `insecure_nbd` as the `purpose` of a network of this host')
  process.exit()
}
nbd.secure = secure
const nbdClient = new NbdClient(nbd)
await nbdClient.connect()
const maxDuration =
  parseInt(await question('Maximum duration per test in second ? (-1 for unlimited, default 30) '), 10) || 30
console.log('Will start downloading blocks during ', maxDuration, 'seconds')
console.log('## will check the vhd download speed')
const stats = {}
for (const nbBlocksRead of [32, 16, 8, 4, 2, 1]) {
  const blockSize = nbBlocksRead * 64 * 1024
  stats[blockSize] = {}
  for (const concurrency of [32, 16, 8, 4, 2]) {
    // FIX: pass the maxDuration the user asked for, it was previously ignored
    const { speed } = await getFullBlocks({ nbdClient, concurrency, nbBlocksRead, maxDuration })
    // FIX: results were all written to stats[concurrency], overwriting the
    // per-block-size rows; index by block size then concurrency as the
    // summary table expects
    stats[blockSize][concurrency] = speed
  }
}
console.log('speed summary')
console.table(stats)
console.log('## will check full vhd export size and speed')
// FIX: downloadVhd takes a single options object ({ xapi, query })
await downloadVhd({
  xapi,
  query: {
    format: 'vhd',
    vdi: snapshotRef,
  },
})
console.log('## will check full raw export size and speed')
await downloadVhd({
  xapi,
  query: {
    format: 'raw',
    vdi: snapshotRef,
  },
})
process.exit()

View File

@@ -1,116 +0,0 @@
import { asyncEach } from '@vates/async-each'
import { CancelToken } from 'promise-toolbox'
import zlib from 'node:zlib'
/**
 * Read the given changed blocks through NBD, gzip each one to estimate the
 * compression ratio, and log/return the observed throughput.
 *
 * @param {object} nbdClient - connected NBD client
 * @param {number[]} changed - indexes of the blocks to read
 * @param {number} concurrency - how many blocks are read in parallel
 * @param {number} blockSize - size of each block, in bytes
 * @param {number} [maxDuration=30] - stop launching reads after this many
 *   seconds, <= 0 for unlimited (generalized from a hard-coded 30s)
 * @returns {Promise<{speed: number}>} read throughput in MB/s
 */
export async function getChangedNbdBlocks(nbdClient, changed, concurrency, blockSize, maxDuration = 30) {
  let nbModified = 0
  let size = 0
  let compressedSize = 0
  const start = new Date()
  console.log('### with concurrency ', concurrency, ' blockSize ', blockSize / 1024 / 1024, 'MB')
  // periodic progress report
  const interval = setInterval(() => {
    console.log(`${nbModified} block handled in ${new Date() - start} ms`)
  }, 5000)
  await asyncEach(
    changed,
    async blockIndex => {
      // stop launching new reads once the time budget is exhausted
      if (maxDuration > 0 && new Date() - start > maxDuration * 1000) {
        return
      }
      const data = await nbdClient.readBlock(blockIndex, blockSize)
      await new Promise((resolve, reject) => {
        zlib.gzip(data, { level: zlib.constants.Z_BEST_SPEED }, (error, compressed) => {
          // FIX: the error was previously ignored, crashing on `compressed.length`
          if (error != null) {
            return reject(error)
          }
          compressedSize += compressed.length
          resolve()
        })
      })
      size += data?.length ?? 0
      nbModified++
    },
    {
      concurrency,
    }
  )
  clearInterval(interval)
  console.log('duration :', new Date() - start)
  console.log('read : ', size, 'octets, compressed: ', compressedSize, 'ratio ', size / compressedSize)
  console.log('speed : ', Math.round(((size / 1024 / 1024) * 1000) / (new Date() - start)), 'MB/s')
  return { speed: Math.round(((size / 1024 / 1024) * 1000) / (new Date() - start)) }
}
/**
 * Read the whole export exposed by `nbdClient`, `nbBlocksRead` * 64KB at a
 * time, optionally writing the data to `fd`, and log/return the throughput.
 *
 * @param {object} options
 * @param {object} options.nbdClient - connected NBD client
 * @param {number} [options.concurrency=1] - parallel reads
 * @param {number} [options.nbBlocksRead=1] - 64KB units per read
 * @param {object} [options.fd] - open FileHandle to write the blocks into
 * @param {number} [options.maxDuration=-1] - time budget in seconds, <= 0 for unlimited
 * @returns {Promise<{speed: number}>} read throughput in MB/s
 */
export async function getFullBlocks({ nbdClient, concurrency = 1, nbBlocksRead = 1, fd, maxDuration = -1 } = {}) {
  const blockSize = nbBlocksRead * 64 * 1024
  let nbModified = 0
  let size = 0
  console.log('### with concurrency ', concurrency)
  const start = new Date()
  console.log(' max nb blocks ', nbdClient.nbBlocks / nbBlocksRead)
  // lazily yields every block index of the export at the chosen granularity
  function* blockIterator() {
    for (let i = 0; i < nbdClient.nbBlocks / nbBlocksRead; i++) {
      yield i
    }
  }
  // periodic progress report
  const interval = setInterval(() => {
    console.log(`${nbModified} block handled in ${new Date() - start} ms`)
  }, 5000)
  await asyncEach(
    blockIterator(),
    async blockIndex => {
      // skip remaining blocks once the time budget is exhausted
      if (maxDuration > 0 && new Date() - start > maxDuration * 1000) {
        return
      }
      const data = await nbdClient.readBlock(blockIndex, blockSize)
      size += data?.length ?? 0
      nbModified++
      if (fd) {
        // blocks may complete out of order, so write at the absolute offset
        await fd.write(data, 0, data.length, blockIndex * blockSize)
      }
    },
    {
      concurrency,
    }
  )
  clearInterval(interval)
  if (new Date() - start < 10000) {
    console.warn(
      `data set too small or performance to high, result won't be usefull. Please relaunch with bigger snapshot or higher maximum data size `
    )
  }
  console.log('duration :', new Date() - start)
  console.log('nb blocks : ', nbModified)
  console.log('read : ', size, 'octets')
  const speed = Math.round(((size / 1024 / 1024) * 1000 * 100) / (new Date() - start)) / 100
  console.log('speed : ', speed, 'MB/s')
  return { speed }
}
/**
 * Stream a VDI export through the XAPI `/export_raw_vdi/` HTTP handler,
 * optionally writing it to `fd`, and log/return the observed throughput.
 *
 * @param {object} options
 * @param {object} options.xapi - connected Xapi instance
 * @param {object} options.query - export query (e.g. { format: 'vhd'|'raw', vdi })
 * @param {object} [options.fd] - open FileHandle to write the stream into
 * @param {number} [options.maxDuration=-1] - time budget in seconds, <= 0 for unlimited
 * @returns {Promise<{speed: number}>} download throughput in MB/s
 */
export async function downloadVhd({ xapi, query, fd, maxDuration = -1 } = {}) {
  const startStream = new Date()
  let sizeStream = 0
  let nbChunk = 0
  // periodic progress report
  const interval = setInterval(() => {
    console.log(`${nbChunk} chunks , ${sizeStream} octets handled in ${new Date() - startStream} ms`)
  }, 5000)
  const stream = await xapi.getResource(CancelToken.none, '/export_raw_vdi/', {
    query,
  })
  for await (const chunk of stream) {
    sizeStream += chunk.length
    if (fd) {
      await fd.write(chunk)
    }
    nbChunk++
    if (maxDuration > 0 && new Date() - startStream > maxDuration * 1000) {
      break
    }
  }
  clearInterval(interval)
  console.log('Stream duration :', new Date() - startStream)
  console.log('Stream read : ', sizeStream, 'octets')
  const speed = Math.round(((sizeStream / 1024 / 1024) * 1000 * 100) / (new Date() - startStream)) / 100
  console.log('speed : ', speed, 'MB/s')
  // FIX: return the measured speed like the other helpers in this module
  return { speed }
}

View File

@@ -7,8 +7,8 @@
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"dependencies": {
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/backups": "^0.28.0",
"@xen-orchestra/fs": "^3.1.0",
"@xen-orchestra/backups": "^0.27.4",
"@xen-orchestra/fs": "^3.0.0",
"filenamify": "^4.1.0",
"getopts": "^2.2.5",
"lodash": "^4.17.15",
@@ -27,7 +27,7 @@
"scripts": {
"postversion": "npm publish --access public"
},
"version": "0.7.8",
"version": "0.7.7",
"license": "AGPL-3.0-or-later",
"author": {
"name": "Vates SAS",

View File

@@ -22,15 +22,11 @@ const zlib = require('zlib')
const { BACKUP_DIR } = require('./_getVmBackupDir.js')
const { cleanVm } = require('./_cleanVm.js')
const { formatFilenameDate } = require('./_filenameDate.js')
const { getTmpDir } = require('./_getTmpDir.js')
const { isMetadataFile } = require('./_backupType.js')
const { isValidXva } = require('./_isValidXva.js')
const { listPartitions, LVM_PARTITION_TYPE } = require('./_listPartitions.js')
const { lvs, pvs } = require('./_lvm.js')
// @todo : this import is marked extraneous, should be fixed when the lib is published
const { mount } = require('@vates/fuse-vhd')
const { asyncEach } = require('@vates/async-each')
const DIR_XO_CONFIG_BACKUPS = 'xo-config-backups'
exports.DIR_XO_CONFIG_BACKUPS = DIR_XO_CONFIG_BACKUPS
@@ -38,7 +34,7 @@ exports.DIR_XO_CONFIG_BACKUPS = DIR_XO_CONFIG_BACKUPS
const DIR_XO_POOL_METADATA_BACKUPS = 'xo-pool-metadata-backups'
exports.DIR_XO_POOL_METADATA_BACKUPS = DIR_XO_POOL_METADATA_BACKUPS
const { debug, warn } = createLogger('xo:backups:RemoteAdapter')
const { warn } = createLogger('xo:backups:RemoteAdapter')
const compareTimestamp = (a, b) => a.timestamp - b.timestamp
@@ -48,6 +44,8 @@ const resolveRelativeFromFile = (file, path) => resolve('/', dirname(file), path
const resolveSubpath = (root, path) => resolve(root, `.${resolve('/', path)}`)
const RE_VHDI = /^vhdi(\d+)$/
async function addDirectory(files, realPath, metadataPath) {
const stats = await lstat(realPath)
if (stats.isDirectory()) {
@@ -76,14 +74,12 @@ const debounceResourceFactory = factory =>
}
class RemoteAdapter {
constructor(handler, { debounceResource = res => res, dirMode, vhdDirectoryCompression, useGetDiskLegacy=false } = {}) {
constructor(handler, { debounceResource = res => res, dirMode, vhdDirectoryCompression } = {}) {
this._debounceResource = debounceResource
this._dirMode = dirMode
this._handler = handler
this._vhdDirectoryCompression = vhdDirectoryCompression
this._readCacheListVmBackups = synchronized.withKey()(this._readCacheListVmBackups)
this._useGetDiskLegacy = useGetDiskLegacy
}
get handler() {
@@ -131,9 +127,7 @@ class RemoteAdapter {
}
async *_getPartition(devicePath, partition) {
// the norecovery option is necessary because if the partition is dirty,
// mount will try to fix it which is impossible if because the device is read-only
const options = ['loop', 'ro', 'norecovery']
const options = ['loop', 'ro']
if (partition !== undefined) {
const { size, start } = partition
@@ -230,30 +224,11 @@ class RemoteAdapter {
return promise
}
#removeVmBackupsFromCache(backups) {
for (const [dir, filenames] of Object.entries(
groupBy(
backups.map(_ => _._filename),
dirname
)
)) {
// detached async action, will not reject
this._updateCache(dir + '/cache.json.gz', backups => {
for (const filename of filenames) {
debug('removing cache entry', { entry: filename })
delete backups[filename]
}
})
}
}
async deleteDeltaVmBackups(backups) {
const handler = this._handler
// this will delete the json, unused VHDs will be detected by `cleanVm`
await asyncMapSettled(backups, ({ _filename }) => handler.unlink(_filename))
this.#removeVmBackupsFromCache(backups)
}
async deleteMetadataBackup(backupId) {
@@ -281,8 +256,6 @@ class RemoteAdapter {
await asyncMapSettled(backups, ({ _filename, xva }) =>
Promise.all([handler.unlink(_filename), handler.unlink(resolveRelativeFromFile(_filename, xva))])
)
this.#removeVmBackupsFromCache(backups)
}
deleteVmBackup(file) {
@@ -303,13 +276,14 @@ class RemoteAdapter {
full !== undefined && this.deleteFullVmBackups(full),
])
await asyncMap(new Set(files.map(file => dirname(file))), dir =>
// - don't merge in main process, unused VHDs will be merged in the next backup run
// - don't error in case this fails:
// - if lock is already being held, a backup is running and cleanVm will be ran at the end
// - otherwise, there is nothing more we can do, orphan file will be cleaned in the future
this.cleanVm(dir, { remove: true, logWarn: warn }).catch(noop)
)
const dirs = new Set(files.map(file => dirname(file)))
for (const dir of dirs) {
// don't merge in main process, unused VHDs will be merged in the next backup run
await this.cleanVm(dir, { remove: true, logWarn: warn })
}
const dedupedVmUuid = new Set(metadatas.map(_ => _.vm.uuid))
await asyncMap(dedupedVmUuid, vmUuid => this.invalidateVmBackupListCache(vmUuid))
}
#getCompressionType() {
@@ -324,10 +298,7 @@ class RemoteAdapter {
return this.#useVhdDirectory()
}
async *#getDiskLegacy(diskId) {
const RE_VHDI = /^vhdi(\d+)$/
async *getDisk(diskId) {
const handler = this._handler
const diskPath = handler._getFilePath('/' + diskId)
@@ -357,20 +328,6 @@ class RemoteAdapter {
}
}
async *getDisk(diskId) {
if(this._useGetDiskLegacy){
yield * this.#getDiskLegacy(diskId)
return
}
const handler = this._handler
// this is a disposable
const mountDir = yield getTmpDir()
// this is also a disposable
yield mount(handler, diskId, mountDir)
// this will yield disk path to caller
yield `${mountDir}/vhd0`
}
// partitionId values:
//
// - undefined: raw disk
@@ -421,25 +378,22 @@ class RemoteAdapter {
listPartitionFiles(diskId, partitionId, path) {
return Disposable.use(this.getPartition(diskId, partitionId), async rootPath => {
path = resolveSubpath(rootPath, path)
const entriesMap = {}
await asyncEach(
await readdir(path),
async name => {
try {
const stats = await lstat(`${path}/${name}`)
if (stats.isDirectory()) {
entriesMap[name + '/'] = {}
} else if (stats.isFile()) {
entriesMap[name] = {}
}
} catch (error) {
if (error == null || error.code !== 'ENOENT') {
throw error
}
await asyncMap(await readdir(path), async name => {
try {
const stats = await lstat(`${path}/${name}`)
if (stats.isDirectory()) {
entriesMap[name + '/'] = {}
} else if (stats.isFile()) {
entriesMap[name] = {}
}
},
{ concurrency: 1 }
)
} catch (error) {
if (error == null || error.code !== 'ENOENT') {
throw error
}
}
})
return entriesMap
})
@@ -504,46 +458,11 @@ class RemoteAdapter {
return backupsByPool
}
// Path of the gzipped JSON file caching the metadata of all backups of a VM.
#getVmBackupsCache(vmUuid) {
  return `${BACKUP_DIR}/${vmUuid}/cache.json.gz`
}
// Read and decode a gzipped JSON cache file.
//
// Returns the parsed content, or `undefined` when the file is missing or
// unreadable. A missing cache is expected, therefore ENOENT is not logged;
// any other failure is logged but not propagated, the cache being only an
// optimization.
async #readCache(path) {
  try {
    return JSON.parse(await fromCallback(zlib.gunzip, await this.handler.readFile(path)))
  } catch (error) {
    if (error.code !== 'ENOENT') {
      warn('#readCache', { error, path })
    }
  }
}
// Apply `fn` (a synchronous mutator) to the decoded content of the cache file
// at `path`, then write it back.
//
// No-op when the cache does not exist or cannot be read: it will simply be
// regenerated from the individual metadata files on the next listing.
//
// `synchronized.withKey()` serializes concurrent updates of the same path.
_updateCache = synchronized.withKey()(this._updateCache)
// eslint-disable-next-line no-dupe-class-members
async _updateCache(path, fn) {
  const cache = await this.#readCache(path)
  if (cache !== undefined) {
    fn(cache)
    await this.#writeCache(path, cache)
  }
}
// Serialize `data` to JSON, gzip it and write it to the cache file at `path`.
//
// Best effort: failures are logged but never propagated — the cache can be
// regenerated on demand, so a failed write must not break the caller.
async #writeCache(path, data) {
  try {
    await this.handler.writeFile(path, await fromCallback(zlib.gzip, JSON.stringify(data)), { flags: 'w' })
  } catch (error) {
    warn('#writeCache', { error, path })
  }
}
async invalidateVmBackupListCache(vmUuid) {
await this.handler.unlink(this.#getVmBackupsCache(vmUuid))
await this.handler.unlink(`${BACKUP_DIR}/${vmUuid}/cache.json.gz`)
}
async #getCachabledDataListVmBackups(dir) {
debug('generating cache', { path: dir })
const handler = this._handler
const backups = {}
@@ -579,26 +498,41 @@ class RemoteAdapter {
// if cache is missing or broken => regenerate it and return
async _readCacheListVmBackups(vmUuid) {
const path = this.#getVmBackupsCache(vmUuid)
const dir = `${BACKUP_DIR}/${vmUuid}`
const path = `${dir}/cache.json.gz`
const cache = await this.#readCache(path)
if (cache !== undefined) {
debug('found VM backups cache, using it', { path })
return cache
try {
const gzipped = await this.handler.readFile(path)
const text = await fromCallback(zlib.gunzip, gzipped)
return JSON.parse(text)
} catch (error) {
if (error.code !== 'ENOENT') {
warn('Cache file was unreadable', { vmUuid, error })
}
}
// nothing cached, or cache unreadable => regenerate it
const backups = await this.#getCachabledDataListVmBackups(`${BACKUP_DIR}/${vmUuid}`)
const backups = await this.#getCachabledDataListVmBackups(dir)
if (backups === undefined) {
return
}
// detached async action, will not reject
this.#writeCache(path, backups)
this.#writeVmBackupsCache(path, backups)
return backups
}
// Serialize, gzip and write the given backups index to `cacheFile`.
//
// Best effort: errors are logged but never propagated, since the cache can
// always be regenerated from the individual metadata files.
async #writeVmBackupsCache(cacheFile, backups) {
  try {
    const text = JSON.stringify(backups)
    const zipped = await fromCallback(zlib.gzip, text)
    await this.handler.writeFile(cacheFile, zipped, { flags: 'w' })
  } catch (error) {
    warn('writeVmBackupsCache', { cacheFile, error })
  }
}
async listVmBackups(vmUuid, predicate) {
const backups = []
const cached = await this._readCacheListVmBackups(vmUuid)
@@ -637,41 +571,18 @@ class RemoteAdapter {
return backups.sort(compareTimestamp)
}
// Write the metadata of a VM backup to the remote and add a matching entry to
// the per-VM backups cache.
//
// Returns the path of the created JSON metadata file.
async writeVmBackupMetadata(vmUuid, metadata) {
  const path = `/${BACKUP_DIR}/${vmUuid}/${formatFilenameDate(metadata.timestamp)}.json`
  await this.handler.outputFile(path, JSON.stringify(metadata), {
    dirMode: this._dirMode,
  })

  // will not throw
  //
  // detached async action: the cache update runs in the background and
  // `_updateCache` swallows read/write errors
  this._updateCache(this.#getVmBackupsCache(vmUuid), backups => {
    debug('adding cache entry', { entry: path })
    backups[path] = {
      ...metadata,

      // these values are required in the cache
      _filename: path,
      id: path,
    }
  })

  return path
}
async writeVhd(path, input, { checksum = true, validator = noop, writeBlockConcurrency, nbdClient } = {}) {
async writeVhd(path, input, { checksum = true, validator = noop } = {}) {
const handler = this._handler
if (this.#useVhdDirectory()) {
const dataPath = `${dirname(path)}/data/${uuidv4()}.vhd`
await createVhdDirectoryFromStream(handler, dataPath, input, {
concurrency: writeBlockConcurrency,
concurrency: 16,
compression: this.#getCompressionType(),
async validator() {
await input.task
return validator.apply(this, arguments)
},
nbdClient,
})
await VhdAbstract.createAlias(handler, path, dataPath)
} else {

View File

@@ -37,7 +37,7 @@ const computeVhdsSize = (handler, vhdPaths) =>
)
// chain is [ ancestor, child_1, ..., child_n ]
async function _mergeVhdChain(handler, chain, { logInfo, remove, merge, mergeBlockConcurrency }) {
async function _mergeVhdChain(handler, chain, { logInfo, remove, merge }) {
if (merge) {
logInfo(`merging VHD chain`, { chain })
@@ -55,7 +55,6 @@ async function _mergeVhdChain(handler, chain, { logInfo, remove, merge, mergeBlo
try {
return await mergeVhdChain(handler, chain, {
logInfo,
mergeBlockConcurrency,
onProgress({ done: d, total: t }) {
done = d
total = t
@@ -182,15 +181,7 @@ const defaultMergeLimiter = limitConcurrency(1)
exports.cleanVm = async function cleanVm(
vmDir,
{
fixMetadata,
remove,
merge,
mergeBlockConcurrency,
mergeLimiter = defaultMergeLimiter,
logInfo = noop,
logWarn = console.warn,
}
{ fixMetadata, remove, merge, mergeLimiter = defaultMergeLimiter, logInfo = noop, logWarn = console.warn }
) {
const limitedMergeVhdChain = mergeLimiter(_mergeVhdChain)
@@ -311,7 +302,6 @@ exports.cleanVm = async function cleanVm(
}
const jsons = new Set()
let mustInvalidateCache = false
const xvas = new Set()
const xvaSums = []
const entries = await handler.list(vmDir, {
@@ -360,7 +350,6 @@ exports.cleanVm = async function cleanVm(
if (remove) {
logInfo('deleting incomplete backup', { path: json })
jsons.delete(json)
mustInvalidateCache = true
await handler.unlink(json)
}
}
@@ -383,7 +372,6 @@ exports.cleanVm = async function cleanVm(
logWarn('some VHDs linked to the backup are missing', { backup: json, missingVhds })
if (remove) {
logInfo('deleting incomplete backup', { path: json })
mustInvalidateCache = true
jsons.delete(json)
await handler.unlink(json)
}
@@ -456,13 +444,7 @@ exports.cleanVm = async function cleanVm(
const metadataWithMergedVhd = {}
const doMerge = async () => {
await asyncMap(toMerge, async chain => {
const merged = await limitedMergeVhdChain(handler, chain, {
logInfo,
logWarn,
remove,
merge,
mergeBlockConcurrency,
})
const merged = await limitedMergeVhdChain(handler, chain, { logInfo, logWarn, remove, merge })
if (merged !== undefined) {
const metadataPath = vhdsToJSons[chain[chain.length - 1]] // all the chain should have the same metada file
metadataWithMergedVhd[metadataPath] = true
@@ -546,11 +528,6 @@ exports.cleanVm = async function cleanVm(
}
})
// purge cache if a metadata file has been deleted
if (mustInvalidateCache) {
await handler.unlink(vmDir + '/cache.json.gz')
}
return {
// boolean whether some VHDs were merged (or should be merged)
merge: toMerge.length !== 0,

View File

@@ -1,12 +1,12 @@
'use strict'
const compareVersions = require('compare-versions')
const find = require('lodash/find.js')
const groupBy = require('lodash/groupBy.js')
const ignoreErrors = require('promise-toolbox/ignoreErrors')
const omit = require('lodash/omit.js')
const { asyncMap } = require('@xen-orchestra/async-map')
const { CancelToken } = require('promise-toolbox')
const { compareVersions } = require('compare-versions')
const { createVhdStreamWithLength } = require('vhd-lib')
const { defer } = require('golike-defer')

View File

@@ -21,7 +21,7 @@
└─ xo-vm-backups
├─ index.json // TODO
└─ <VM UUID>
├─ cache.json.gz
├─ index.json // TODO
├─ vdis
│ └─ <job UUID>
│ └─ <VDI UUID>
@@ -45,18 +45,6 @@ When `useVhdDirectory` is enabled on the remote, the directory containing the VH
└─ <uuid>.vhd
```
## Cache for a VM
In a VM directory, if the file `cache.json.gz` exists, it contains the metadata for all the backups for this VM.
Add the following file: `xo-vm-backups/<VM UUID>/cache.json.gz`.
This cache is compressed with gzip and contains a JSON object with the metadata for all the backups of this VM indexed by their absolute path (i.e. `/xo-vm-backups/<VM UUID>/<timestamp>.json`).
This file is generated on demand when listing the backups, and directly updated on backup creation/deletion.
In case any incoherence is detected, the file is deleted so it will be fully generated when required.
## Attributes
### Of created snapshots

View File

@@ -8,7 +8,7 @@
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"version": "0.28.0",
"version": "0.27.4",
"engines": {
"node": ">=14.6"
},
@@ -16,18 +16,16 @@
"postversion": "npm publish --access public"
},
"dependencies": {
"@vates/async-each": "^1.0.0",
"@vates/cached-dns.lookup": "^1.0.0",
"@vates/compose": "^2.1.0",
"@vates/decorate-with": "^2.0.0",
"@vates/disposable": "^0.1.1",
"@vates/fuse-vhd": "^1.0.0",
"@vates/parse-duration": "^0.1.1",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/fs": "^3.1.0",
"@xen-orchestra/fs": "^3.0.0",
"@xen-orchestra/log": "^0.3.0",
"@xen-orchestra/template": "^0.1.0",
"compare-versions": "^5.0.1",
"compare-versions": "^4.0.1",
"d3-time-format": "^3.0.0",
"decorator-synchronized": "^0.6.0",
"end-of-stream": "^1.4.4",
@@ -39,8 +37,8 @@
"parse-pairs": "^1.1.0",
"promise-toolbox": "^0.21.0",
"proper-lockfile": "^4.1.2",
"uuid": "^9.0.0",
"vhd-lib": "^4.1.0",
"uuid": "^8.3.2",
"vhd-lib": "^4.0.0",
"yazl": "^2.5.1"
},
"devDependencies": {
@@ -48,7 +46,7 @@
"tmp": "^0.2.1"
},
"peerDependencies": {
"@xen-orchestra/xapi": "^1.5.0"
"@xen-orchestra/xapi": "^1.4.2"
},
"license": "AGPL-3.0-or-later",
"author": {

View File

@@ -19,7 +19,9 @@ const { AbstractDeltaWriter } = require('./_AbstractDeltaWriter.js')
const { checkVhd } = require('./_checkVhd.js')
const { packUuid } = require('./_packUuid.js')
const { Disposable } = require('promise-toolbox')
const NbdClient = require('nbd-client')
const { HealthCheckVmBackup } = require('../HealthCheckVmBackup.js')
const { ImportVmBackup } = require('../ImportVmBackup.js')
const { warn } = createLogger('xo:backups:DeltaBackupWriter')
exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(AbstractDeltaWriter) {
@@ -36,7 +38,6 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
try {
const vhds = await handler.list(`${vdisDir}/${srcVdi.uuid}`, {
filter: _ => _[0] !== '.' && _.endsWith('.vhd'),
ignoreMissing: true,
prependDir: true,
})
const packedBaseUuid = packUuid(baseUuid)
@@ -70,6 +71,35 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
return this._cleanVm({ merge: true })
}
healthCheck(sr) {
return Task.run(
{
name: 'health check',
},
async () => {
const xapi = sr.$xapi
const srUuid = sr.uuid
const adapter = this._adapter
const metadata = await adapter.readVmBackupMetadata(this._metadataFileName)
const { id: restoredId } = await new ImportVmBackup({
adapter,
metadata,
srUuid,
xapi,
}).run()
const restoredVm = xapi.getObject(restoredId)
try {
await new HealthCheckVmBackup({
restoredVm,
xapi,
}).run()
} finally {
await xapi.VM_destroy(restoredVm.$ref)
}
}
)
}
prepare({ isFull }) {
// create the task related to this export and ensure all methods are called in this context
const task = new Task({
@@ -159,6 +189,7 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
}/${adapter.getVhdFileName(basename)}`
)
const metadataFilename = (this._metadataFileName = `${backupDir}/${basename}.json`)
const metadataContent = {
jobId,
mode: job.mode,
@@ -199,24 +230,11 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
await checkVhd(handler, parentPath)
}
// get nbd if possible
const vdiRef = vm.$xapi.getObject(vdi.uuid).$ref
let nbdClient
try {
const [nbdInfo] = await vm.$xapi.call('VDI.get_nbd_info', vdiRef)
nbdClient = new NbdClient(nbdInfo)
await nbdClient.connect()
} catch (e) {
console.log('NBD error', e)
}
await adapter.writeVhd(path, deltaExport.streams[`${id}.vhd`], {
// no checksum for VHDs, because they will be invalidated by
// merges and chainings
checksum: false,
validator: tmpPath => checkVhd(handler, tmpPath),
writeBlockConcurrency: this._backup.config.writeBlockConcurrency,
nbdClient,
})
if (isDelta) {
@@ -236,7 +254,9 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
}
})
metadataContent.size = size
this._metadataFileName = await adapter.writeVmBackupMetadata(vm.uuid, metadataContent)
await handler.outputFile(metadataFilename, JSON.stringify(metadataContent), {
dirMode: backup.config.dirMode,
})
// TODO: run cleanup?
}

View File

@@ -34,6 +34,7 @@ exports.FullBackupWriter = class FullBackupWriter extends MixinBackupWriter(Abst
const { job, scheduleId, vm } = backup
const adapter = this._adapter
const handler = adapter.handler
const backupDir = getVmBackupDir(vm.uuid)
// TODO: clean VM backup directory
@@ -73,7 +74,9 @@ exports.FullBackupWriter = class FullBackupWriter extends MixinBackupWriter(Abst
return { size: sizeContainer.size }
})
metadata.size = sizeContainer.size
this._metadataFileName = await adapter.writeVmBackupMetadata(vm.uuid, metadata)
await handler.outputFile(metadataFilename, JSON.stringify(metadata), {
dirMode: backup.config.dirMode,
})
if (!deleteFirst) {
await deleteOldBackups()

View File

@@ -3,13 +3,10 @@
const { createLogger } = require('@xen-orchestra/log')
const { join } = require('path')
const assert = require('assert')
const { formatFilenameDate } = require('../_filenameDate.js')
const { getVmBackupDir } = require('../_getVmBackupDir.js')
const { HealthCheckVmBackup } = require('../HealthCheckVmBackup.js')
const { ImportVmBackup } = require('../ImportVmBackup.js')
const { Task } = require('../Task.js')
const MergeWorker = require('../merge-worker/index.js')
const { formatFilenameDate } = require('../_filenameDate.js')
const { Task } = require('../Task.js')
const { info, warn } = createLogger('xo:backups:MixinBackupWriter')
@@ -39,7 +36,6 @@ exports.MixinBackupWriter = (BaseClass = Object) =>
Task.warning(message, data)
},
lock: false,
mergeBlockConcurrency: this._backup.config.mergeBlockConcurrency,
})
})
} catch (error) {
@@ -75,39 +71,6 @@ exports.MixinBackupWriter = (BaseClass = Object) =>
const remotePath = handler._getRealPath()
await MergeWorker.run(remotePath)
}
}
healthCheck(sr) {
assert.notStrictEqual(
this._metadataFileName,
undefined,
'Metadata file name should be defined before making a healthcheck'
)
return Task.run(
{
name: 'health check',
},
async () => {
const xapi = sr.$xapi
const srUuid = sr.uuid
const adapter = this._adapter
const metadata = await adapter.readVmBackupMetadata(this._metadataFileName)
const { id: restoredId } = await new ImportVmBackup({
adapter,
metadata,
srUuid,
xapi,
}).run()
const restoredVm = xapi.getObject(restoredId)
try {
await new HealthCheckVmBackup({
restoredVm,
xapi,
}).run()
} finally {
await xapi.VM_destroy(restoredVm.$ref)
}
}
)
await this._adapter.invalidateVmBackupListCache(this._backup.vm.uuid)
}
}

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "@xen-orchestra/fs",
"version": "3.1.0",
"version": "3.0.0",
"license": "AGPL-3.0-or-later",
"description": "The File System for Xen Orchestra backups.",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/fs",

View File

@@ -14,7 +14,7 @@
"url": "https://vates.fr"
},
"license": "AGPL-3.0-or-later",
"version": "0.8.0",
"version": "0.7.1",
"engines": {
"node": ">=15.6"
},
@@ -24,7 +24,7 @@
"@xen-orchestra/emit-async": "^1.0.0",
"@xen-orchestra/log": "^0.3.0",
"acme-client": "^5.0.0",
"app-conf": "^2.3.0",
"app-conf": "^2.1.0",
"lodash": "^4.17.21",
"promise-toolbox": "^0.21.0"
},

View File

@@ -28,7 +28,7 @@
"@iarna/toml": "^2.2.0",
"@vates/read-chunk": "^1.0.0",
"ansi-colors": "^4.1.1",
"app-conf": "^2.3.0",
"app-conf": "^2.1.0",
"content-type": "^1.0.4",
"cson-parser": "^4.0.7",
"getopts": "^2.2.3",

View File

@@ -15,20 +15,6 @@ import { createLogger } from '@xen-orchestra/log'
const { debug, warn } = createLogger('xo:proxy:api')
// format an error to JSON-RPC but do not hide non JSON-RPC errors
// Format an error to JSON-RPC but do not hide non JSON-RPC errors: errors
// that cannot serialize themselves (`toJsonRpcError` missing) are wrapped in
// a generic JsonRpcError carrying their own properties as `data`.
function formatError(responseId, error) {
  if (error != null && typeof error.toJsonRpcError !== 'function') {
    // `message` is destructured both to exclude it from `data` and to be
    // reused below (destructuring reads the property even when it is a
    // non-enumerable `Error#message`)
    const { message, ...data } = error

    // force these entries even if they are not enumerable
    data.code = error.code
    data.stack = error.stack

    error = new JsonRpcError(message, undefined, data)
  }
  return format.error(responseId, error)
}
const ndJsonStream = asyncIteratorToStream(async function* (responseId, iterable) {
try {
let cursor, iterator
@@ -39,7 +25,7 @@ const ndJsonStream = asyncIteratorToStream(async function* (responseId, iterable
cursor = await iterator.next()
yield format.response(responseId, { $responseType: 'ndjson' }) + '\n'
} catch (error) {
yield formatError(responseId, error)
yield format.error(responseId, error)
throw error
}
@@ -78,7 +64,7 @@ export default class Api {
try {
body = parse(body)
} catch (error) {
ctx.body = formatError(null, error)
ctx.body = format.error(null, error)
return
}
@@ -92,7 +78,19 @@ export default class Api {
const { method, params } = body
warn('call error', { method, params, error })
ctx.set('Content-Type', 'application/json')
ctx.body = formatError(body.id, error)
let e = error
if (error != null && typeof error.toJsonRpcError !== 'function') {
const { message, ...data } = error
// force these entries even if they are not enumerable
data.code = error.code
data.stack = error.stack
e = new JsonRpcError(error.message, undefined, data)
}
ctx.body = format.error(body.id, e)
return
}

View File

@@ -407,7 +407,6 @@ export default class Backups {
debounceResource: app.debounceResource.bind(app),
dirMode: app.config.get('backups.dirMode'),
vhdDirectoryCompression: app.config.get('backups.vhdDirectoryCompression'),
useGetDiskLegacy: app.config.getOptional('backups.useGetDiskLegacy'),
})
}

View File

@@ -1,7 +1,7 @@
{
"private": true,
"name": "@xen-orchestra/proxy",
"version": "0.26.2",
"version": "0.26.0",
"license": "AGPL-3.0-or-later",
"description": "XO Proxy used to remotely execute backup jobs",
"keywords": [
@@ -32,27 +32,27 @@
"@vates/decorate-with": "^2.0.0",
"@vates/disposable": "^0.1.1",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/backups": "^0.28.0",
"@xen-orchestra/fs": "^3.1.0",
"@xen-orchestra/backups": "^0.27.4",
"@xen-orchestra/fs": "^3.0.0",
"@xen-orchestra/log": "^0.3.0",
"@xen-orchestra/mixin": "^0.1.0",
"@xen-orchestra/mixins": "^0.8.0",
"@xen-orchestra/mixins": "^0.7.1",
"@xen-orchestra/self-signed": "^0.1.3",
"@xen-orchestra/xapi": "^1.5.0",
"@xen-orchestra/xapi": "^1.4.2",
"ajv": "^8.0.3",
"app-conf": "^2.3.0",
"app-conf": "^2.1.0",
"async-iterator-to-stream": "^1.1.0",
"fs-extra": "^10.0.0",
"get-stream": "^6.0.0",
"getopts": "^2.2.3",
"golike-defer": "^0.5.1",
"http-server-plus": "^0.12.0",
"http-server-plus": "^0.11.1",
"http2-proxy": "^5.0.53",
"json-rpc-protocol": "^0.13.1",
"jsonrpc-websocket-client": "^0.7.2",
"koa": "^2.5.1",
"koa-compress": "^5.0.1",
"koa-helmet": "^6.1.0",
"koa-helmet": "^5.1.0",
"lodash": "^4.17.10",
"node-zone": "^0.4.0",
"parse-pairs": "^1.0.0",

View File

@@ -0,0 +1,90 @@
let indentLevel = 0
function indent() {
return ' '.repeat(indentLevel)
}
function quoteId(name) {
return /^[a-z0-9_]+$/i.test(name) ? name : JSON.stringify(name)
}
function genType(type, schema) {
if (type === 'array' && schema.items !== undefined) {
const { items } = schema
if (Array.isArray(items)) {
if (items.length !== 0) {
return ['[' + items.map(genTs).join(', ') + ']']
}
} else {
const { type } = items
if (type !== undefined && type.length !== 0) {
return genTs(items, true) + '[]'
} else {
return 'unknown[]'
}
}
}
if (type !== 'object') {
return type
}
const code = []
const { title } = schema
const isInterface = title !== undefined
if (isInterface) {
code.push('interface ', title, ' ')
}
const fieldDelimiter = (isInterface ? ';' : ',') + '\n'
const { additionalProperties, properties } = schema
const hasAdditionalProperties = additionalProperties?.type !== undefined
const propertiesKeys = Object.keys(properties ?? {})
if (!hasAdditionalProperties && propertiesKeys.length === 0) {
code.push('{}')
return code.join('')
}
code.push('{\n')
++indentLevel
for (const name of propertiesKeys.sort()) {
const schema = properties[name]
code.push(indent(), quoteId(name))
if (schema.optional) {
code.push('?')
}
code.push(': ')
code.push(genTs(schema))
code.push(fieldDelimiter)
}
if (hasAdditionalProperties) {
code.push(indent(), '[key: string]: ', genTs(additionalProperties), fieldDelimiter)
}
--indentLevel
code.push(indent(), '}')
return code.join('')
}
export function genTs(schema, groupMultiple = false) {
let { type } = schema
if (Array.isArray(type)) {
if (type.length !== 1) {
const code = type
.sort()
.map(type => genType(type, schema))
.join(' | ')
return groupMultiple ? '(' + code + ')' : code
}
type = type[0]
}
return genType(type, schema)
}

View File

@@ -0,0 +1,110 @@
// The JSON value types this schema builder understands.
// (`__proto__: null` so the `in` lookup below cannot match inherited keys.)
const JSON_TYPES = {
  __proto__: null,
  array: true,
  boolean: true,
  null: true,
  number: true,
  object: true,
  string: true,
}

// Record `type` on `schema.type`: a single string for one type, promoted to
// an array (without duplicates) as soon as a second distinct type is seen.
function addType(schema, type) {
  const current = schema.type
  if (current === undefined) {
    schema.type = type
    return
  }
  if (Array.isArray(current)) {
    if (!current.includes(type)) {
      current.push(type)
    }
    return
  }
  if (current !== type) {
    schema.type = [current, type]
  }
}

// Map a JS value to its JSON type name; throws a TypeError for values that
// have no JSON representation (functions, symbols, undefined, bigint…).
function getType(value) {
  const jsType = typeof value
  let type
  if (jsType !== 'object') {
    type = jsType
  } else if (value === null) {
    type = 'null'
  } else if (Array.isArray(value)) {
    type = 'array'
  } else {
    type = 'object'
  }
  if (type in JSON_TYPES) {
    return type
  }
  throw new TypeError('unsupported type: ' + type)
}
// Like Math.max, but tolerates an undefined first operand (returns v2 then).
function max(v1, v2) {
  if (v1 > v2) {
    return v1
  }
  return v2
}
// Like Math.min, but tolerates an undefined first operand (returns v2 then).
function min(v1, v2) {
  if (v1 < v2) {
    return v1
  }
  return v2
}
/**
 * Recursively folds `value` into `schema`, mutating and returning it.
 *
 * @param {Array} path - current location in the traversed value (array
 *   indices and object keys); shared across recursive calls and truncated
 *   back after each level
 * @param {*} value - the value to fold into the schema; `undefined` marks a
 *   missing entry (the node is flagged `optional`)
 * @param {object} [schema] - schema node to update; created when missing
 * @param {Function} getOption - `(name, path) => value` lookup for per-path
 *   options (`computeMinimum`, `computeMaximum`)
 * @returns {object} the updated schema node
 */
function updateSchema_(path, value, schema = { __proto__: null }, getOption) {
  if (value === undefined) {
    schema.optional = true
  } else {
    const type = getType(value)
    addType(schema, type)
    if (type === 'array') {
      const items = schema.items ?? (schema.items = { __proto__: null })
      const pathLength = path.length
      if (Array.isArray(items)) {
        // tuple form: one schema per position
        for (let i = 0, n = value.length; i < n; ++i) {
          path[pathLength] = i
          items[i] = updateSchema_(path, value[i], items[i], getOption)
        }
      } else {
        // single schema shared by all items
        for (let i = 0, n = value.length; i < n; ++i) {
          path[pathLength] = i
          updateSchema_(path, value[i], items, getOption)
        }
      }
      // restore the shared path (drops the index pushed above)
      path.length = pathLength
    } else if (type === 'number') {
      if (getOption('computeMinimum', path)) {
        schema.minimum = min(schema.minimum, value)
      }
      if (getOption('computeMaximum', path)) {
        schema.maximum = max(schema.maximum, value)
      }
    } else if (type === 'object') {
      const pathLength = path.length
      const { additionalProperties } = schema
      if (typeof additionalProperties === 'object') {
        // map form: every value shares the `additionalProperties` schema
        for (const key of Object.keys(value)) {
          path[pathLength] = key
          updateSchema_(path, value[key], additionalProperties, getOption)
        }
      } else {
        const properties = schema.properties ?? (schema.properties = { __proto__: null })
        // handle missing properties
        for (const key of Object.keys(properties)) {
          if (!Object.hasOwn(value, key)) {
            properties[key].optional = true
          }
        }
        // handle existing properties
        for (const key of Object.keys(value)) {
          path[pathLength] = key
          properties[key] = updateSchema_(path, value[key], properties[key], getOption)
        }
      }
      // restore the shared path (drops the key pushed above)
      path.length = pathLength
    }
  }
  return schema
}

/**
 * Updates (or creates) a JSON-schema-like description so that it accepts
 * `value` in addition to everything it already accepted.
 *
 * @param {*} value - the value to fold into the schema
 * @param {object} [schema] - existing schema to extend; created when missing
 * @param {object|Function} [options] - either an object of option flags or a
 *   `(name, path) => value` function
 * @returns {object} the updated schema
 */
export function updateSchema(value, schema, options) {
  const getOption = options == null ? Function.prototype : typeof options === 'object' ? opt => options[opt] : options
  return updateSchema_([], value, schema, getOption)
}

View File

@@ -0,0 +1,65 @@
import { readFileSync } from 'fs'

import { genTs } from './_genTs.mjs'
import { updateSchema } from './_updateSchema.mjs'

// 'VM_guest_metrics' → 'VmGuestMetrics'
//
// Non-letter runs are treated as word separators.
//
// FIX: empty segments produced by leading/trailing separators are skipped,
// `word[0]` used to throw on them; `toLowerCase` replaces the
// locale-dependent `toLocaleLowerCase` for deterministic output.
const upperCamelCase = str =>
  str
    .split(/[^a-zA-Z]+/)
    .filter(word => word.length !== 0)
    .map(word => word[0].toUpperCase() + word.slice(1).toLowerCase())
    .join('')

// `objects.json` maps an object type (vm, sr, …) to a collection of sample
// records to infer the schema from.
const objects = JSON.parse(readFileSync('./objects.json'))
for (const type of Object.keys(objects).sort()) {
  // pre-declare the known free-form map fields so they are generated as
  // `{ [key: string]: … }` instead of an exhaustive property list
  const schema = {
    __proto__: null,
    title: upperCamelCase(type),
    type: 'object',
    properties: {
      assigned_ips: {
        additionalProperties: {},
      },
      bios_strings: {
        additionalProperties: {},
      },
      features: {
        additionalProperties: {},
      },
      license_params: {
        additionalProperties: {},
      },
      networks: {
        additionalProperties: {},
      },
      other_config: {
        additionalProperties: {},
      },
      other: {
        additionalProperties: {},
      },
      restrictions: {
        additionalProperties: {},
      },
      sm_config: {
        additionalProperties: {},
      },
      xenstore_data: {
        additionalProperties: {},
      },
      VCPUs_utilisation: {
        additionalProperties: {},
      },
    },
  }
  for (const object of Object.values(objects[type])) {
    updateSchema(object, schema)
  }

  // drop pre-declared properties that never appeared in any sample object
  for (const name of Object.keys(schema.properties)) {
    if (schema.properties[name].type === undefined) {
      delete schema.properties[name]
    }
  }

  console.log(genTs(schema))
}

View File

@@ -1,10 +1,10 @@
{
"private": true,
"name": "@vates/nbd-client",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/nbd-client",
"name": "@xen-orchestra/xapi-typegen",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/xapi-typegen",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "@vates/nbd-client",
"directory": "@xen-orchestra/xapi-typegen",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
@@ -13,18 +13,8 @@
"url": "https://vates.fr"
},
"license": "AGPL-3.0-or-later",
"version": "0.0.1",
"version": "0.0.0",
"engines": {
"node": ">=14.0"
},
"dependencies": {
"@vates/async-each": "^1.0.0",
"@xen-orchestra/async-map": "^0.1.2",
"promise-toolbox": "^0.21.0",
"xen-api": "^1.2.2"
},
"devDependencies": {
"tap": "^16.3.0",
"tmp": "^0.2.1"
"node": ">=16.9"
}
}

View File

@@ -1,131 +0,0 @@
# VM Sync Hook
> This feature is currently _unstable_ and might change or be removed in the future.
>
> Feedback is very welcome on the [project bugtracker](https://github.com/vatesfr/xen-orchestra/issues).
> This feature is not currently supported for backups done with XO Proxy.
Before snapshotting (with or without memory, ie checkpoint), XO can notify the VM via an HTTP request.
A typical use case is to make sure the VM is in a consistent state during the snapshot process, for instance by making sure database writes are flushed to the disk.
> This request will only be sent if the VM is in a running state.
## Configuration
The feature is opt-in via a tag on the VM: `xo:notify-on-snapshot`.
By default, it will be an HTTPS request on the port `1727`, on the first IP address reported by the VM.
If the _VM Tools_ (i.e. management agent) are not installed on the VM or if you wish to use another URL, you can specify this in the tag: `xo:notify-on-snapshot=<URL>`.
To guarantee the request comes from XO, a secret must be provided in the `xo-server`'s (and `xo-proxy` if relevant) configuration:
```toml
[xapiOptions]
syncHookSecret = 'unique long string to ensure the request comes from XO'
```
## Specification
XO will wait for the request to be answered before starting the snapshot, but will not wait longer than _1 minute_.
If the request fails for any reason, XO will go ahead with snapshot immediately.
```http
GET /sync HTTP/1.1
Authorization: Bearer dW5pcXVlIGxvbmcgc3RyaW5nIHRvIGVuc3VyZSB0aGUgcmVxdWVzdCBjb21lcyBmcm9tIFhP
```
When the snapshot is finished, another request will be sent:
```http
GET /post-sync HTTP/1.1
Authorization: Bearer dW5pcXVlIGxvbmcgc3RyaW5nIHRvIGVuc3VyZSB0aGUgcmVxdWVzdCBjb21lcyBmcm9tIFhP
```
The created snapshot will have the special `xo:synced` tag set to make it identifiable.
## Example server in Node
`index.cjs`:
```js
const exec = require('node:util').promisify(require('node:child_process').execFile)
const SECRET = 'unique long string to ensure the request comes from XO'
const HANDLERS = {
__proto__: null,
async '/sync'() {
// actions to do before the VM is snapshotted
// in this example, the Linux command `sync` is called:
await exec('sync')
},
async '/post-sync'() {
// actions to do after the VM is snapshotted
},
}
function checkAuthorization(req) {
try {
const { authorization } = req.headers
if (authorization !== undefined) {
const parts = authorization.split(' ')
if (parts.length >= 1 && parts[0].toLowerCase() === 'bearer') {
return Buffer.from(parts[1], 'base64').toString() === SECRET
}
}
} catch (error) {
console.warn('checkAuthorization', error)
}
return false
}
async function main() {
// generate a self-signed certificate
const [, key, cert] =
/^(-----BEGIN PRIVATE KEY-----.+-----END PRIVATE KEY-----\n)(-----BEGIN CERTIFICATE-----.+-----END CERTIFICATE-----\n)$/s.exec(
(await exec('openssl', ['req', '-batch', '-new', '-x509', '-nodes', '-newkey', 'rsa:2048', '-keyout', '-']))
.stdout
)
const server = require('node:https').createServer({ cert, key }, async function onRequest(req, res) {
if (!checkAuthorization(req)) {
res.statusCode = 403
return res.end('Forbidden')
}
const handler = HANDLERS[req.url.split('?')[0]]
if (handler === undefined || req.method !== 'GET') {
res.statusCode = 404
return res.end('Not Found')
}
try {
await handler()
res.statusCode = 200
res.end('Ok')
} catch (error) {
console.warn(error)
if (!res.headersSent) {
res.statusCode = 500
res.write('Internal Error')
}
res.end()
}
})
await new Promise((resolve, reject) => {
server.on('close', resolve).on('error', reject).listen(1727)
})
}
main().catch(console.warn)
```

View File

@@ -102,8 +102,6 @@ class Xapi extends Base {
constructor({
callRetryWhenTooManyPendingTasks = { delay: 5e3, tries: 10 },
maxUncoalescedVdis,
syncHookSecret,
syncHookTimeout,
vdiDestroyRetryWhenInUse = { delay: 5e3, tries: 10 },
...opts
}) {
@@ -114,8 +112,6 @@ class Xapi extends Base {
when: { code: 'TOO_MANY_PENDING_TASKS' },
}
this._maxUncoalescedVdis = maxUncoalescedVdis
this._syncHookSecret = syncHookSecret
this._syncHookTimeout = syncHookTimeout
this._vdiDestroyRetryWhenInUse = {
...vdiDestroyRetryWhenInUse,
onRetry,

View File

@@ -1,6 +1,6 @@
{
"name": "@xen-orchestra/xapi",
"version": "1.5.0",
"version": "1.4.2",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/xapi",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
@@ -26,11 +26,10 @@
"@xen-orchestra/log": "^0.3.0",
"d3-time-format": "^3.0.0",
"golike-defer": "^0.5.1",
"http-request-plus": "^0.14.0",
"json-rpc-protocol": "^0.13.2",
"lodash": "^4.17.15",
"promise-toolbox": "^0.21.0",
"vhd-lib": "^4.1.0",
"vhd-lib": "^4.0.0",
"xo-common": "^0.8.0"
},
"private": false,

View File

@@ -2,7 +2,6 @@
const CancelToken = require('promise-toolbox/CancelToken')
const groupBy = require('lodash/groupBy.js')
const hrp = require('http-request-plus')
const ignoreErrors = require('promise-toolbox/ignoreErrors')
const pickBy = require('lodash/pickBy.js')
const omit = require('lodash/omit.js')
@@ -47,31 +46,6 @@ const cleanBiosStrings = biosStrings => {
}
}
// See: https://github.com/xapi-project/xen-api/blob/324bc6ee6664dd915c0bbe57185f1d6243d9ed7e/ocaml/xapi/xapi_guest_agent.ml#L59-L81
//
// Returns <min(n)>/ip || <min(n)>/ipv4/<min(m)> || <min(n)>/ipv6/<min(m)> || undefined
// where n corresponds to the network interface and m to its IP
const IPV4_KEY_RE = /^\d+\/ip(?:v4\/\d+)?$/
const IPV6_KEY_RE = /^\d+\/ipv6\/\d+$/

// Pick the guest address from the `networks` map of a VM_guest_metrics
// record: the first IPv4 entry (in sorted key order) wins, the first IPv6
// entry is kept as a fallback, and an error is thrown when no address is
// available at all.
function getVmAddress(networks) {
  if (networks === undefined) {
    throw new Error('no VM address found')
  }
  let ipv6Fallback
  for (const key of Object.keys(networks).sort()) {
    if (IPV4_KEY_RE.test(key)) {
      // IPv4 has priority: return immediately
      return networks[key]
    }
    if (ipv6Fallback === undefined && IPV6_KEY_RE.test(key)) {
      ipv6Fallback = networks[key]
    }
  }
  if (ipv6Fallback === undefined) {
    throw new Error('no VM address found')
  }
  return ipv6Fallback
}
async function listNobakVbds(xapi, vbdRefs) {
const vbds = []
await asyncMap(vbdRefs, async vbdRef => {
@@ -158,51 +132,6 @@ class Vm {
}
}
async _httpHook({ guest_metrics, power_state, tags, uuid }, pathname) {
if (power_state !== 'Running') {
return
}
let url
let i = tags.length
do {
if (i === 0) {
return
}
const tag = tags[--i]
if (tag === 'xo:notify-on-snapshot') {
const { networks } = await this.getRecord('VM_guest_metrics', guest_metrics)
url = Object.assign(new URL('https://locahost'), {
hostname: getVmAddress(networks),
port: 1727,
})
} else {
const prefix = 'xo:notify-on-snapshot='
if (tag.startsWith(prefix)) {
url = new URL(tag.slice(prefix.length))
}
}
} while (url === undefined)
url.pathname = pathname
const headers = {}
const secret = this._asyncHookSecret
if (secret !== undefined) {
headers.authorization = 'Bearer ' + Buffer.from(secret).toString('base64')
}
try {
await hrp(url, {
headers,
rejectUnauthorized: false,
timeout: this._syncHookTimeout ?? 60e3,
})
} catch (error) {
warn('HTTP hook failed', { error, url, vm: uuid })
}
}
async assertHealthyVdiChains(vmRef, tolerance = this._maxUncoalescedVdis) {
const vdiRefs = {}
;(await this.getRecords('VBD', await this.getField('VM', vmRef, 'VBDs'))).forEach(({ VDI: ref }) => {
@@ -219,8 +148,6 @@ class Vm {
async checkpoint($defer, vmRef, { cancelToken = CancelToken.none, ignoreNobakVdis = false, name_label } = {}) {
const vm = await this.getRecord('VM', vmRef)
await this._httpHook(vm, '/sync')
let destroyNobakVdis = false
if (ignoreNobakVdis) {
@@ -241,9 +168,6 @@ class Vm {
try {
const ref = await this.callAsync(cancelToken, 'VM.checkpoint', vmRef, name_label).then(extractOpaqueRef)
// detached async
this._httpHook(vm, '/post-sync').catch(noop)
// VM checkpoints are marked as templates, unfortunately it does not play well with XVA export/import
// which will import them as templates and not VM checkpoints or plain VMs
await pCatch.call(
@@ -620,8 +544,6 @@ class Vm {
) {
const vm = await this.getRecord('VM', vmRef)
await this._httpHook(vm, '/sync')
const isHalted = vm.power_state === 'Halted'
// requires the VM to be halted because it's not possible to re-plug VUSB on a live VM
@@ -724,9 +646,6 @@ class Vm {
ref = await this.callAsync(cancelToken, 'VM.snapshot', vmRef, name_label).then(extractOpaqueRef)
} while (false)
// detached async
this._httpHook(vm, '/post-sync').catch(noop)
// VM snapshots are marked as templates, unfortunately it does not play well with XVA export/import
// which will import them as templates and not VM snapshots or plain VMs
await pCatch.call(

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -5,8 +5,16 @@
### Enhancements
> Users must be able to say: “Nice enhancement, I'm eager to test it”
- [Dashboard/Health] Detect broken VHD chains and display missing parent VDIs (PR [#6356](https://github.com/vatesfr/xen-orchestra/pull/6356))
- [Proxy] Ability to bind a licence to an existing proxy (PR [#6348](https://github.com/vatesfr/xen-orchestra/pull/6348))
- [Backup] Implement encryption for backup files on storage (PR [#6321](https://github.com/vatesfr/xen-orchestra/pull/6321))
### Bug fixes
> Users must be able to say: “I had this issue, happy to know it's fixed”
### Packages to release
> When modifying a package, add it here with its release type.
@@ -23,4 +31,10 @@
<!--packages-start-->
- @xen-orchestra/fs minor
- @xen-orchestra/mixins minor
- vhd-lib patch
- xo-server minor
- xo-web minor
<!--packages-end-->

View File

@@ -4,12 +4,12 @@ FROM ubuntu:xenial
# https://qastack.fr/programming/25899912/how-to-install-nvm-in-docker
RUN apt-get update
RUN apt-get install -y curl qemu-utils blktap-utils vmdk-stream-converter git libxml2-utils nbdkit g++
RUN apt-get install -y curl qemu-utils blktap-utils vmdk-stream-converter git libxml2-utils
ENV NVM_DIR /usr/local/nvm
RUN mkdir -p /usr/local/nvm
RUN cd /usr/local/nvm
RUN curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.0/install.sh | bash
ENV NODE_VERSION v16.10.0
ENV NODE_VERSION v17.0.1
RUN /bin/bash -c "source $NVM_DIR/nvm.sh && nvm install $NODE_VERSION && nvm use --delete-prefix $NODE_VERSION"
ENV NODE_PATH $NVM_DIR/versions/node/$NODE_VERSION/lib/node_modules

View File

@@ -28,7 +28,7 @@ hostname = '0.0.0.0'
port = 80
```
## HTTPS and certificates
## HTTPS
XO-server can also run in HTTPS (you can run HTTP and HTTPS at the same time) - just modify what's needed in the `# Basic HTTPS` section, this time with the certificates/keys you need and their path:

View File

@@ -109,6 +109,10 @@ In the "Settings" then "Plugins" view, expand the SAML plugin configuration. The
Save the configuration and then activate the plugin (button on top).
:::warning
When registering your instance to your identity provider, you must configure its callback URL to `http://xo.example.net/signin/saml/callback`!
:::
### GitHub
This plugin allows GitHub users to authenticate to Xen-Orchestra.

View File

@@ -3,7 +3,7 @@
"@babel/core": "^7.0.0",
"@babel/eslint-parser": "^7.13.8",
"@babel/register": "^7.0.0",
"babel-jest": "^29.0.3",
"babel-jest": "^28.1.2",
"benchmark": "^2.1.4",
"deptree": "^1.0.0",
"eslint": "^8.7.0",
@@ -19,7 +19,7 @@
"globby": "^13.1.1",
"handlebars": "^4.7.6",
"husky": "^4.2.5",
"jest": "^29.0.3",
"jest": "^28.1.2",
"lint-staged": "^13.0.3",
"lodash": "^4.17.4",
"prettier": "^2.0.5",

View File

@@ -23,15 +23,15 @@
"node": ">=10"
},
"dependencies": {
"@xen-orchestra/fs": "^3.1.0",
"@xen-orchestra/fs": "^3.0.0",
"cli-progress": "^3.1.0",
"exec-promise": "^0.7.0",
"getopts": "^2.2.3",
"human-format": "^1.0.0",
"lodash": "^4.17.21",
"promise-toolbox": "^0.21.0",
"uuid": "^9.0.0",
"vhd-lib": "^4.1.0"
"uuid": "^8.3.2",
"vhd-lib": "^4.0.0"
},
"scripts": {
"postversion": "npm publish"

View File

@@ -360,38 +360,4 @@ exports.VhdAbstract = class VhdAbstract {
}
return true
}
async readRawData(start, length, cache, buf) {
const header = this.header
const blockSize = header.blockSize
const startBlockId = Math.floor(start / blockSize)
const endBlockId = Math.floor((start + length) / blockSize)
const startOffset = start % blockSize
let copied = 0
for (let blockId = startBlockId; blockId <= endBlockId; blockId++) {
let data
if (this.containsBlock(blockId)) {
if (!cache.has(blockId)) {
cache.set(
blockId,
// promise is awaited later, so it won't generate unbounded error
this.readBlock(blockId).then(block => {
return block.data
})
)
}
// the cache contains a promise
data = await cache.get(blockId)
} else {
data = Buffer.alloc(blockSize, 0)
}
const offsetStart = blockId === startBlockId ? startOffset : 0
const offsetEnd = blockId === endBlockId ? (start + length) % blockSize : blockSize
data.copy(buf, copied, offsetStart, offsetEnd)
copied += offsetEnd - offsetStart
}
assert.strictEqual(copied, length, 'invalid length')
return copied
}
}

View File

@@ -8,10 +8,10 @@ const { asyncEach } = require('@vates/async-each')
const { warn } = createLogger('vhd-lib:createVhdDirectoryFromStream')
const buildVhd = Disposable.wrap(async function* (handler, path, inputStream, { concurrency, compression, nbdClient }) {
const buildVhd = Disposable.wrap(async function* (handler, path, inputStream, { concurrency, compression }) {
const vhd = yield VhdDirectory.create(handler, path, { compression })
await asyncEach(
parseVhdStream(inputStream, nbdClient),
parseVhdStream(inputStream),
async function (item) {
switch (item.type) {
case 'footer':
@@ -44,10 +44,10 @@ exports.createVhdDirectoryFromStream = async function createVhdDirectoryFromStre
handler,
path,
inputStream,
{ validator, concurrency = 16, compression, nbdClient } = {}
{ validator, concurrency = 16, compression } = {}
) {
try {
await buildVhd(handler, path, inputStream, { concurrency, compression, nbdClient })
await buildVhd(handler, path, inputStream, { concurrency, compression })
if (validator !== undefined) {
await validator.call(this, path)
}

View File

@@ -78,7 +78,7 @@ module.exports._cleanupVhds = cleanupVhds
module.exports.mergeVhdChain = limitConcurrency(2)(async function mergeVhdChain(
handler,
chain,
{ onProgress = noop, logInfo = noop, removeUnused = false, mergeBlockConcurrency = 2 } = {}
{ onProgress = noop, logInfo = noop, removeUnused = false } = {}
) {
assert(chain.length >= 2)
@@ -123,8 +123,7 @@ module.exports.mergeVhdChain = limitConcurrency(2)(async function mergeVhdChain(
childIsVhdDirectory = childVhd instanceof VhdDirectory
}
// merging vhdFile must not be concurrently with the potential block reordering after a change
const concurrency = parentIsVhdDirectory && childIsVhdDirectory ? mergeBlockConcurrency : 1
const concurrency = parentIsVhdDirectory && childIsVhdDirectory ? 2 : 1
if (mergeState === undefined) {
// merge should be along a vhd chain
assert.strictEqual(UUID.stringify(childVhd.header.parentUuid), UUID.stringify(parentVhd.footer.uuid))

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "vhd-lib",
"version": "4.1.0",
"version": "4.0.0",
"license": "AGPL-3.0-or-later",
"description": "Primitives for VHD file handling",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/vhd-lib",
@@ -18,7 +18,7 @@
"@vates/async-each": "^1.0.0",
"@vates/read-chunk": "^1.0.0",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/fs": "^3.1.0",
"@xen-orchestra/fs": "^3.0.0",
"@xen-orchestra/log": "^0.3.0",
"async-iterator-to-stream": "^1.0.2",
"decorator-synchronized": "^0.6.0",
@@ -28,10 +28,10 @@
"promise-toolbox": "^0.21.0",
"readable-stream": "^4.1.0",
"struct-fu": "^1.2.0",
"uuid": "^9.0.0"
"uuid": "^8.3.1"
},
"devDependencies": {
"@xen-orchestra/fs": "^3.1.0",
"@xen-orchestra/fs": "^3.0.0",
"execa": "^5.0.0",
"get-stream": "^6.0.0",
"rimraf": "^3.0.2",

View File

@@ -4,7 +4,7 @@ const { BLOCK_UNUSED, FOOTER_SIZE, HEADER_SIZE, SECTOR_SIZE } = require('./_cons
const { readChunk } = require('@vates/read-chunk')
const assert = require('assert')
const { unpackFooter, unpackHeader, computeFullBlockSize } = require('./Vhd/_utils')
/*
const cappedBufferConcat = (buffers, maxSize) => {
let buffer = Buffer.concat(buffers)
if (buffer.length > maxSize) {
@@ -13,6 +13,114 @@ const cappedBufferConcat = (buffers, maxSize) => {
return buffer
}
exports.parseVhdStream = async function* parseVhdStream(stream) {
let bytesRead = 0
// handle empty space between elements
// ensure we read stream in order
async function read(offset, size) {
assert(bytesRead <= offset, `offset is ${offset} but we already read ${bytesRead} bytes`)
if (bytesRead < offset) {
// empty spaces
await read(bytesRead, offset - bytesRead)
}
const buf = await readChunk(stream, size)
assert.strictEqual(buf.length, size, `read ${buf.length} instead of ${size}`)
bytesRead += size
return buf
}
const bufFooter = await read(0, FOOTER_SIZE)
const footer = unpackFooter(bufFooter)
yield { type: 'footer', footer, offset: 0 }
const bufHeader = await read(FOOTER_SIZE, HEADER_SIZE)
const header = unpackHeader(bufHeader, footer)
yield { type: 'header', header, offset: SECTOR_SIZE }
const blockSize = header.blockSize
assert.strictEqual(blockSize % SECTOR_SIZE, 0)
const fullBlockSize = computeFullBlockSize(blockSize)
const bitmapSize = fullBlockSize - blockSize
const index = []
for (const parentLocatorId in header.parentLocatorEntry) {
const parentLocatorEntry = header.parentLocatorEntry[parentLocatorId]
// empty parent locator entry, does not exist in the content
if (parentLocatorEntry.platformDataSpace === 0) {
continue
}
index.push({
...parentLocatorEntry,
type: 'parentLocator',
offset: parentLocatorEntry.platformDataOffset,
size: parentLocatorEntry.platformDataLength,
id: parentLocatorId,
})
}
const batOffset = header.tableOffset
const batSize = Math.max(1, Math.ceil((header.maxTableEntries * 4) / SECTOR_SIZE)) * SECTOR_SIZE
index.push({
type: 'bat',
offset: batOffset,
size: batSize,
})
// sometimes some parent locator are before the BAT
index.sort((a, b) => a.offset - b.offset)
while (index.length > 0) {
const item = index.shift()
const buffer = await read(item.offset, item.size)
item.buffer = buffer
const { type } = item
if (type === 'bat') {
// found the BAT : read it and add block to index
let blockCount = 0
for (let blockCounter = 0; blockCounter < header.maxTableEntries; blockCounter++) {
const batEntrySector = buffer.readUInt32BE(blockCounter * 4)
// unallocated block, no need to export it
if (batEntrySector !== BLOCK_UNUSED) {
const batEntryBytes = batEntrySector * SECTOR_SIZE
// ensure the block is not before the bat
assert.ok(batEntryBytes >= batOffset + batSize)
index.push({
type: 'block',
id: blockCounter,
offset: batEntryBytes,
size: fullBlockSize,
})
blockCount++
}
}
// sort again index to ensure block and parent locator are in the right order
index.sort((a, b) => a.offset - b.offset)
item.blockCount = blockCount
} else if (type === 'block') {
item.bitmap = buffer.slice(0, bitmapSize)
item.data = buffer.slice(bitmapSize)
}
yield item
}
/**
* the second footer is at filesize - 512 , there can be empty spaces between last block
* and the start of the footer
*
* we read till the end of the stream, and use the last 512 bytes as the footer
*/
const bufFooterEnd = await readLastSector(stream)
assert(bufFooter.equals(bufFooterEnd), 'footer1 !== footer2')
}
function readLastSector(stream) {
return new Promise((resolve, reject) => {
let bufFooterEnd = Buffer.alloc(0)
@@ -25,227 +133,4 @@ function readLastSector(stream) {
stream.on('end', () => resolve(bufFooterEnd))
stream.on('error', reject)
})
} */
class StreamParser {
_bitmapSize = 0
_bytesRead = 0
_stream = null
_index = []
constructor(stream) {
this._stream = stream
}
async _read(offset, size) {
assert(this._bytesRead <= offset, `offset is ${offset} but we already read ${this._bytesRead} bytes`)
if (this._bytesRead < offset) {
// empty spaces
await this._read(this._bytesRead, offset - this._bytesRead)
}
const buf = await readChunk(this._stream, size)
assert.strictEqual(buf.length, size, `read ${buf.length} instead of ${size}`)
this._bytesRead += size
return buf
}
async *headers() {
const bufFooter = await this._read(0, FOOTER_SIZE)
const footer = unpackFooter(bufFooter)
yield { type: 'footer', footer, offset: 0 }
const bufHeader = await this._read(FOOTER_SIZE, HEADER_SIZE)
const header = unpackHeader(bufHeader, footer)
yield { type: 'header', header, offset: SECTOR_SIZE }
const blockSize = header.blockSize
assert.strictEqual(blockSize % SECTOR_SIZE, 0)
const fullBlockSize = computeFullBlockSize(blockSize)
this._bitmapSize = fullBlockSize - blockSize
let batFound = false
for (const parentLocatorId in header.parentLocatorEntry) {
const parentLocatorEntry = header.parentLocatorEntry[parentLocatorId]
// empty parent locator entry, does not exist in the content
if (parentLocatorEntry.platformDataSpace === 0) {
continue
}
this._index.push({
...parentLocatorEntry,
type: 'parentLocator',
offset: parentLocatorEntry.platformDataOffset,
size: parentLocatorEntry.platformDataLength,
id: parentLocatorId,
})
}
const batOffset = header.tableOffset
const batSize = Math.max(1, Math.ceil((header.maxTableEntries * 4) / SECTOR_SIZE)) * SECTOR_SIZE
this._index.push({
type: 'bat',
offset: batOffset,
size: batSize,
})
// sometimes some parent locator are before the BAT
this._index.sort((a, b) => a.offset - b.offset)
while (!batFound) {
const item = this._index.shift()
const buffer = await this._read(item.offset, item.size)
item.buffer = buffer
const { type } = item
if (type === 'bat') {
// found the BAT : read it and add block to index
let blockCount = 0
for (let blockCounter = 0; blockCounter < header.maxTableEntries; blockCounter++) {
const batEntrySector = buffer.readUInt32BE(blockCounter * 4)
// unallocated block, no need to export it
if (batEntrySector !== BLOCK_UNUSED) {
const batEntryBytes = batEntrySector * SECTOR_SIZE
// ensure the block is not before the bat
assert.ok(batEntryBytes >= batOffset + batSize)
this._index.push({
type: 'block',
id: blockCounter,
offset: batEntryBytes,
size: fullBlockSize,
})
blockCount++
}
}
// sort again index to ensure block and parent locator are in the right order
this._index.sort((a, b) => a.offset - b.offset)
item.blockCount = blockCount
batFound = true
}
yield item
}
}
async *blocks() {
while (this._index.length > 0) {
const item = this._index.shift()
const buffer = await this._read(item.offset, item.size)
item.bitmap = buffer.slice(0, this._bitmapSize)
item.data = buffer.slice(this._bitmapSize)
item.buffer = buffer
yield item
}
}
async *parse() {
yield* this.headers()
yield* this.blocks()
/**
* the second footer is at filesize - 512 , there can be empty spaces between last block
* and the start of the footer
*
* we read till the end of the stream, and use the last 512 bytes as the footer
*/
// const bufFooterEnd = await readLastSector(this._stream)
// assert(bufFooter.equals(bufFooterEnd), 'footer1 !== footer2')
}
}
// hybrid mode : read the headers from the vhd stream, and read the blocks from nbd
class StreamNbdParser extends StreamParser {
#nbdClient = null
#generateBlockSequentially = true
#concurrency = 16
constructor(stream, nbdClient, { generateBlockSequentially = true } = {}) {
super(stream)
this.#nbdClient = nbdClient
this.#generateBlockSequentially = generateBlockSequentially
}
async readBlockData(item) {
const SECTOR_BITMAP = Buffer.alloc(512, 255)
const client = this.#nbdClient
// we read in a raw file, so the block position is id x length, and have nothing to do with the offset
// in the vhd stream
const rawDataLength = item.size - SECTOR_BITMAP.length
let data = await client.readBlock(item.id, rawDataLength)
// end of file , non aligned vhd block
if (data.length < rawDataLength) {
data = Buffer.concat([data, Buffer.alloc(rawDataLength - data.length)])
}
const buffer = Buffer.concat([SECTOR_BITMAP, data])
const block = {
...item,
size: rawDataLength,
bitmap: SECTOR_BITMAP,
data,
buffer,
}
return block
}
async *blocks() {
const index = this._index
const promiseSlots = []
// parallel yielding
function next(position) {
const item = index.shift()
if (item) {
// update this ended
promiseSlots[position] = this.readBlockData(item)
.then(result => {
return { result, position }
})
.catch(error => {
console.error(error)
throw error
})
} else {
// no more block to handle
promiseSlots[position] = undefined
}
}
for (let i = 0; i < this.#concurrency; i++) {
next(i)
}
let runningPromises = []
while (true) {
runningPromises = promiseSlots.filter(p => p !== undefined)
if (runningPromises.length === 0) {
// done
break
}
// the failing promises will only be seen when ALL the running promises
// fails
const { result, position } = await Promise.any(runningPromises)
next(position)
yield result
}
}
async *parse() {
yield* this.headers()
yield* this.blocks()
this._stream.destroy()
}
}
exports.parseVhdStream = async function* parseVhdStream(stream, nbdClient) {
let parser
if (nbdClient) {
parser = new StreamNbdParser(stream, nbdClient)
} else {
parser = new StreamParser(stream)
}
yield* parser.parse()
}

View File

@@ -8,6 +8,6 @@
"promise-toolbox": "^0.19.2",
"readable-stream": "^3.1.1",
"throttle": "^1.0.3",
"vhd-lib": "^4.1.0"
"vhd-lib": "^4.0.0"
}
}

View File

@@ -36,7 +36,7 @@
"blocked": "^1.2.1",
"debug": "^4.0.1",
"http-request-plus": "^0.14.0",
"jest-diff": "^29.0.3",
"jest-diff": "^28.0.1",
"json-rpc-protocol": "^0.13.1",
"kindof": "^2.0.0",
"limit-concurrency-decorator": "^0.5.0",

View File

@@ -1,6 +1,6 @@
{
"name": "xo-server-auth-saml",
"version": "0.10.1",
"version": "0.10.0",
"license": "AGPL-3.0-or-later",
"description": "SAML authentication plugin for XO-Server",
"keywords": [

View File

@@ -17,7 +17,6 @@ export const configurationSchema = {
type: 'string',
},
cert: {
$multiline: true,
title: 'Certificate',
description: "Copy/paste the identity provider's certificate",
type: 'string',

View File

@@ -34,7 +34,7 @@
"lodash": "^4.17.11",
"node-openssl-cert": "^0.1.34",
"promise-toolbox": "^0.21.0",
"uuid": "^9.0.0"
"uuid": "^8.3.1"
},
"private": true,
"license": "AGPL-3.0-or-later",

View File

@@ -28,10 +28,10 @@
"@iarna/toml": "^2.2.1",
"@vates/decorate-with": "^2.0.0",
"@vates/parse-duration": "^0.1.1",
"app-conf": "^2.3.0",
"app-conf": "^2.1.0",
"babel-plugin-lodash": "^3.2.11",
"golike-defer": "^0.5.1",
"jest": "^29.0.3",
"jest": "^28.1.2",
"lodash": "^4.17.11",
"promise-toolbox": "^0.21.0",
"xo-collection": "^0.5.0",

View File

@@ -87,19 +87,8 @@ snapshotNameLabelTpl = '[XO Backup {job.name}] {vm.name_label}'
# Delay for which backups listing on a remote is cached
listingDebounce = '1 min'
# settings when using Vhd directories ( s3 , encryption )
# you should use 'none' if your fs is already compressed
# changing this setting will generate new full backups
vhdDirectoryCompression = 'brotli'
# how many block can be merged in parallel per backup running
# increase to increase performance, reduce if you have timeout during merge
mergeBlockConcurrency = 2
# how many block can be uploaded in parallel
# increase to in rease performance, reduce if you have timeout or memory error during transfer
writeBlockConcurrency = 16
# This is a work-around.
#
# See https://github.com/vatesfr/xen-orchestra/pull/4674

View File

@@ -89,7 +89,7 @@ When logical volume no longer necessary:
```
> mkdir /tmp/block-mount
> mount --options=loop,ro,norecovery,offset=$(($START * 512)),sizelimit=$(($SIZE)) --source=/tmp/vhd-mount/vhdi2 --target=/tmp/block-mount
> mount --options=loop,ro,offset=$(($START * 512)),sizelimit=$(($SIZE)) --source=/tmp/vhd-mount/vhdi2 --target=/tmp/block-mount
> ls /tmp/block-mount
bin boot dev etc home lib lib64 lost+found media mnt opt proc root run sbin srv sys @System.solv tmp usr var
```

View File

@@ -1,7 +1,7 @@
{
"private": true,
"name": "xo-server",
"version": "5.103.1",
"version": "5.101.0",
"license": "AGPL-3.0-or-later",
"description": "Server part of Xen-Orchestra",
"keywords": [
@@ -36,24 +36,23 @@
"@vates/disposable": "^0.1.1",
"@vates/event-listeners-manager": "^1.0.1",
"@vates/multi-key-map": "^0.1.0",
"@vates/nbd-client": "^0.0.1",
"@vates/parse-duration": "^0.1.1",
"@vates/predicates": "^1.0.0",
"@vates/read-chunk": "^1.0.0",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/backups": "^0.28.0",
"@xen-orchestra/backups": "^0.27.4",
"@xen-orchestra/cron": "^1.0.6",
"@xen-orchestra/defined": "^0.0.1",
"@xen-orchestra/emit-async": "^1.0.0",
"@xen-orchestra/fs": "^3.1.0",
"@xen-orchestra/fs": "^3.0.0",
"@xen-orchestra/log": "^0.3.0",
"@xen-orchestra/mixin": "^0.1.0",
"@xen-orchestra/mixins": "^0.8.0",
"@xen-orchestra/mixins": "^0.7.1",
"@xen-orchestra/self-signed": "^0.1.3",
"@xen-orchestra/template": "^0.1.0",
"@xen-orchestra/xapi": "^1.5.0",
"@xen-orchestra/xapi": "^1.4.2",
"ajv": "^8.0.3",
"app-conf": "^2.3.0",
"app-conf": "^2.1.0",
"async-iterator-to-stream": "^1.0.1",
"base64url": "^3.0.0",
"blocked-at": "^1.2.0",
@@ -82,7 +81,7 @@
"highland": "^2.11.1",
"http-proxy": "^1.16.2",
"http-request-plus": "^0.14.0",
"http-server-plus": "^0.12.0",
"http-server-plus": "^0.11.1",
"human-format": "^1.0.0",
"iterable-backoff": "^0.1.0",
"js-yaml": "^4.1.0",
@@ -124,9 +123,9 @@
"tar-stream": "^2.0.1",
"tmp": "^0.2.1",
"unzipper": "^0.10.5",
"uuid": "^9.0.0",
"uuid": "^8.3.1",
"value-matcher": "^0.2.0",
"vhd-lib": "^4.1.0",
"vhd-lib": "^4.0.0",
"ws": "^8.2.3",
"xdg-basedir": "^5.1.0",
"xen-api": "^1.2.2",

View File

@@ -1207,7 +1207,6 @@ import_.params = {
optional: true,
},
},
additionalProperties: true,
},
type: { type: 'string', optional: true },
sr: { type: 'string' },

View File

@@ -107,10 +107,7 @@ const TRANSFORMS = {
current_operations: obj.current_operations,
default_SR: link(obj, 'default_SR'),
HA_enabled: Boolean(obj.ha_enabled),
// ignore undefined VDIs, which occurs if the objects were not fetched/cached yet.
haSrs: obj.$ha_statefiles.filter(vdi => vdi !== undefined).map(vdi => link(vdi, 'SR')),
haSrs: obj.$ha_statefiles.map(vdi => link(vdi, 'SR')),
master: link(obj, 'master'),
tags: obj.tags,
name_description: obj.name_description,

View File

@@ -382,10 +382,6 @@ export default class Xapi extends XapiBase {
getVmConsole(vmId) {
const vm = this.getObject(vmId)
if (vm.other_config.disable_pv_vnc === '1') {
throw new Error('console is disabled for this VM')
}
const console = find(vm.$consoles, { protocol: 'rfb' })
if (!console) {
throw new Error('no RFB console found')
@@ -882,20 +878,16 @@ export default class Xapi extends XapiBase {
throw error
}
throw Object.assign(
new AggregateError(
await asyncMap(await this.call('host.get_all'), async hostRef => {
const hostNameLabel = await this.call('host.get_name_label', hostRef)
try {
await this.call('VM.assert_can_boot_here', vmRef, hostRef)
return `${hostNameLabel}: OK`
} catch (error) {
return `${hostNameLabel}: ${error.message}`
}
}),
error.message
),
{ code: error.code, params: error.params }
throw new AggregateError(
await asyncMap(await this.call('host.get_all'), async hostRef => {
const hostNameLabel = await this.call('host.get_name_label', hostRef)
try {
await this.call('VM.assert_can_boot_here', vmRef, hostRef)
return `${hostNameLabel}: OK`
} catch (error) {
return `${hostNameLabel}: ${error.message}`
}
})
)
}
} else {

View File

@@ -266,7 +266,7 @@ export default class {
}
async isValidAuthenticationToken(id) {
return (await this._getAuthenticationToken(id)) !== undefined
return (await this.getAuthenticationToken(id)) !== undefined
}
async updateAuthenticationToken(properties, { description }) {

View File

@@ -193,17 +193,9 @@ export default class BackupNg {
const remotes = {}
const xapis = {}
const remoteErrors = {}
await waitAll([
asyncMapSettled(remoteIds, async id => {
let remote
try {
remote = await app.getRemoteWithCredentials(id)
} catch (error) {
log.warn('Error while instantiating remote', { error, remoteId: id })
remoteErrors[id] = error
return
}
const remote = await app.getRemoteWithCredentials(id)
if (remote.proxy !== proxyId) {
throw new Error(
proxyId === undefined
@@ -230,23 +222,6 @@ export default class BackupNg {
}),
])
// Fails the job if all remotes are disabled
//
// TODO: integrate each failure in its own tasks and still proceed
// with other tasks like rolling snapshot and replication.
if (remoteIds.length > 0 && Object.keys(remotes).length === 0) {
const error = new Error(`couldn't instantiate any remote`)
error.errors = remoteErrors
throw error
}
// update remotes list with only the enabled remotes
job.remotes = {
id: {
__or: Object.keys(remotes),
},
}
const params = {
job,
recordToXapi,

View File

@@ -22,8 +22,6 @@ export default class BackupsRemoteAdapter {
debounceResource: app.debounceResource.bind(app),
dirMode: app.config.get('backups.dirMode'),
vhdDirectoryCompression: app.config.get('backups.vhdDirectoryCompression'),
// this adapter is also used for file restore
useGetDiskLegacy: app.config.getOptional('backups.useGetDiskLegacy'),
})
}
}

View File

@@ -16,7 +16,7 @@ export default class {
this._ajv = new Ajv({
strict: 'log',
useDefaults: true,
}).addVocabulary(['$multiline', '$type', 'enumNames'])
}).addVocabulary(['$type', 'enumNames'])
this._plugins = { __proto__: null }
this._pluginsMetadata = new PluginsMetadata({

View File

@@ -13,6 +13,7 @@ import { Remotes } from '../models/remote.mjs'
const obfuscateRemote = ({ url, ...remote }) => {
const parsedUrl = parse(url)
remote.supportFileRestore = parsedUrl.type !== 's3'
remote.url = format(sensitiveValues.obfuscate(parsedUrl))
return remote
}

View File

@@ -26,7 +26,7 @@
"pako": "^2.0.4",
"promise-toolbox": "^0.21.0",
"tar-stream": "^2.2.0",
"vhd-lib": "^4.1.0",
"vhd-lib": "^4.0.0",
"xml2js": "^0.4.23"
},
"devDependencies": {

View File

@@ -1,7 +1,7 @@
{
"private": true,
"name": "xo-web",
"version": "5.104.0",
"version": "5.102.0",
"license": "AGPL-3.0-or-later",
"description": "Web interface client for Xen-Orchestra",
"keywords": [

View File

@@ -15,7 +15,6 @@ export default class Combobox extends Component {
options: PropTypes.oneOfType([PropTypes.arrayOf(PropTypes.string), PropTypes.objectOf(PropTypes.string)]),
onChange: PropTypes.func.isRequired,
value: PropTypes.string.isRequired,
multiline: PropTypes.bool,
}
_handleChange = event => {
@@ -27,11 +26,11 @@ export default class Combobox extends Component {
}
render() {
const { options, multiline = false, ...props } = this.props
const { options, ...props } = this.props
props.className = 'form-control'
props.onChange = this._handleChange
const Input = multiline ? <textarea {...props} /> : <input {...props} />
const Input = <input {...props} />
if (isEmpty(options)) {
return Input

View File

@@ -2676,6 +2676,9 @@ export default {
// Original text: 'Click on a VM to display restore options'
restoreBackupsInfo: undefined,
// Original text: 'Only the files of Delta Backup which are not on a SMB remote can be restored'
restoreDeltaBackupsInfo: undefined,
// Original text: "Enabled"
remoteEnabled: 'activado',

View File

@@ -2702,6 +2702,10 @@ export default {
// Original text: "Click on a VM to display restore options"
restoreBackupsInfo: 'Cliquez sur une VM pour afficher les options de récupération',
// Original text: "Only the files of Delta Backup which are not on a SMB remote can be restored"
restoreDeltaBackupsInfo:
'Seuls les fichiers de Delta Backup qui ne sont pas sur un emplacement SMB peuvent être restaurés',
// Original text: "Enabled"
remoteEnabled: 'activé',

View File

@@ -3906,6 +3906,9 @@ export default {
// Original text: 'Click on a VM to display restore options'
restoreBackupsInfo: 'Fare clic su una VM per visualizzare le opzioni di ripristino',
// Original text: 'Only the files of Delta Backup which are not on a SMB remote can be restored'
restoreDeltaBackupsInfo: 'È possibile ripristinare solo i file di Delta Backup che non si trovano su un SMB remoto',
// Original text: 'Enabled'
remoteEnabled: 'Abilitato',

View File

@@ -3343,6 +3343,9 @@ export default {
// Original text: "Click on a VM to display restore options"
restoreBackupsInfo: "Geri getirme seçenekleri için bir VM'e tıklayın",
// Original text: "Only the files of Delta Backup which are not on a SMB remote can be restored"
restoreDeltaBackupsInfo: 'Yalnızca SMB hedefinde olmayan fark yedeklerinden dosya alınabilir',
// Original text: "Enabled"
remoteEnabled: 'Etkin',

View File

@@ -30,7 +30,6 @@ const messages = {
messageFrom: 'From',
messageReply: 'Reply',
sr: 'SR',
subdirectory: 'Subdirectory',
tryXoa: 'Try XOA for free and deploy it here.',
notInstalled: 'Not installed',
@@ -1637,6 +1636,7 @@ const messages = {
getRemote: 'Get remote',
noBackups: 'There are no backups!',
restoreBackupsInfo: 'Click on a VM to display restore options',
restoreDeltaBackupsInfo: 'Only the files of Delta Backup which are not on a SMB or S3 remote can be restored',
remoteEnabled: 'Enabled',
remoteDisabled: 'Disabled',
enableRemote: 'Enable',

View File

@@ -33,7 +33,6 @@ export default class StringInput extends Component {
<Combobox
value={value !== undefined ? value : ''}
disabled={disabled}
multiline={schema.$multiline}
onChange={this._onChange}
options={schema.defaults}
placeholder={placeholder || schema.default}

View File

@@ -629,8 +629,7 @@ const getLinkedObjectsByTaskRefOrId = create(
export const getResolvedPendingTasks = create(
createGetObjectsOfType('task').filter([task => task.status === 'pending']),
getLinkedObjectsByTaskRefOrId,
getCheckPermissions,
(tasks, linkedObjectsByTaskRefOrId, check) => {
(tasks, linkedObjectsByTaskRefOrId) => {
const resolvedTasks = []
forEach(tasks, task => {
const objects = [
@@ -639,13 +638,10 @@ export const getResolvedPendingTasks = create(
// { taskId → operation } map instead of { taskRef → operation } map
...defined(linkedObjectsByTaskRefOrId[task.id], []),
]
if (objects.length > 0 || check(task.$host, 'view')) {
resolvedTasks.push({
...task,
objects,
})
}
resolvedTasks.push({
...task,
objects,
})
})
return resolvedTasks
}

View File

@@ -75,19 +75,13 @@ export const isVmRunning = vm => vm && vm.power_state === 'Running'
// ===================================================================
const reload = () => {
export const signOut = () => {
// prevent automatic reconnection
xo.removeListener('closed', connect)
window.location.reload(true)
}
export const signOut = () => {
cookies.remove('token')
reload()
}
export const connect = () => {
xo.open(createBackoff()).catch(error => {
logError(error, 'failed to connect to xo-server')
@@ -97,7 +91,7 @@ export const connect = () => {
const xo = invoke(() => {
const token = cookies.get('token')
if (!token) {
reload()
signOut()
throw new Error('no valid token')
}
@@ -105,7 +99,7 @@ const xo = invoke(() => {
credentials: { token },
})
xo.on('authenticationFailure', reload)
xo.on('authenticationFailure', signOut)
xo.on('scheduledAttempt', ({ delay }) => {
console.warn('next attempt in %s ms', delay)
})
@@ -1618,18 +1612,14 @@ export const deleteVms = async vms => {
if (vms.length === 1) {
return deleteVm(vms[0])
}
try {
await confirm({
title: _('deleteVmsModalTitle', { vms: vms.length }),
body: _('deleteVmsModalMessage', { vms: vms.length }),
strongConfirm: vms.length > 1 && {
messageId: 'deleteVmsConfirmText',
values: { nVms: vms.length },
},
})
} catch (err) {
return
}
await confirm({
title: _('deleteVmsModalTitle', { vms: vms.length }),
body: _('deleteVmsModalMessage', { vms: vms.length }),
strongConfirm: vms.length > 1 && {
messageId: 'deleteVmsConfirmText',
values: { nVms: vms.length },
},
}).catch(noop)
let nErrors = 0
await Promise.all(

View File

@@ -1,6 +1,7 @@
import _ from 'intl'
import ActionButton from 'action-button'
import Component from 'base-component'
import Icon from 'icon'
import React from 'react'
import SortedTable from 'sorted-table'
import Upgrade from 'xoa-upgrade'
@@ -86,7 +87,7 @@ export default class Restore extends Component {
_refreshBackupList = async (_remotes = this.props.remotes, jobs = this.props.jobs) => {
const remotes = keyBy(
filter(_remotes, remote => remote.enabled),
filter(_remotes, remote => remote.enabled && remote.supportFileRestore),
'id'
)
const backupsByRemote = await listVmBackups(toArray(remotes))
@@ -203,6 +204,9 @@ export default class Restore extends Component {
{_('refreshBackupList')}
</ActionButton>
</div>
<em>
<Icon icon='info' /> {_('restoreDeltaBackupsInfo')}
</em>
<SortedTable
actions={this._actions}
collection={this.state.backupDataByVm}

View File

@@ -511,7 +511,6 @@ export default class New extends Component {
summary: true,
})
} catch (err) {
this.setState({ summary: false, usage: false })
error('NFS Error', err.message || String(err))
} finally {
this.setState(({ loading }) => ({ loading: loading - 1 }))
@@ -569,10 +568,8 @@ export default class New extends Component {
lun,
luns,
nfsVersion,
nfsSubdir,
path,
paths,
selectedMainPath,
summary,
type,
usage,
@@ -698,9 +695,7 @@ export default class New extends Component {
defaultValue=''
id='selectSrPath'
onChange={event => {
const selectedPath = event.target.value
this.setState({ selectedMainPath: selectedPath })
this._handleSrPathSelection(selectedPath)
this._handleSrPathSelection(event.target.value)
}}
ref='path'
required
@@ -714,28 +709,6 @@ export default class New extends Component {
</option>
))}
</select>
{(type === 'nfs' || type === 'nfsiso') && selectedMainPath !== undefined && (
<div>
<label htmlFor='nfsSubdirectory'>{_('subdirectory')}</label>
<div className='input-group'>
<span className='input-group-addon'>/</span>
<input
className='form-control'
id='nfsSubdirectory'
type='text'
onChange={this.linkState('nfsSubdir')}
/>
<span className='input-group-btn'>
<ActionButton
icon='search'
handler={() => {
this._handleSrPathSelection(selectedMainPath.concat('/' + (nfsSubdir?.trim() ?? '')))
}}
/>
</span>
</div>
</div>
)}
</fieldset>
)}
{type === 'iscsi' && (

View File

@@ -42,7 +42,7 @@ const UnhealthyVdiChains = flowRight(
chains: createSrUnhealthyVdiChainsLengthSubscription(props.sr),
})),
connectStore(() => ({
vdis: createGetObjectsOfType('VDI').pick(createSelector((_, props) => props.chains?.unhealthyVdis, keys)),
vdis: createGetObjectsOfType('VDI').pick(createSelector((_, props) => props.chains, keys)),
}))
)(({ chains: { unhealthyVdis } = {}, vdis }) =>
isEmpty(vdis) ? null : (

View File

@@ -10,7 +10,7 @@ import { addSubscriptions, connectStore, resolveIds } from 'utils'
import { FormattedDate, FormattedRelative, injectIntl } from 'react-intl'
import { SelectPool } from 'select-objects'
import { Col, Container, Row } from 'grid'
import { differenceBy, isEmpty, map, some } from 'lodash'
import { differenceBy, flatMap, groupBy, isEmpty, keys, map, some } from 'lodash'
import {
createFilter,
createGetObject,
@@ -192,18 +192,20 @@ const GROUPED_ACTIONS = [
permissions: subscribePermissions,
})
@connectStore(() => {
const getPools = createGetObjectsOfType('pool').pick(
createSelector(getResolvedPendingTasks, resolvedPendingTasks => resolvedPendingTasks.map(task => task.$poolId))
const getResolvedPendingTasksByPool = createSelector(getResolvedPendingTasks, resolvedPendingTasks =>
groupBy(resolvedPendingTasks, '$pool')
)
const getPools = createGetObjectsOfType('pool').pick(createSelector(getResolvedPendingTasksByPool, keys))
return (state, props) => {
// true: useResourceSet to bypass permissions
const resolvedPendingTasks = getResolvedPendingTasks(state, props, true)
const resolvedPendingTasksByPool = getResolvedPendingTasks(state, props, true)
return {
isAdmin: isAdmin(state, props),
nResolvedTasks: resolvedPendingTasks.length,
nResolvedTasks: resolvedPendingTasksByPool.length,
pools: getPools(state, props, true),
resolvedPendingTasks,
resolvedPendingTasksByPool,
}
}
})
@@ -214,7 +216,7 @@ export default class Tasks extends Component {
}
componentWillReceiveProps(props) {
const finishedTasks = differenceBy(this.props.resolvedPendingTasks, props.resolvedPendingTasks, 'id')
const finishedTasks = differenceBy(this.props.resolvedPendingTasksByPool, props.resolvedPendingTasksByPool, 'id')
if (!isEmpty(finishedTasks)) {
this.setState({
finishedTasks: finishedTasks
@@ -224,14 +226,22 @@ export default class Tasks extends Component {
}
}
_getPoolFilter = createSelector(
_getTasks = createSelector(
createSelector(() => this.state.pools, resolveIds),
poolIds => (isEmpty(poolIds) ? null : ({ $poolId }) => poolIds.includes($poolId))
() => this.props.resolvedPendingTasksByPool,
(poolIds, resolvedPendingTasksByPool) =>
isEmpty(poolIds)
? resolvedPendingTasksByPool
: flatMap(poolIds, poolId => resolvedPendingTasksByPool[poolId] || [])
)
_getTasks = createFilter(() => this.props.resolvedPendingTasks, this._getPoolFilter)
_getFinishedTasks = createFilter(() => this.state.finishedTasks, this._getPoolFilter)
_getFinishedTasks = createFilter(
() => this.state.finishedTasks,
createSelector(
createSelector(() => this.state.pools, resolveIds),
poolIds => (isEmpty(poolIds) ? null : ({ $poolId }) => poolIds.includes($poolId))
)
)
_getItemsPerPageContainer = () => this.state.itemsPerPageContainer

View File

@@ -32,7 +32,7 @@ const gitDiff = (what, args = []) =>
.split('\n')
.filter(_ => _ !== '')
const gitDiffFiles = (files = []) => gitDiff('files', files)
const gitDiffIndex = () => gitDiff('index', ['--cached', 'HEAD~1'])
const gitDiffIndex = () => gitDiff('index', ['--cached', 'HEAD'])
// -----------------------------------------------------------------------------

7660
yarn.lock

File diff suppressed because it is too large Load Diff