Compare commits

...

30 Commits

Author SHA1 Message Date
Florent BEAUCHAMP
862d9a6a7f feat: use additionnal file for checksum instead of attributes 2023-07-24 18:08:19 +02:00
Florent BEAUCHAMP
06cabcfb21 use chunk filters to store dedup 2023-07-24 15:20:01 +02:00
Florent BEAUCHAMP
50f378ec1e fixup! feat(fs): use multiplatform module instead of call to local binary 2023-07-20 10:12:02 +02:00
Florent BEAUCHAMP
506a6aad08 feat(backup): show dedup status in restore popup + cleanup and tests 2023-07-20 10:12:02 +02:00
Florent BEAUCHAMP
447112b583 feat(fs): use multiplatform module instead of call to local binary 2023-07-20 10:12:02 +02:00
Florent BEAUCHAMP
b380e085d2 feat(backups): store dedup information in filepath 2023-07-20 10:12:02 +02:00
Florent BEAUCHAMP
d752b1ed70 tests and docs 2023-07-20 10:12:02 +02:00
Florent BEAUCHAMP
16f4fcfd04 refacto and tests 2023-07-20 10:12:02 +02:00
Florent BEAUCHAMP
69a0e0e563 fixes following review 2023-07-20 10:12:02 +02:00
Florent BEAUCHAMP
456e4f213b feat: parser 2023-07-20 10:12:02 +02:00
Florent BEAUCHAMP
a6d24a6dfa test and fixes 2023-07-20 10:12:02 +02:00
Florent BEAUCHAMP
391c778515 fix(cleanVm): handle broken-er alias 2023-07-20 10:12:02 +02:00
Florent BEAUCHAMP
4e125ede88 feat(@xen-orchestra/fs): implement deduplication for vhd directory 2023-07-20 10:12:02 +02:00
Julien Fontanet
003140d96b test(nbd-client): fix issues introduced by conversion to ESM
Introduced by 7c80d0c1e
2023-07-19 23:09:48 +02:00
Julien Fontanet
363d7cf0d0 fix(node-vsphere-soap): add missing files
Introduced by f0c94496b
2023-07-19 23:02:34 +02:00
Julien Fontanet
f0c94496bf chore(node-vsphere-soap): convert to ESM
BREAKING CHANGE
2023-07-19 11:03:56 +02:00
Julien Fontanet
de217eabd9 test(nbd-client): fix issues introduced by conversion to ESM
Introduced by 7c80d0c1e
2023-07-19 11:02:12 +02:00
Julien Fontanet
7c80d0c1e1 chore(nbd-client): convert to ESM
BREAKING CHANGE
2023-07-19 10:46:05 +02:00
Julien Fontanet
9fb749b1db chore(fuse-vhd): convert to ESM
BREAKING CHANGE
2023-07-19 10:13:35 +02:00
Julien Fontanet
ad9c59669a chore: update dev deps 2023-07-19 10:11:30 +02:00
Julien Fontanet
76a038e403 fix(xo-web): fix doc link to incremental/key backup interval 2023-07-19 09:48:23 +02:00
Julien Fontanet
0e12072922 fix(xo-server/pool.mergeInto): fix IPv6 handling 2023-07-18 17:28:39 +02:00
Julien Fontanet
158a8e14a2 chore(xen-api): expose bracketless IPv6 as hostnameRaw 2023-07-18 17:28:38 +02:00
Julien Fontanet
0c97910349 chore(xo-server): remove unused _mounts property
Introduced by 5c9a47b6b
2023-07-18 11:14:24 +02:00
Florent BEAUCHAMP
8347ac6ed8 fix(xo-server/xapi-stats): simplify caching (#6920)
Following #6903

- change cache system per object => per host
- update cache at the beginning of the query to handle race conditions leading to duplicate requests
- remove concurrency limit (was leading to a huge backlog of queries, and response handling is quite fast)
2023-07-18 09:47:39 +02:00
Mathieu
996abd6e7e fix(xo-web/settings/config): wording fix for XO Config Cloud Backup (#6938)
See zammad#15904
2023-07-13 10:29:36 +02:00
rbarhtaoui
de8abd5b63 feat(lite/pool/vms): ability to export selected VMs as JSON file (#6911) 2023-07-13 09:30:10 +02:00
Julien Fontanet
3de928c488 fix(xo-server-audit): ignore mirrorBackup.getAllJobs 2023-07-12 21:52:51 +02:00
Mathieu
a2a514e483 feat(lite/stats): cache stats from rrd_update (#6781) 2023-07-12 15:30:05 +02:00
rbarhtaoui
ff432e04b0 feat(lite/pool/vms): export selected VMs as CSV file (#6915) 2023-07-12 14:34:16 +02:00
57 changed files with 2981 additions and 2119 deletions

View File

@@ -1,9 +1,7 @@
'use strict'
const LRU = require('lru-cache')
const Fuse = require('fuse-native')
const { VhdSynthetic } = require('vhd-lib')
const { Disposable, fromCallback } = require('promise-toolbox')
import LRU from 'lru-cache'
import Fuse from 'fuse-native'
import { VhdSynthetic } from 'vhd-lib'
import { Disposable, fromCallback } from 'promise-toolbox'
// build a stat object from https://github.com/fuse-friends/fuse-native/blob/master/test/fixtures/stat.js
const stat = st => ({
@@ -16,7 +14,7 @@ const stat = st => ({
gid: st.gid !== undefined ? st.gid : process.getgid(),
})
exports.mount = Disposable.factory(async function* mount(handler, diskPath, mountDir) {
export const mount = Disposable.factory(async function* mount(handler, diskPath, mountDir) {
const vhd = yield VhdSynthetic.fromVhdChain(handler, diskPath)
const cache = new LRU({

View File

@@ -15,8 +15,9 @@
"url": "https://vates.fr"
},
"engines": {
"node": ">=10.0"
"node": ">=14"
},
"main": "./index.mjs",
"dependencies": {
"fuse-native": "^2.2.6",
"lru-cache": "^7.14.0",

View File

@@ -1,42 +0,0 @@
'use strict'
exports.INIT_PASSWD = Buffer.from('NBDMAGIC') // "NBDMAGIC" ensure we're connected to a nbd server
exports.OPTS_MAGIC = Buffer.from('IHAVEOPT') // "IHAVEOPT" start an option block
exports.NBD_OPT_REPLY_MAGIC = 1100100111001001n // magic received during negociation
exports.NBD_OPT_EXPORT_NAME = 1
exports.NBD_OPT_ABORT = 2
exports.NBD_OPT_LIST = 3
exports.NBD_OPT_STARTTLS = 5
exports.NBD_OPT_INFO = 6
exports.NBD_OPT_GO = 7
exports.NBD_FLAG_HAS_FLAGS = 1 << 0
exports.NBD_FLAG_READ_ONLY = 1 << 1
exports.NBD_FLAG_SEND_FLUSH = 1 << 2
exports.NBD_FLAG_SEND_FUA = 1 << 3
exports.NBD_FLAG_ROTATIONAL = 1 << 4
exports.NBD_FLAG_SEND_TRIM = 1 << 5
exports.NBD_FLAG_FIXED_NEWSTYLE = 1 << 0
exports.NBD_CMD_FLAG_FUA = 1 << 0
exports.NBD_CMD_FLAG_NO_HOLE = 1 << 1
exports.NBD_CMD_FLAG_DF = 1 << 2
exports.NBD_CMD_FLAG_REQ_ONE = 1 << 3
exports.NBD_CMD_FLAG_FAST_ZERO = 1 << 4
exports.NBD_CMD_READ = 0
exports.NBD_CMD_WRITE = 1
exports.NBD_CMD_DISC = 2
exports.NBD_CMD_FLUSH = 3
exports.NBD_CMD_TRIM = 4
exports.NBD_CMD_CACHE = 5
exports.NBD_CMD_WRITE_ZEROES = 6
exports.NBD_CMD_BLOCK_STATUS = 7
exports.NBD_CMD_RESIZE = 8
exports.NBD_REQUEST_MAGIC = 0x25609513 // magic number to create a new NBD request to send to the server
exports.NBD_REPLY_MAGIC = 0x67446698 // magic number received from the server when reading response to a nbd request
exports.NBD_REPLY_ACK = 1
exports.NBD_DEFAULT_PORT = 10809
exports.NBD_DEFAULT_BLOCK_SIZE = 64 * 1024

View File

@@ -0,0 +1,41 @@
// NBD protocol constants (handshake magics, option codes, flags, commands).
// Reference: https://github.com/NetworkBlockDevice/nbd/blob/master/doc/proto.md
export const INIT_PASSWD = Buffer.from('NBDMAGIC') // "NBDMAGIC" ensure we're connected to a nbd server
export const OPTS_MAGIC = Buffer.from('IHAVEOPT') // "IHAVEOPT" start an option block
export const NBD_OPT_REPLY_MAGIC = 1100100111001001n // magic received during negotiation
// option codes sent by the client during the handshake
export const NBD_OPT_EXPORT_NAME = 1
export const NBD_OPT_ABORT = 2
export const NBD_OPT_LIST = 3
export const NBD_OPT_STARTTLS = 5
export const NBD_OPT_INFO = 6
export const NBD_OPT_GO = 7
// transmission flags describing the export
export const NBD_FLAG_HAS_FLAGS = 1 << 0
export const NBD_FLAG_READ_ONLY = 1 << 1
export const NBD_FLAG_SEND_FLUSH = 1 << 2
export const NBD_FLAG_SEND_FUA = 1 << 3
export const NBD_FLAG_ROTATIONAL = 1 << 4
export const NBD_FLAG_SEND_TRIM = 1 << 5
// handshake flag (server advertises fixed-newstyle negotiation)
export const NBD_FLAG_FIXED_NEWSTYLE = 1 << 0
// per-command flags
export const NBD_CMD_FLAG_FUA = 1 << 0
export const NBD_CMD_FLAG_NO_HOLE = 1 << 1
export const NBD_CMD_FLAG_DF = 1 << 2
export const NBD_CMD_FLAG_REQ_ONE = 1 << 3
export const NBD_CMD_FLAG_FAST_ZERO = 1 << 4
// command types
export const NBD_CMD_READ = 0
export const NBD_CMD_WRITE = 1
export const NBD_CMD_DISC = 2
export const NBD_CMD_FLUSH = 3
export const NBD_CMD_TRIM = 4
export const NBD_CMD_CACHE = 5
export const NBD_CMD_WRITE_ZEROES = 6
export const NBD_CMD_BLOCK_STATUS = 7
export const NBD_CMD_RESIZE = 8
export const NBD_REQUEST_MAGIC = 0x25609513 // magic number to create a new NBD request to send to the server
export const NBD_REPLY_MAGIC = 0x67446698 // magic number received from the server when reading response to a nbd request
export const NBD_REPLY_ACK = 1
export const NBD_DEFAULT_PORT = 10809
export const NBD_DEFAULT_BLOCK_SIZE = 64 * 1024

View File

@@ -1,8 +1,11 @@
'use strict'
const assert = require('node:assert')
const { Socket } = require('node:net')
const { connect } = require('node:tls')
const {
import assert from 'node:assert'
import { Socket } from 'node:net'
import { connect } from 'node:tls'
import { fromCallback, pRetry, pDelay, pTimeout } from 'promise-toolbox'
import { readChunkStrict } from '@vates/read-chunk'
import { createLogger } from '@xen-orchestra/log'
import {
INIT_PASSWD,
NBD_CMD_READ,
NBD_DEFAULT_BLOCK_SIZE,
@@ -17,16 +20,13 @@ const {
NBD_REQUEST_MAGIC,
OPTS_MAGIC,
NBD_CMD_DISC,
} = require('./constants.js')
const { fromCallback, pRetry, pDelay, pTimeout } = require('promise-toolbox')
const { readChunkStrict } = require('@vates/read-chunk')
const { createLogger } = require('@xen-orchestra/log')
} from './constants.mjs'
const { warn } = createLogger('vates:nbd-client')
// documentation is here : https://github.com/NetworkBlockDevice/nbd/blob/master/doc/proto.md
module.exports = class NbdClient {
export default class NbdClient {
#serverAddress
#serverCert
#serverPort

View File

@@ -17,6 +17,7 @@
"engines": {
"node": ">=14.0"
},
"main": "./index.mjs",
"dependencies": {
"@vates/async-each": "^1.0.0",
"@vates/read-chunk": "^1.1.1",
@@ -31,6 +32,6 @@
},
"scripts": {
"postversion": "npm publish --access public",
"test-integration": "tap --lines 97 --functions 95 --branches 74 --statements 97 tests/*.integ.js"
"test-integration": "tap --lines 97 --functions 95 --branches 74 --statements 97 tests/*.integ.mjs"
}
}

View File

@@ -1,13 +1,12 @@
'use strict'
const NbdClient = require('../index.js')
const { spawn, exec } = require('node:child_process')
const fs = require('node:fs/promises')
const { test } = require('tap')
const tmp = require('tmp')
const { pFromCallback } = require('promise-toolbox')
const { Socket } = require('node:net')
const { NBD_DEFAULT_PORT } = require('../constants.js')
const assert = require('node:assert')
import NbdClient from '../index.mjs'
import { spawn, exec } from 'node:child_process'
import fs from 'node:fs/promises'
import { test } from 'tap'
import tmp from 'tmp'
import { pFromCallback } from 'promise-toolbox'
import { Socket } from 'node:net'
import { NBD_DEFAULT_PORT } from '../constants.mjs'
import assert from 'node:assert'
const FILE_SIZE = 10 * 1024 * 1024

View File

@@ -1,4 +1,3 @@
'use strict'
/*
node-vsphere-soap
@@ -12,17 +11,18 @@
*/
const EventEmitter = require('events').EventEmitter
const axios = require('axios')
const https = require('node:https')
const util = require('util')
const soap = require('soap')
const Cookie = require('soap-cookie') // required for session persistence
import { EventEmitter } from 'events'
import axios from 'axios'
import https from 'node:https'
import util from 'util'
import soap from 'soap'
import Cookie from 'soap-cookie' // required for session persistence
// Client class
// inherits from EventEmitter
// possible events: connect, error, ready
function Client(vCenterHostname, username, password, sslVerify) {
export function Client(vCenterHostname, username, password, sslVerify) {
this.status = 'disconnected'
this.reconnectCount = 0
@@ -228,4 +228,3 @@ function _soapErrorHandler(self, emitter, command, args, err) {
}
// end
exports.Client = Client

View File

@@ -2,7 +2,7 @@
"name": "@vates/node-vsphere-soap",
"version": "1.0.0",
"description": "interface to vSphere SOAP/WSDL from node for interfacing with vCenter or ESXi, forked from node-vsphere-soap",
"main": "lib/client.js",
"main": "lib/client.mjs",
"author": "reedog117",
"repository": {
"directory": "@vates/node-vsphere-soap",
@@ -30,7 +30,7 @@
"private": false,
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/node-vsphere-soap",
"engines": {
"node": ">=8.10"
"node": ">=14"
},
"scripts": {
"postversion": "npm publish --access public"

View File

@@ -1,15 +1,11 @@
'use strict'
// place your own credentials here for a vCenter or ESXi server
// this information will be used for connecting to a vCenter instance
// for module testing
// name the file config-test.js
const vCenterTestCreds = {
// placeholder credentials used by the integration tests (see notes above)
export const vCenterTestCreds = {
vCenterIP: 'vcsa', // hostname or IP of the vCenter/ESXi server
vCenterUser: 'vcuser',
vCenterPassword: 'vcpw',
vCenter: true, // presumably true when targeting a full vCenter rather than a bare ESXi host — TODO confirm
}
exports.vCenterTestCreds = vCenterTestCreds

View File

@@ -1,18 +1,16 @@
'use strict'
/*
vsphere-soap.test.js
tests for the vCenterConnectionInstance class
*/
const assert = require('assert')
const { describe, it } = require('test')
import assert from 'assert'
import { describe, it } from 'test'
const vc = require('../lib/client')
import * as vc from '../lib/client.mjs'
// eslint-disable-next-line n/no-missing-require
const TestCreds = require('../config-test.js').vCenterTestCreds
// eslint-disable-next-line n/no-missing-import
import { vCenterTestCreds as TestCreds } from '../config-test.mjs'
const VItest = new vc.Client(TestCreds.vCenterIP, TestCreds.vCenterUser, TestCreds.vCenterPassword, false)

View File

@@ -660,13 +660,14 @@ export class RemoteAdapter {
return path
}
async writeVhd(path, input, { checksum = true, validator = noop, writeBlockConcurrency } = {}) {
async writeVhd(path, input, { checksum = true, validator = noop, writeBlockConcurrency, dedup = false } = {}) {
const handler = this._handler
if (this.useVhdDirectory()) {
const dataPath = `${dirname(path)}/data/${uuidv4()}.vhd`
const size = await createVhdDirectoryFromStream(handler, dataPath, input, {
concurrency: writeBlockConcurrency,
compression: this.#getCompressionType(),
dedup,
async validator() {
await input.task
return validator.apply(this, arguments)

View File

@@ -123,19 +123,19 @@ export async function checkAliases(
) {
const aliasFound = []
for (const alias of aliasPaths) {
const target = await resolveVhdAlias(handler, alias)
if (!isVhdFile(target)) {
logWarn('alias references non VHD target', { alias, target })
if (remove) {
logInfo('removing alias and non VHD target', { alias, target })
await handler.unlink(target)
await handler.unlink(alias)
}
continue
}
let target
try {
target = await resolveVhdAlias(handler, alias)
if (!isVhdFile(target)) {
logWarn('alias references non VHD target', { alias, target })
if (remove) {
logInfo('removing alias and non VHD target', { alias, target })
await handler.unlink(target)
await handler.unlink(alias)
}
continue
}
const { dispose } = await openVhd(handler, target)
try {
await dispose()

View File

@@ -17,6 +17,7 @@ const DEFAULT_XAPI_VM_SETTINGS = {
concurrency: 2,
copyRetention: 0,
deleteFirst: false,
dedup: false,
diskPerVmConcurrency: 0, // not limited by default
exportRetention: 0,
fullInterval: 0,

View File

@@ -160,6 +160,7 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
)
metadataContent = {
dedup: settings.dedup,
jobId,
mode: job.mode,
scheduleId,
@@ -208,6 +209,7 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
// no checksum for VHDs, because they will be invalidated by
// merges and chainings
checksum: false,
dedup: settings.dedup,
validator: tmpPath => checkVhd(handler, tmpPath),
writeBlockConcurrency: this._config.writeBlockConcurrency,
})

View File

@@ -45,6 +45,34 @@ When `useVhdDirectory` is enabled on the remote, the directory containing the VH
└─ <uuid>.vhd
```
#### vhd directory with deduplication
The difference with the non-dedup mode is that a hash is computed for each VHD block. The hash is split into 4-character tokens and the data is stored in xo-block-store/{token1}/.../{token7}/{token8}.source.
Then a hard link is made from this source to the destination folder in <vdis>/<job UUID>/<VDI UUID>/blocks/{number}/{number}
```
<remote>
└─ xo-block-store
└─ {4 char}
└─ ...
└─ {char.source}
└─ xo-vm-backups
├─ index.json // TODO
└─ <VM UUID>
├─ cache.json.gz
├─ vdis
│ └─ <job UUID>
│ └─ <VDI UUID>
│ ├─ index.json // TODO
│ ├─ <YYYYMMDD>T<HHmmss>.alias.vhd // contains the relative path to a VHD directory
| └─ data
| ├─ <uuid>.vhd // VHD directory format is described in vhd-lib/Vhd/VhdDirectory.js
├─ <YYYYMMDD>T<HHmmss>.json // backup metadata
├─ <YYYYMMDD>T<HHmmss>.xva
└─ <YYYYMMDD>T<HHmmss>.xva.checksum
```
## Cache for a VM
In a VM directory, if the file `cache.json.gz` exists, it contains the metadata for all the backups for this VM.

View File

@@ -0,0 +1,23 @@
# Deduplication
- This uses an additional inode (or equivalent on the FS) for each distinct block in the `xo-block-store` sub-folder
- This will not work well with immutability/object lock
- Only blocks of VHD directories are deduplicated
- Prerequisites: the FS must support hard links and extended attributes
- A key (full backup) does not take more space on the remote than a delta. It will take more inodes, and more time, since we'll have to read all the blocks.
When a new block is written to the remote, a hash is computed. If a file with this hash doesn't exist in `xo-block-store`, create it, then add the hash as an extended attribute.
A hard link, sharing data and extended attributes, is then created to the destination.
When deleting a block which has a hash extended attribute, a check is done on the xo-block-store. If there is no other link, then the block is deleted. The directory containing it stays.
When merging blocks: the unlink method is called before overwriting an existing block.
### troubleshooting
Since all the blocks are hard linked, you can convert a deduplicated remote to a non-deduplicated one by deleting the xo-block-store directory.
Two new methods have been added to the local FS handler:
- deduplicationGarbageCollector(), which should be called from the root of the FS: it will clean any block without other links, and any empty directory
- deduplicationStats(), which will compute the number of blocks in the store and how many times they are used

View File

@@ -16,6 +16,7 @@ function formatVmBackup(backup) {
}),
id: backup.id,
dedup: backup.dedup,
jobId: backup.jobId,
mode: backup.mode,
scheduleId: backup.scheduleId,

View File

@@ -34,6 +34,7 @@
"bind-property-descriptor": "^2.0.0",
"decorator-synchronized": "^0.6.0",
"execa": "^5.0.0",
"fs-extended-attributes": "^1.0.1",
"fs-extra": "^11.1.0",
"get-stream": "^6.0.0",
"limit-concurrency-decorator": "^0.5.0",

View File

@@ -268,9 +268,9 @@ export default class RemoteHandlerAbstract {
await this._mktree(normalizePath(dir), { mode })
}
async outputFile(file, data, { dirMode, flags = 'wx' } = {}) {
async outputFile(file, data, { dedup = false, dirMode, flags = 'wx' } = {}) {
const encryptedData = this.#encryptor.encryptData(data)
await this._outputFile(normalizePath(file), encryptedData, { dirMode, flags })
await this._outputFile(normalizePath(file), encryptedData, { dedup, dirMode, flags })
}
async read(file, buffer, position) {
@@ -319,8 +319,8 @@ export default class RemoteHandlerAbstract {
await timeout.call(this._rmdir(normalizePath(dir)).catch(ignoreEnoent), this._timeout)
}
async rmtree(dir) {
await this._rmtree(normalizePath(dir))
async rmtree(dir, { dedup } = {}) {
await this._rmtree(normalizePath(dir), { dedup })
}
// Asks the handler to sync the state of the effective remote with its'
@@ -397,6 +397,10 @@ export default class RemoteHandlerAbstract {
}
}
async checkSupport() {
return {}
}
async test() {
const SIZE = 1024 * 1024 * 10
const testFileName = normalizePath(`${Date.now()}.test`)
@@ -437,14 +441,14 @@ export default class RemoteHandlerAbstract {
await this._truncate(file, len)
}
async __unlink(file, { checksum = true } = {}) {
async __unlink(file, { checksum = true, dedup = false } = {}) {
file = normalizePath(file)
if (checksum) {
ignoreErrors.call(this._unlink(checksumFile(file)))
}
await this._unlink(file).catch(ignoreEnoent)
await this._unlink(file, { dedup }).catch(ignoreEnoent)
}
async write(file, buffer, position) {
@@ -560,17 +564,16 @@ export default class RemoteHandlerAbstract {
throw new Error('Not implemented')
}
async _outputFile(file, data, { dirMode, flags }) {
async _outputFile(file, data, { dirMode, flags, dedup = false }) {
try {
return await this._writeFile(file, data, { flags })
return await this._writeFile(file, data, { dedup, flags })
} catch (error) {
if (error.code !== 'ENOENT') {
throw error
}
}
await this._mktree(dirname(file), { mode: dirMode })
return this._outputFile(file, data, { flags })
return this._outputFile(file, data, { dedup, flags })
}
async _outputStream(path, input, { dirMode, validator }) {
@@ -613,7 +616,7 @@ export default class RemoteHandlerAbstract {
throw new Error('Not implemented')
}
async _rmtree(dir) {
async _rmtree(dir, { dedup } = {}) {
try {
return await this._rmdir(dir)
} catch (error) {
@@ -624,7 +627,7 @@ export default class RemoteHandlerAbstract {
const files = await this._list(dir)
await asyncEach(files, file =>
this._unlink(`${dir}/${file}`).catch(error => {
this._unlink(`${dir}/${file}`, { dedup }).catch(error => {
// Unlink dir behavior is not consistent across platforms
// https://github.com/nodejs/node-v0.x-archive/issues/5791
if (error.code === 'EISDIR' || error.code === 'EPERM') {
@@ -639,7 +642,7 @@ export default class RemoteHandlerAbstract {
// called to initialize the remote
async _sync() {}
async _unlink(file) {
async _unlink(file, opts) {
throw new Error('Not implemented')
}

View File

@@ -19,7 +19,8 @@ try {
} catch (_) {}
export const getHandler = (remote, ...rest) => {
const Handler = HANDLERS[parse(remote.url).type]
const { type } = parse(remote.url)
const Handler = HANDLERS[type]
if (!Handler) {
throw new Error('Unhandled remote type')
}

View File

@@ -1,10 +1,17 @@
import df from '@sindresorhus/df'
import fs from 'fs-extra'
// import fsx from 'fs-extended-attributes'
import lockfile from 'proper-lockfile'
import { createLogger } from '@xen-orchestra/log'
import { fromEvent, retry } from 'promise-toolbox'
import { asyncEach } from '@vates/async-each'
import { fromEvent, fromCallback, ignoreErrors, retry } from 'promise-toolbox'
import { synchronized } from 'decorator-synchronized'
import RemoteHandlerAbstract from './abstract'
import { normalize as normalizePath } from './path'
import assert from 'node:assert'
import { createHash, randomBytes } from 'node:crypto'
const { info, warn } = createLogger('xo:fs:local')
@@ -37,6 +44,10 @@ export default class LocalHandler extends RemoteHandlerAbstract {
#addSyncStackTrace
#retriesOnEagain
#supportDedup
#dedupDirectory = '/xo-block-store'
#hashMethod = 'sha256'
#attributeKey = `user.hash.${this.#hashMethod}`
constructor(remote, opts = {}) {
super(remote)
@@ -194,16 +205,267 @@ export default class LocalHandler extends RemoteHandlerAbstract {
return this.#addSyncStackTrace(fs.truncate, this.getFilePath(file), len)
}
async _unlink(file) {
const filePath = this.getFilePath(file)
// unlink a file by absolute FS path, retrying on EAGAIN (#retriesOnEagain),
// without any deduplication bookkeeping
async #localUnlink(filePath) {
return await this.#addSyncStackTrace(retry, () => fs.unlink(filePath), this.#retriesOnEagain)
}
/**
 * Delete `file`; when `dedup` is true, also release its reference on the
 * shared source block in the dedup store, deleting the source when this was
 * the last link to it.
 *
 * @param {string} file - path relative to the remote root
 * @param {object} [opts]
 * @param {boolean} [opts.dedup] - perform dedup-store bookkeeping
 */
async _unlink(file, { dedup } = {}) {
const filePath = this.getFilePath(file)
let hash
// only try to read dedup source if we try to delete something deduplicated
if (dedup === true) {
try {
// get hash before deleting the file
hash = await this.#getExtendedAttribute(file, this.#attributeKey)
} catch (err) {
// attribute missing/unreadable: fall back to a normal delete
}
}
// delete file in place
await this.#localUnlink(filePath)
// a defined hash implies we are on a deduplicated file
if (hash !== undefined) {
const dedupPath = this.getFilePath(this.#computeDeduplicationPath(hash))
await this.#removeExtendedAttribute(file, this.#attributeKey)
try {
const { nlink } = await fs.stat(dedupPath)
// nlink is the number of copies still using these data:
// delete the source if it's now alone
if (nlink === 1) {
await this.#localUnlink(dedupPath)
}
} catch (error) {
// no problem if another process deleted the source or if we unlink directly the source file
if (error.code !== 'ENOENT') {
throw error
}
}
}
}
// write `buffer` at `position` through an already-open file descriptor
_writeFd(file, buffer, position) {
return this.#addSyncStackTrace(fs.write, file.fd, buffer, 0, buffer.length, position)
}
_writeFile(file, data, { flags }) {
// plain (non-dedup) file write, honoring the requested open flags
#localWriteFile(file, data, { flags }) {
return this.#addSyncStackTrace(fs.writeFile, this.getFilePath(file), data, { flag: flags })
}
/**
 * Write `data` to `file`; when `dedup` is true and the FS supports it,
 * route the data through the dedup store and hard-link it into place.
 *
 * NOTE(review): `flags` is not honored on the dedup path — `fs.link` fails
 * with EEXIST (similar to 'wx') but other flag values are ignored; confirm
 * this is intended.
 */
async _writeFile(file, data, { flags, dedup }) {
if (dedup === true) {
// probe FS support only once, and only when dedup is first requested
if (this.#supportDedup === undefined) {
const supported = await this.checkSupport()
this.#supportDedup = supported.hardLink === true && supported.extendedAttributes === true
}
if (this.#supportDedup) {
const hash = this.#hash(data)
// create the file (if not already present) in the store
const dedupPath = await this.#writeDeduplicationSource(hash, data)
// hard link to the target place
// this linked file will have the same extended attributes
// (used for unlink)
return this.#link(dedupPath, file)
}
}
// fallback: plain write when dedup is off or unsupported
return this.#localWriteFile(file, data, { flags })
}
// hex digest of `data` using the configured hash method (sha256)
#hash(data) {
return createHash(this.#hashMethod).update(data).digest('hex')
}
async #getExtendedAttribute(file, attributeName) {
try{
return this._readFile(file+attributeName)
}catch(err){
if(err.code === 'ENOENT'){
return
}
throw err
}
}
async #setExtendedAttribute(file, attributeName, value) {
return this._writeFile(file+attributeName, value)
}
// delete the sidecar file backing an emulated extended attribute
async #removeExtendedAttribute(file, attributeName){
return this._unlink(file+attributeName)
}
/*
async #getExtendedAttribute(file, attributeName) {
return new Promise((resolve, reject) => {
fsx.get(this.getFilePath(file), attributeName, (err, res) => {
if (err) {
reject(err)
} else {
// res is a buffer
// it is null if the file doesn't have this attribute
if (res !== null) {
resolve(res.toString('utf-8'))
}
resolve(undefined)
}
})
})
}
async #setExtendedAttribute(file, attributeName, value) {
return new Promise((resolve, reject) => {
fsx.set(this.getFilePath(file), attributeName, value, (err, res) => {
if (err) {
reject(err)
} else {
resolve(res)
}
})
})
}
async #removeExtendedAttribute(file, attributeName){
}
*/
// create a hard link between two files (paths relative to the remote root)
#link(source, dest) {
return fs.link(this.getFilePath(source), this.getFilePath(dest))
}
// split path to keep a sane number of file per directory
#computeDeduplicationPath(hash) {
assert.strictEqual(hash.length % 4, 0)
let path = this.#dedupDirectory
for (let i = 0; i < hash.length; i++) {
if (i % 4 === 0) {
path += '/'
}
path += hash[i]
}
path += '.source'
return path
}
async #writeDeduplicationSource(hash, data) {
const path = this.#computeDeduplicationPath(hash)
try {
// flags ensures it fails if it already exists
// _outputfile will create the directory tree
await this._outputFile(path, data, { flags: 'wx' })
} catch (error) {
// if it is alread present : not a problem
if (error.code === 'EEXIST') {
// it should already have the extended attributes, nothing more to do
return path
}
throw error
}
try {
await this.#setExtendedAttribute(path, this.#attributeKey, hash)
} catch (error) {
if (error.code !== 'ENOENT') {
throw error
}
// if a concurrent process deleted the dedup : recreate it
return this.#writeDeduplicationSource(path, hash)
}
return path
}
/**
* delete empty dirs
* delete file source thath don't have any more links
*
* @returns Promise
*/
/**
 * Delete empty directories and source files that no longer have any other
 * link, recursively, starting from the dedup store root.
 *
 * @param {string} [dir] - directory to collect, defaults to the dedup store
 * @param {boolean} [alreadyVisited] - internal flag for the second pass on a directory
 * @returns {Promise<void>}
 */
async deduplicationGarbageCollector(dir = this.#dedupDirectory, alreadyVisited = false) {
try {
// fast path: directory is already empty
await this._rmdir(dir)
return
} catch (error) {
if (error.code !== 'ENOTEMPTY') {
throw error
}
}
// the directory may not be empty after a first visit
if (alreadyVisited) {
return
}
const files = await this._list(dir)
await asyncEach(
files,
async file => {
const stat = await fs.stat(this.getFilePath(`${dir}/${file}`))
// have to check the stat to ensure we don't try to delete
// the directories : they don't have links
if (stat.isDirectory()) {
return this.deduplicationGarbageCollector(`${dir}/${file}`)
}
// nlink === 1 means no backup references this source anymore
if (stat.nlink === 1) {
return fs.unlink(this.getFilePath(`${dir}/${file}`))
}
},
{ concurrency: 2 }
) // low concurrency since we do a recursive traversal of a deep tree
// second pass: the directory may now be empty
return this.deduplicationGarbageCollector(dir, true)
}
/**
 * Walk the dedup store and count the source blocks and how many times they
 * are referenced (hard links minus the source itself).
 *
 * @param {string} [dir] - directory to inspect, defaults to the dedup store
 * @returns {Promise<{nbSourceBlocks: number, nbBlocks: number}>}
 */
async deduplicationStats(dir = this.#dedupDirectory) {
let nbSourceBlocks = 0
let nbBlocks = 0
try {
const files = await this._list(dir)
await asyncEach(
files,
async file => {
const stat = await fs.stat(this.getFilePath(`${dir}/${file}`))
if (stat.isDirectory()) {
// recurse and accumulate the child directory's counters
const { nbSourceBlocks: nbSourceInChild, nbBlocks: nbBlockInChild } = await this.deduplicationStats(
`${dir}/${file}`
)
nbSourceBlocks += nbSourceInChild
nbBlocks += nbBlockInChild
} else {
nbSourceBlocks++
nbBlocks += stat.nlink - 1 // ignore current
}
},
{ concurrency: 2 }
)
} catch (err) {
// a missing store simply yields zero counters
if (err.code !== 'ENOENT') {
throw err
}
}
return { nbSourceBlocks, nbBlocks }
}
// Probe the underlying FS for dedup prerequisites (hard links + extended
// attributes) by writing a throwaway file, linking it and reading back the
// attribute. Synchronized so concurrent writes don't probe twice.
@synchronized()
async checkSupport() {
const supported = await super.checkSupport()
const sourceFileName = normalizePath(`${Date.now()}.sourcededup`)
const destFileName = normalizePath(`${Date.now()}.destdedup`)
try {
const SIZE = 1024 * 1024
const data = await fromCallback(randomBytes, SIZE)
const hash = this.#hash(data)
await this._outputFile(sourceFileName, data, { flags: 'wx', dedup: false })
await this.#setExtendedAttribute(sourceFileName, this.#attributeKey, hash)
await this.#link(sourceFileName, destFileName)
const linkedData = await this._readFile(destFileName)
const { nlink } = await fs.stat(this.getFilePath(destFileName))
// contains the right data and the link counter
supported.hardLink = nlink === 2 && linkedData.equals(data)
supported.extendedAttributes = hash === (await this.#getExtendedAttribute(sourceFileName, this.#attributeKey))
} catch (error) {
// any failure means the feature is simply reported as unsupported
warn(`error while testing the dedup`, { error })
} finally {
// NOTE(review): the attribute sidecar written by #setExtendedAttribute
// (`${sourceFileName}${this.#attributeKey}`) is not removed here — confirm
// whether it should also be unlinked
ignoreErrors.call(this._unlink(sourceFileName))
ignoreErrors.call(this._unlink(destFileName))
}
return supported
}
}

View File

@@ -0,0 +1,107 @@
import { after, beforeEach, describe, it } from 'node:test'
import assert from 'node:assert'
import fs from 'node:fs/promises'
import { getSyncedHandler } from './index.js'
import { Disposable, pFromCallback } from 'promise-toolbox'
import tmp from 'tmp'
import execa from 'execa'
import { rimraf } from 'rimraf'
import { randomBytes } from 'node:crypto'
// https://xkcd.com/221/
const data =
'H2GbLa0F2J4LHFLRwLP9zN4dGWJpdx1T6eGWra8BRlV9fBpRGtWIOSKXjU8y7fnxAWVGWpbYPYCwRigvxRSTcuaQsCtwvDNKMmFwYpsGMS14akgBD3EpOMPpKIRRySOsOeknpr48oopO1n9eq0PxGbOcY4Q9aojRu9rn1SMNyjq7YGzwVQEm6twA3etKGSYGvPJVTs2riXm7u6BhBh9VZtQDxQEy5ttkHiZUpgLi6QshSpMjL7dHco8k6gzGcxfpoyS5IzaQeXqDOeRjE6HNn27oUXpze5xRYolQhxA7IqdfzcYwWTqlaZb7UBUZoFCiFs5Y6vPlQVZ2Aw5YganLV1ZcIz78j6TAtXJAfXrDhksm9UteQul8RYT0Ur8AJRYgiGXOsXrWWBKm3CzZci6paLZ2jBmGfgVuBJHlvgFIjOHiVozjulGD4SwKQ2MNqUOylv89NTP1BsJuZ7MC6YCm5yix7FswoE7Y2NhDFqzEQvseRQFyz52AsfuqRY7NruKHlO7LOSI932che2WzxBAwy78Sk1eRHQLsZ37dLB4UkFFIq6TvyjJKznTMAcx9HDOSrFeke6KfsDB1A4W3BAxJk40oAcFMeM72Lg97sJExMJRz1m1nGQJEiGCcnll9G6PqEfHjoOhdDLgN2xewUyvbuRuKEXXxD1H6Tz1iWReyRGSagQNLXvqkKoHoxu3bvSi8nWrbtEY6K2eHLeF5bYubYGXc5VsfiCQNPEzQV4ECzaPdolRtbpRFMcB5aWK70Oew3HJkEcN7IkcXI9vlJKnFvFMqGOHKujd4Tyjhvru2UFh0dAkEwojNzz7W0XlASiXRneea9FgiJNLcrXNtBkvIgw6kRrgbXI6DPJdWDpm3fmWS8EpOICH3aTiXRLQUDZsReAaOsfau1FNtP4JKTQpG3b9rKkO5G7vZEWqTi69mtPGWmyOU47WL1ifJtlzGiFbZ30pcHMc0u4uopHwEQq6ZwM5S6NHvioxihhHQHO8JU2xvcjg5OcTEsXtMwIapD3re'
const hash = '09a3cd9e135114cb870a0b5cf0dfd3f4be994662d0c715b65bcfc5e3b635dd40'
const dataPath = 'xo-block-store/09a3/cd9e/1351/14cb/870a/0b5c/f0df/d3f4/be99/4662/d0c7/15b6/5bcf/c5e3/b635/dd40.source'
let dir

// Tests for the deduplication support of the local fs handler:
// a deduplicated file is stored once under `xo-block-store/<sha256 path>.source`
// and each logical path links back to that single source entry.
describe('dedup tests', () => {
  beforeEach(async () => {
    // fresh temporary directory backing the `file://` remote for each test
    dir = await pFromCallback(cb => tmp.dir(cb))
  })
  // NOTE(review): `beforeEach` creates a new directory per test but `after`
  // runs only once per suite, so every tmp dir except the last one leaks —
  // this probably should be `afterEach`; confirm the hook exists in this runner
  after(async () => {
    await rimraf(dir)
  })

  it('works in general case ', async () => {
    await Disposable.use(getSyncedHandler({ url: `file://${dir}` }, { dedup: true }), async handler => {
      await handler.outputFile('in/a/sub/folder/file', data, { dedup: true })
      // the block store must exist and contain exactly one entry
      await assert.doesNotReject(handler.list('xo-block-store'))
      assert.strictEqual((await handler.list('xo-block-store')).length, 1)
      assert.strictEqual((await handler.list('in/a/sub/folder')).length, 1)
      assert.strictEqual((await handler.readFile('in/a/sub/folder/file')).toString('utf-8'), data)
      // the checksum is exposed as an extended attribute on the linked file
      const value = (await execa('getfattr', ['-n', 'user.hash.sha256', '--only-value', dir + '/in/a/sub/folder/file']))
        .stdout
      assert.strictEqual(value, hash)
      // the source file is created
      assert.strictEqual((await handler.readFile(dataPath)).toString('utf-8'), data)
      await handler.outputFile('in/anotherfolder/file', data, { dedup: true })
      assert.strictEqual((await handler.list('in/anotherfolder')).length, 1)
      assert.strictEqual((await handler.readFile('in/anotherfolder/file')).toString('utf-8'), data)
      await handler.unlink('in/a/sub/folder/file', { dedup: true })
      // source is still here: another link still references it
      assert.strictEqual((await handler.readFile(dataPath)).toString('utf-8'), data)
      assert.strictEqual((await handler.readFile('in/anotherfolder/file')).toString('utf-8'), data)
      await handler.unlink('in/anotherfolder/file', { dedup: true })
      // source should have been deleted with the last link
      assert.strictEqual(
        (
          await handler.list(
            'xo-block-store/09a3/cd9e/1351/14cb/870a/0b5c/f0df/d3f4/be99/4662/d0c7/15b6/5bcf/c5e3/b635'
          )
        ).length,
        0
      )
      assert.strictEqual((await handler.list('in/anotherfolder')).length, 0)
    })
  })

  it('garbage collector an stats ', async () => {
    await Disposable.use(getSyncedHandler({ url: `file://${dir}` }, { dedup: true }), async handler => {
      await handler.outputFile('in/anotherfolder/file', data, { dedup: true })
      await handler.outputFile('in/anotherfolder/same', data, { dedup: true })
      await handler.outputFile('in/a/sub/folder/file', randomBytes(1024), { dedup: true })
      let stats = await handler.deduplicationStats()
      assert.strictEqual(stats.nbBlocks, 3)
      assert.strictEqual(stats.nbSourceBlocks, 2)
      // delete a link behind the handler's back (plain fs call, no dedup
      // bookkeeping) to create an orphan source entry in the block store;
      // note: `fs.unlink` takes no options, so none are passed here
      await fs.unlink(`${dir}/in/a/sub/folder/file`)
      assert.strictEqual((await handler.list('xo-block-store')).length, 2)
      await handler.deduplicationGarbageCollector()
      stats = await handler.deduplicationStats()
      assert.strictEqual(stats.nbBlocks, 2)
      assert.strictEqual(stats.nbSourceBlocks, 1)
      assert.strictEqual((await handler.list('xo-block-store')).length, 1)
    })
  })

  it('compute support', async () => {
    await Disposable.use(getSyncedHandler({ url: `file://${dir}` }, { dedup: true }), async handler => {
      const supported = await handler.checkSupport()
      assert.strictEqual(supported.hardLink, true, 'support hard link is not present in local fs')
      assert.strictEqual(supported.extendedAttributes, true, 'support extended attributes is not present in local fs')
    })
  })

  it('handles edge cases : source deleted', async () => {
    await Disposable.use(getSyncedHandler({ url: `file://${dir}` }, { dedup: true }), async handler => {
      await handler.outputFile('in/a/sub/folder/edge', data, { dedup: true })
      await handler.unlink(dataPath, { dedup: true })
      // no error if source is already deleted
      await assert.doesNotReject(() => handler.unlink('in/a/sub/folder/edge', { dedup: true }))
    })
  })

  it('handles edge cases : non deduplicated file ', async () => {
    await Disposable.use(getSyncedHandler({ url: `file://${dir}` }, { dedup: true }), async handler => {
      await handler.outputFile('in/a/sub/folder/edge', data, { dedup: false })
      // no error if deleting a non dedup file with dedup flags
      await assert.doesNotReject(() => handler.unlink('in/a/sub/folder/edge', { dedup: true }))
    })
  })
})

View File

@@ -228,6 +228,11 @@ export default class S3Handler extends RemoteHandlerAbstract {
},
})
async _writeFile(file, data, options) {
if (options?.dedup ?? false) {
throw new Error(
"S3 remotes don't support deduplication from XO, please use the deduplication of your S3 provider if any"
)
}
return this.#s3.send(
new PutObjectCommand({
...this.#createParams(file),

View File

@@ -2,6 +2,9 @@
## **next**
- Ability to export selected VMs as CSV file (PR [#6915](https://github.com/vatesfr/xen-orchestra/pull/6915))
- [Pool/VMs] Ability to export selected VMs as JSON file (PR [#6911](https://github.com/vatesfr/xen-orchestra/pull/6911))
## **0.1.1** (2023-07-03)
- Invalidate sessionId token after logout (PR [#6480](https://github.com/vatesfr/xen-orchestra/pull/6480))

View File

@@ -17,6 +17,7 @@
"@fortawesome/vue-fontawesome": "^3.0.1",
"@novnc/novnc": "^1.3.0",
"@types/d3-time-format": "^4.0.0",
"@types/file-saver": "^2.0.5",
"@types/lodash-es": "^4.17.6",
"@types/marked": "^4.0.8",
"@vueuse/core": "^10.1.2",
@@ -25,6 +26,7 @@
"d3-time-format": "^4.1.0",
"decorator-synchronized": "^0.6.0",
"echarts": "^5.3.3",
"file-saver": "^2.0.5",
"highlight.js": "^11.6.0",
"human-format": "^1.1.0",
"iterable-backoff": "^0.1.0",

View File

@@ -1,21 +1,48 @@
<template>
<UiCard :color="hasError ? 'error' : undefined">
<UiCardTitle>{{ $t("cpu-usage") }}</UiCardTitle>
<UiCardTitle>
{{ $t("cpu-usage") }}
<template v-if="vmStatsCanBeExpired || hostStatsCanBeExpired" #right>
<UiSpinner v-tooltip="$t('fetching-fresh-data')" />
</template>
</UiCardTitle>
<HostsCpuUsage />
<VmsCpuUsage />
</UiCard>
</template>
<script lang="ts" setup>
import { vTooltip } from "@/directives/tooltip.directive";
import HostsCpuUsage from "@/components/pool/dashboard/cpuUsage/HostsCpuUsage.vue";
import VmsCpuUsage from "@/components/pool/dashboard/cpuUsage/VmsCpuUsage.vue";
import UiCard from "@/components/ui/UiCard.vue";
import UiCardTitle from "@/components/ui/UiCardTitle.vue";
import { useHostStore } from "@/stores/host.store";
import { useVmStore } from "@/stores/vm.store";
import { computed } from "vue";
import { computed, inject, type ComputedRef } from "vue";
import type { Stat } from "@/composables/fetch-stats.composable";
import type { HostStats, VmStats } from "@/libs/xapi-stats";
import UiSpinner from "@/components/ui/UiSpinner.vue";
const { hasError: hasVmError } = useVmStore().subscribe();
const { hasError: hasHostError } = useHostStore().subscribe();
const vmStats = inject<ComputedRef<Stat<VmStats>[]>>(
"vmStats",
computed(() => [])
);
const hostStats = inject<ComputedRef<Stat<HostStats>[]>>(
"hostStats",
computed(() => [])
);
const vmStatsCanBeExpired = computed(() =>
vmStats.value.some((stat) => stat.canBeExpired)
);
const hostStatsCanBeExpired = computed(() =>
hostStats.value.some((stat) => stat.canBeExpired)
);
const hasError = computed(() => hasVmError.value || hasHostError.value);
</script>

View File

@@ -1,5 +1,6 @@
<template>
<!-- TODO: add a loader when data is not fully loaded or undefined -->
<!-- TODO: add small loader with tooltips when stats can be expired -->
<!-- TODO: display the NoData component in case of a data recovery error -->
<LinearChart
:data="data"

View File

@@ -1,22 +1,50 @@
<template>
<UiCard :color="hasError ? 'error' : undefined">
<UiCardTitle>{{ $t("ram-usage") }}</UiCardTitle>
<UiCardTitle>
{{ $t("ram-usage") }}
<template v-if="vmStatsCanBeExpired || hostStatsCanBeExpired" #right>
<UiSpinner v-tooltip="$t('fetching-fresh-data')" />
</template>
</UiCardTitle>
<HostsRamUsage />
<VmsRamUsage />
</UiCard>
</template>
<script lang="ts" setup>
import { vTooltip } from "@/directives/tooltip.directive";
import HostsRamUsage from "@/components/pool/dashboard/ramUsage/HostsRamUsage.vue";
import VmsRamUsage from "@/components/pool/dashboard/ramUsage/VmsRamUsage.vue";
import UiCard from "@/components/ui/UiCard.vue";
import UiCardTitle from "@/components/ui/UiCardTitle.vue";
import { useHostStore } from "@/stores/host.store";
import { useVmStore } from "@/stores/vm.store";
import { computed } from "vue";
import { computed, inject } from "vue";
import type { ComputedRef } from "vue";
import type { HostStats, VmStats } from "@/libs/xapi-stats";
import type { Stat } from "@/composables/fetch-stats.composable";
import UiSpinner from "@/components/ui/UiSpinner.vue";
const { hasError: hasVmError } = useVmStore().subscribe();
const { hasError: hasHostError } = useHostStore().subscribe();
const vmStats = inject<ComputedRef<Stat<VmStats>[]>>(
"vmStats",
computed(() => [])
);
const hostStats = inject<ComputedRef<Stat<HostStats>[]>>(
"hostStats",
computed(() => [])
);
const vmStatsCanBeExpired = computed(() =>
vmStats.value.some((stat) => stat.canBeExpired)
);
const hostStatsCanBeExpired = computed(() =>
hostStats.value.some((stat) => stat.canBeExpired)
);
const hasError = computed(() => hasVmError.value || hasHostError.value);
</script>

View File

@@ -1,5 +1,6 @@
<template>
<!-- TODO: add a loader when data is not fully loaded or undefined -->
<!-- TODO: add small loader with tooltips when stats can be expired -->
<!-- TODO: Display the NoDataError component in case of a data recovery error -->
<LinearChart
:data="data"

View File

@@ -1,5 +1,6 @@
<template>
<!-- TODO: add a loader when data is not fully loaded or undefined -->
<!-- TODO: add small loader with tooltips when stats can be expired -->
<!-- TODO: display the NoDataError component in case of a data recovery error -->
<LinearChart
:data="data"

View File

@@ -0,0 +1,51 @@
<template>
<MenuItem :icon="faFileExport">
{{ $t("export") }}
<template #submenu>
<MenuItem
v-tooltip="{ content: $t('coming-soon'), placement: 'left' }"
:icon="faDisplay"
>
{{ $t("export-vms") }}
</MenuItem>
<MenuItem
:icon="faCode"
@click="
exportVmsAsJsonFile(vms, `vms_${new Date().toISOString()}.json`)
"
>
{{ $t("export-table-to", { type: ".json" }) }}
</MenuItem>
<MenuItem
:icon="faFileCsv"
@click="exportVmsAsCsvFile(vms, `vms_${new Date().toISOString()}.csv`)"
>
{{ $t("export-table-to", { type: ".csv" }) }}
</MenuItem>
</template>
</MenuItem>
</template>
<script lang="ts" setup>
import { computed } from "vue";
import { exportVmsAsCsvFile, exportVmsAsJsonFile } from "@/libs/vm";
import MenuItem from "@/components/menu/MenuItem.vue";
import {
faCode,
faDisplay,
faFileCsv,
faFileExport,
} from "@fortawesome/free-solid-svg-icons";
import { useVmStore } from "@/stores/vm.store";
import { vTooltip } from "@/directives/tooltip.directive";
import type { XenApiVm } from "@/libs/xen-api";
const props = defineProps<{
vmRefs: XenApiVm["$ref"][];
}>();
const { getByOpaqueRef: getVm } = useVmStore().subscribe();
const vms = computed(() =>
props.vmRefs.map(getVm).filter((vm): vm is XenApiVm => vm !== undefined)
);
</script>

View File

@@ -25,30 +25,8 @@
<MenuItem v-tooltip="$t('coming-soon')" :icon="faCamera">
{{ $t("snapshot") }}
</MenuItem>
<VmActionExportItem :vm-refs="selectedRefs" />
<VmActionDeleteItem :vm-refs="selectedRefs" />
<MenuItem :icon="faFileExport">
{{ $t("export") }}
<template #submenu>
<MenuItem
v-tooltip="{ content: $t('coming-soon'), placement: 'left' }"
:icon="faDisplay"
>
{{ $t("export-vms") }}
</MenuItem>
<MenuItem
v-tooltip="{ content: $t('coming-soon'), placement: 'left' }"
:icon="faCode"
>
{{ $t("export-table-to", { type: ".json" }) }}
</MenuItem>
<MenuItem
v-tooltip="{ content: $t('coming-soon'), placement: 'left' }"
:icon="faFileCsv"
>
{{ $t("export-table-to", { type: ".csv" }) }}
</MenuItem>
</template>
</MenuItem>
</AppMenu>
</template>
@@ -57,6 +35,7 @@ import AppMenu from "@/components/menu/AppMenu.vue";
import MenuItem from "@/components/menu/MenuItem.vue";
import UiButton from "@/components/ui/UiButton.vue";
import VmActionCopyItem from "@/components/vm/VmActionItems/VmActionCopyItem.vue";
import VmActionExportItem from "@/components/vm/VmActionItems/VmActionExportItem.vue";
import VmActionDeleteItem from "@/components/vm/VmActionItems/VmActionDeleteItem.vue";
import VmActionPowerStateItems from "@/components/vm/VmActionItems/VmActionPowerStateItems.vue";
import { vTooltip } from "@/directives/tooltip.directive";
@@ -64,12 +43,8 @@ import type { XenApiVm } from "@/libs/xen-api";
import { useUiStore } from "@/stores/ui.store";
import {
faCamera,
faCode,
faDisplay,
faEdit,
faEllipsis,
faFileCsv,
faFileExport,
faPowerOff,
faRoute,
} from "@fortawesome/free-solid-svg-icons";

View File

@@ -10,6 +10,7 @@ import { type Pausable, promiseTimeout, useTimeoutPoll } from "@vueuse/core";
import { computed, type ComputedRef, onUnmounted, ref } from "vue";
export type Stat<T> = {
canBeExpired: boolean;
id: string;
name: string;
stats: T | undefined;
@@ -21,8 +22,10 @@ type GetStats<
S extends HostStats | VmStats
> = (
uuid: T["uuid"],
granularity: GRANULARITY
) => Promise<XapiStatsResponse<S>> | undefined;
granularity: GRANULARITY,
ignoreExpired: boolean,
opts: { abortSignal?: AbortSignal }
) => Promise<XapiStatsResponse<S> | undefined> | undefined;
export type FetchedStats<
T extends XenApiHost | XenApiVm,
@@ -41,6 +44,7 @@ export default function useFetchStats<
>(getStats: GetStats<T, S>, granularity: GRANULARITY): FetchedStats<T, S> {
const stats = ref<Map<string, Stat<S>>>(new Map());
const timestamp = ref<number[]>([0, 0]);
const abortController = new AbortController();
const register = (object: T) => {
const mapKey = `${object.uuid}-${granularity}`;
@@ -49,13 +53,18 @@ export default function useFetchStats<
return;
}
const ignoreExpired = computed(() => !stats.value.has(mapKey));
const pausable = useTimeoutPoll(
async () => {
if (!stats.value.has(mapKey)) {
return;
}
const newStats = await getStats(object.uuid, granularity);
const newStats = (await getStats(
object.uuid,
granularity,
ignoreExpired.value,
{
abortSignal: abortController.signal,
}
)) as XapiStatsResponse<S>;
if (newStats === undefined) {
return;
@@ -69,6 +78,7 @@ export default function useFetchStats<
];
stats.value.get(mapKey)!.stats = newStats.stats;
stats.value.get(mapKey)!.canBeExpired = newStats.canBeExpired;
await promiseTimeout(newStats.interval * 1000);
},
0,
@@ -76,6 +86,7 @@ export default function useFetchStats<
);
stats.value.set(mapKey, {
canBeExpired: false,
id: object.uuid,
name: object.name_label,
stats: undefined,
@@ -90,6 +101,7 @@ export default function useFetchStats<
};
onUnmounted(() => {
abortController.abort();
stats.value.forEach((stat) => stat.pausable.pause());
});

View File

@@ -0,0 +1,41 @@
import { saveAs } from "file-saver";
import type { XenApiVm } from "@/libs/xen-api";
function stringifyCsvValue(value: any) {
let res = "";
if (Array.isArray(value)) {
res = value.join(";");
} else if (typeof value === "object") {
res = JSON.stringify(value);
} else {
res = String(value);
}
return `"${res.replace(/"/g, '""')}"`;
}
export function exportVmsAsCsvFile(vms: XenApiVm[], fileName: string) {
const csvHeaders = Object.keys(vms[0]);
const csvRows = vms.map((vm) =>
csvHeaders.map((header) => stringifyCsvValue(vm[header as keyof XenApiVm]))
);
saveAs(
new Blob(
[[csvHeaders, ...csvRows].map((row) => row.join(",")).join("\n")],
{
type: "text/csv;charset=utf-8",
}
),
fileName
);
}
export function exportVmsAsJsonFile(vms: XenApiVm[], fileName: string) {
saveAs(
new Blob([JSON.stringify(vms, null, 2)], {
type: "application/json",
}),
fileName
);
}

View File

@@ -295,18 +295,22 @@ export type HostStats = {
};
export type XapiStatsResponse<T> = {
canBeExpired: boolean;
endTimestamp: number;
interval: number;
stats: T;
};
type StatsByObject = {
[uuid: string]: {
[step: string]: XapiStatsResponse<HostStats | VmStats>;
};
};
export default class XapiStats {
#xapi;
#statsByObject: {
[uuid: string]: {
[step: string]: XapiStatsResponse<HostStats | any>;
};
} = {};
#statsByObject: StatsByObject = {};
#cachedStatsByObject: StatsByObject = {};
constructor(xapi: XenApi) {
this.#xapi = xapi;
}
@@ -314,7 +318,12 @@ export default class XapiStats {
// Execute one http request on a XenServer for get stats
// Return stats (Json format) or throws got exception
@limitConcurrency(3)
async _getJson(host: XenApiHost, timestamp: any, step: any) {
async _getJson(
host: XenApiHost,
timestamp: number,
step: RRD_STEP,
{ abortSignal }: { abortSignal?: AbortSignal } = {}
) {
const resp = await this.#xapi.getResource("/rrd_updates", {
host,
query: {
@@ -324,13 +333,23 @@ export default class XapiStats {
json: "true",
start: timestamp,
},
abortSignal,
});
return JSON5.parse(await resp.text());
}
// To avoid multiple requests, we keep a cache for the stats and
// only return it if we do not exceed a step
#getCachedStats(uuid: any, step: any, currentTimeStamp: any) {
#getCachedStats(
uuid: string,
step: RRD_STEP,
currentTimeStamp: number,
ignoreExpired = false
) {
if (ignoreExpired) {
return this.#cachedStatsByObject[uuid]?.[step];
}
const statsByObject = this.#statsByObject;
const stats = statsByObject[uuid]?.[step];
@@ -347,12 +366,16 @@ export default class XapiStats {
}
@synchronized.withKey(({ host }: { host: XenApiHost }) => host.uuid)
async _getAndUpdateStats({
async _getAndUpdateStats<T extends VmStats | HostStats>({
abortSignal,
host,
ignoreExpired = false,
uuid,
granularity,
}: {
abortSignal?: AbortSignal;
host: XenApiHost;
ignoreExpired?: boolean;
uuid: any;
granularity: GRANULARITY;
}) {
@@ -367,7 +390,13 @@ export default class XapiStats {
}
const currentTimeStamp = Math.floor(new Date().getTime() / 1000);
const stats = this.#getCachedStats(uuid, step, currentTimeStamp);
const stats = this.#getCachedStats(
uuid,
step,
currentTimeStamp,
ignoreExpired
) as XapiStatsResponse<T>;
if (stats !== undefined) {
return stats;
}
@@ -376,75 +405,113 @@ export default class XapiStats {
// To avoid crossing over the boundary, we ask for one less step
const optimumTimestamp = currentTimeStamp - maxDuration + step;
const json = await this._getJson(host, optimumTimestamp, step);
const actualStep = json.meta.step as number;
try {
const json = await this._getJson(host, optimumTimestamp, step, {
abortSignal,
});
if (json.data.length > 0) {
// fetched data is organized from the newest to the oldest
// but this implementation requires it in the other direction
json.data.reverse();
json.meta.legend.forEach((legend: any, index: number) => {
const [, type, uuid, metricType] = /^AVERAGE:([^:]+):(.+):(.+)$/.exec(
legend
) as any;
const actualStep = json.meta.step as number;
const metrics = STATS[type] as any;
if (metrics === undefined) {
return;
}
if (json.data.length > 0) {
// fetched data is organized from the newest to the oldest
// but this implementation requires it in the other direction
json.data.reverse();
json.meta.legend.forEach((legend: any, index: number) => {
const [, type, uuid, metricType] = /^AVERAGE:([^:]+):(.+):(.+)$/.exec(
legend
) as any;
const { metric, testResult } = findMetric(metrics, metricType) as any;
if (metric === undefined) {
return;
}
const xoObjectStats = createGetProperty(this.#statsByObject, uuid, {});
let stepStats = xoObjectStats[actualStep];
if (
stepStats === undefined ||
stepStats.endTimestamp !== json.meta.end
) {
stepStats = xoObjectStats[actualStep] = {
endTimestamp: json.meta.end,
interval: actualStep,
};
}
const path =
metric.getPath !== undefined
? metric.getPath(testResult)
: [findKey(metrics, metric)];
const lastKey = path.length - 1;
let metricStats = createGetProperty(stepStats, "stats", {});
path.forEach((property: any, key: number) => {
if (key === lastKey) {
metricStats[property] = computeValues(
json.data,
index,
metric.transformValue
);
const metrics = STATS[type] as any;
if (metrics === undefined) {
return;
}
metricStats = createGetProperty(metricStats, property, {});
const { metric, testResult } = findMetric(metrics, metricType) as any;
if (metric === undefined) {
return;
}
const xoObjectStats = createGetProperty(
this.#statsByObject,
uuid,
{}
);
const cacheXoObjectStats = createGetProperty(
this.#cachedStatsByObject,
uuid,
{}
);
let stepStats = xoObjectStats[actualStep];
let cacheStepStats = cacheXoObjectStats[actualStep];
if (
stepStats === undefined ||
stepStats.endTimestamp !== json.meta.end
) {
stepStats = xoObjectStats[actualStep] = {
endTimestamp: json.meta.end,
interval: actualStep,
canBeExpired: false,
};
cacheStepStats = cacheXoObjectStats[actualStep] = {
endTimestamp: json.meta.end,
interval: actualStep,
canBeExpired: true,
};
}
const path =
metric.getPath !== undefined
? metric.getPath(testResult)
: [findKey(metrics, metric)];
const lastKey = path.length - 1;
let metricStats = createGetProperty(stepStats, "stats", {});
let cacheMetricStats = createGetProperty(cacheStepStats, "stats", {});
path.forEach((property: any, key: number) => {
if (key === lastKey) {
metricStats[property] = computeValues(
json.data,
index,
metric.transformValue
);
cacheMetricStats[property] = computeValues(
json.data,
index,
metric.transformValue
);
return;
}
metricStats = createGetProperty(metricStats, property, {});
cacheMetricStats = createGetProperty(
cacheMetricStats,
property,
{}
);
});
});
});
}
if (actualStep !== step) {
throw new FaultyGranularity(
`Unable to get the true granularity: ${actualStep}`
);
}
return (
this.#statsByObject[uuid]?.[step] ?? {
endTimestamp: currentTimeStamp,
interval: step,
stats: {},
}
);
if (actualStep !== step) {
throw new FaultyGranularity(
`Unable to get the true granularity: ${actualStep}`
);
}
} catch (error) {
if (error instanceof Error && error.name === "AbortError") {
return;
}
throw error;
}
return (this.#statsByObject[uuid]?.[step] ?? {
endTimestamp: currentTimeStamp,
interval: step,
stats: {},
}) as XapiStatsResponse<T>;
}
}

View File

@@ -266,7 +266,11 @@ export default class XenApi {
async getResource(
pathname: string,
{ host, query }: { host: XenApiHost; query: any }
{
abortSignal,
host,
query,
}: { abortSignal?: AbortSignal; host: XenApiHost; query: any }
) {
const url = new URL("http://localhost");
url.protocol = window.location.protocol;
@@ -277,7 +281,7 @@ export default class XenApi {
session_id: this.#sessionId,
}).toString();
return fetch(url);
return fetch(url, { signal: abortSignal });
}
async loadRecords<T extends XenApiRecord<string>>(

View File

@@ -36,6 +36,7 @@
"export": "Export",
"export-table-to": "Export table to {type}",
"export-vms": "Export VMs",
"fetching-fresh-data": "Fetching fresh data",
"filter": {
"comparison": {
"contains": "Contains",

View File

@@ -36,6 +36,7 @@
"export": "Exporter",
"export-table-to": "Exporter le tableau en {type}",
"export-vms": "Exporter les VMs",
"fetching-fresh-data": "Récupération de données à jour",
"filter": {
"comparison": {
"contains": "Contient",

View File

@@ -1,5 +1,9 @@
import { isHostRunning, sortRecordsByNameLabel } from "@/libs/utils";
import type { GRANULARITY, XapiStatsResponse } from "@/libs/xapi-stats";
import type {
GRANULARITY,
HostStats,
XapiStatsResponse,
} from "@/libs/xapi-stats";
import type { XenApiHost, XenApiHostMetrics } from "@/libs/xen-api";
import { useXapiCollectionStore } from "@/stores/xapi-collection.store";
import { useXenApiStore } from "@/stores/xen-api.store";
@@ -8,11 +12,15 @@ import { createSubscribe } from "@/types/xapi-collection";
import { defineStore } from "pinia";
import { computed, type ComputedRef } from "vue";
type GetStats = (
hostUuid: XenApiHost["uuid"],
granularity: GRANULARITY,
ignoreExpired: boolean,
opts: { abortSignal?: AbortSignal }
) => Promise<XapiStatsResponse<HostStats> | undefined> | undefined;
type GetStatsExtension = {
getStats: (
hostUuid: XenApiHost["uuid"],
granularity: GRANULARITY
) => Promise<XapiStatsResponse<any>> | undefined;
getStats: GetStats;
};
type RunningHostsExtension = [
@@ -31,9 +39,11 @@ export const useHostStore = defineStore("host", () => {
const subscribe = createSubscribe<XenApiHost, Extensions>((options) => {
const originalSubscription = hostCollection.subscribe(options);
const getStats = (
hostUuid: XenApiHost["uuid"],
granularity: GRANULARITY
const getStats: GetStats = (
hostUuid,
granularity,
ignoreExpired = false,
{ abortSignal }
) => {
const host = originalSubscription.getByUuid(hostUuid);
@@ -45,8 +55,10 @@ export const useHostStore = defineStore("host", () => {
? xenApiStore.getXapiStats()
: undefined;
return xapiStats?._getAndUpdateStats({
return xapiStats?._getAndUpdateStats<HostStats>({
abortSignal,
host,
ignoreExpired,
uuid: host.uuid,
granularity,
});

View File

@@ -1,5 +1,9 @@
import { sortRecordsByNameLabel } from "@/libs/utils";
import type { GRANULARITY, XapiStatsResponse } from "@/libs/xapi-stats";
import type {
GRANULARITY,
VmStats,
XapiStatsResponse,
} from "@/libs/xapi-stats";
import type { XenApiHost, XenApiVm } from "@/libs/xen-api";
import { POWER_STATE } from "@/libs/xen-api";
import { useXapiCollectionStore } from "@/stores/xapi-collection.store";
@@ -8,6 +12,12 @@ import { createSubscribe, type Subscription } from "@/types/xapi-collection";
import { defineStore } from "pinia";
import { computed, type ComputedRef } from "vue";
type GetStats = (
id: XenApiVm["uuid"],
granularity: GRANULARITY,
ignoreExpired: boolean,
opts: { abortSignal?: AbortSignal }
) => Promise<XapiStatsResponse<VmStats> | undefined> | undefined;
type DefaultExtension = {
recordsByHostRef: ComputedRef<Map<XenApiHost["$ref"], XenApiVm[]>>;
runningVms: ComputedRef<XenApiVm[]>;
@@ -15,10 +25,7 @@ type DefaultExtension = {
type GetStatsExtension = [
{
getStats: (
id: XenApiVm["uuid"],
granularity: GRANULARITY
) => Promise<XapiStatsResponse<any>>;
getStats: GetStats;
},
{ hostSubscription: Subscription<XenApiHost, object> }
];
@@ -60,33 +67,49 @@ export const useVmStore = defineStore("vm", () => {
const hostSubscription = options?.hostSubscription;
const getStatsSubscription = hostSubscription !== undefined && {
getStats: (vmUuid: XenApiVm["uuid"], granularity: GRANULARITY) => {
const xenApiStore = useXenApiStore();
if (!xenApiStore.isConnected) {
return undefined;
const getStatsSubscription:
| {
getStats: GetStats;
}
| undefined =
hostSubscription !== undefined
? {
getStats: (
id,
granularity,
ignoreExpired = false,
{ abortSignal }
) => {
const xenApiStore = useXenApiStore();
const vm = originalSubscription.getByUuid(vmUuid);
if (!xenApiStore.isConnected) {
return undefined;
}
if (vm === undefined) {
throw new Error(`VM ${vmUuid} could not be found.`);
}
const vm = originalSubscription.getByUuid(id);
const host = hostSubscription.getByOpaqueRef(vm.resident_on);
if (vm === undefined) {
throw new Error(`VM ${id} could not be found.`);
}
if (host === undefined) {
throw new Error(`VM ${vmUuid} is halted or host could not be found.`);
}
const host = hostSubscription.getByOpaqueRef(vm.resident_on);
return xenApiStore.getXapiStats()._getAndUpdateStats({
host,
uuid: vm.uuid,
granularity,
});
},
};
if (host === undefined) {
throw new Error(
`VM ${id} is halted or host could not be found.`
);
}
return xenApiStore.getXapiStats()._getAndUpdateStats<VmStats>({
abortSignal,
host,
ignoreExpired,
uuid: vm.uuid,
granularity,
});
},
}
: undefined;
return {
...originalSubscription,

View File

@@ -15,6 +15,8 @@
- [Incremental Backup & Replication] Attempt to work around HVM multiplier issues when creating VMs on older XAPIs (PR [#6866](https://github.com/vatesfr/xen-orchestra/pull/6866))
- [REST API] Fix VDI export when NBD is enabled
- [XO Config Cloud Backup] Improve wording about passphrase (PR [#6938](https://github.com/vatesfr/xen-orchestra/pull/6938))
- [Pool] Fix IPv6 handling when adding hosts
### Packages to release
@@ -32,10 +34,15 @@
<!--packages-start-->
- @vates/fuse-vhd major
- @vates/nbd-client major
- @vates/node-vsphere-soap major
- @xen-orchestra/backups minor
- @xen-orchestra/xapi major
- complex-matcher patch
- xen-api patch
- xo-server patch
- xo-server-audit patch
- xo-web minor
<!--packages-end-->

View File

@@ -104,7 +104,7 @@ describe('VhdAbstract', async () => {
it('renames and unlink a VhdDirectory', async () => {
const initalSize = 4
const vhdDirectory = `${tempDir}/randomfile.dir`
await createRandomVhdDirectory(vhdDirectory, initalSize)
await createRandomVhdDirectory(vhdDirectory, initalSize, { dedup: true })
await Disposable.use(async function* () {
const handler = yield getSyncedHandler({ url: 'file:///' })
@@ -116,11 +116,24 @@ describe('VhdAbstract', async () => {
// it should clean an existing directory
await fs.mkdir(targetFileName)
await fs.writeFile(`${targetFileName}/dummy`, 'I exists')
await VhdAbstract.unlink(handler, `${targetFileName}/dummy`)
await VhdAbstract.unlink(handler, `${targetFileName}`)
assert.equal(await fs.exists(`${targetFileName}/dummy`), false)
})
})
it('unlinks a deduplicated VhdDirectory', async () => {
const initalSize = 4
const vhdDirectory = `${tempDir}/random.vhd`
await createRandomVhdDirectory(vhdDirectory, initalSize, { dedup: true })
await Disposable.use(async function* () {
const handler = yield getSyncedHandler({ url: 'file:///' })
await VhdAbstract.unlink(handler, vhdDirectory)
assert.equal(await fs.exists(vhdDirectory), false)
})
})
it('Creates, renames and unlink alias', async () => {
const initalSize = 4
const rawFileName = `${tempDir}/randomfile`

View File

@@ -206,7 +206,14 @@ exports.VhdAbstract = class VhdAbstract {
await handler.unlink(resolved)
} catch (err) {
if (err.code === 'EISDIR') {
await handler.rmtree(resolved)
// @todo : should we open it ?
const chunkFilters = await handler.readFile(resolved + '/chunk-filters.json').then(JSON.parse, error => {
if (error.code === 'ENOENT') {
return []
}
throw error
})
await handler.rmtree(resolved, { dedup: chunkFilters[1] === true })
} else {
throw err
}

View File

@@ -19,6 +19,7 @@ const NULL_COMPRESSOR = {
}
const COMPRESSORS = {
none: NULL_COMPRESSOR,
gzip: {
compress: (
gzip => buffer =>
@@ -78,6 +79,7 @@ exports.VhdDirectory = class VhdDirectory extends VhdAbstract {
#header
footer
#compressor
#dedup
get compressionType() {
return this.#compressor.id
@@ -102,8 +104,9 @@ exports.VhdDirectory = class VhdDirectory extends VhdAbstract {
this.#uncheckedBlockTable = blockTable
}
static async open(handler, path, { flags = 'r+' } = {}) {
const vhd = new VhdDirectory(handler, path, { flags })
static async open(handler, path, { compression, flags = 'r+' } = {}) {
const dedup = path.endsWith('dedup.vhd')
const vhd = new VhdDirectory(handler, path, { compression, dedup, flags })
// opening a file for reading does not trigger EISDIR as long as we don't really read from it:
// https://man7.org/linux/man-pages/man2/open.2.html
@@ -117,9 +120,9 @@ exports.VhdDirectory = class VhdDirectory extends VhdAbstract {
}
}
static async create(handler, path, { flags = 'wx+', compression } = {}) {
static async create(handler, path, { flags = 'wx+', compression, dedup } = {}) {
await handler.mktree(path)
const vhd = new VhdDirectory(handler, path, { flags, compression })
const vhd = new VhdDirectory(handler, path, { flags, compression, dedup })
return {
dispose: () => {},
value: vhd,
@@ -132,6 +135,7 @@ exports.VhdDirectory = class VhdDirectory extends VhdAbstract {
this._path = path
this._opts = opts
this.#compressor = getCompressor(opts?.compression)
this.#dedup = opts?.dedup ?? false
this.writeBlockAllocationTable = synchronized()(this.writeBlockAllocationTable)
}
@@ -158,7 +162,7 @@ exports.VhdDirectory = class VhdDirectory extends VhdAbstract {
}
}
async _writeChunk(partName, buffer) {
async _writeChunk(partName, buffer, dedup = false) {
assert.notStrictEqual(
this._opts?.flags,
'r',
@@ -168,7 +172,7 @@ exports.VhdDirectory = class VhdDirectory extends VhdAbstract {
// in the case of VhdDirectory, we want to create the file if it does not exist
const flags = this._opts?.flags === 'r+' ? 'w' : this._opts?.flags
const compressed = await this.#compressor.compress(buffer)
return this._handler.outputFile(this.#getChunkPath(partName), compressed, { flags })
return this._handler.outputFile(this.#getChunkPath(partName), compressed, { flags, dedup })
}
// put block in subdirectories to limit impact when doing directory listing
@@ -262,6 +266,10 @@ exports.VhdDirectory = class VhdDirectory extends VhdAbstract {
}
try {
const blockExists = this.containsBlock(blockId)
if (blockExists && this.#dedup) {
// this will trigger the dedup store cleaning if needed
await this._handler.unlink(this._getFullBlockPath(blockId), { dedup: true })
}
await this._handler.rename(childBlockPath, this._getFullBlockPath(blockId))
if (!blockExists) {
setBitmap(this.#blockTable, blockId)
@@ -285,7 +293,7 @@ exports.VhdDirectory = class VhdDirectory extends VhdAbstract {
}
async writeEntireBlock(block) {
await this._writeChunk(this.#getBlockPath(block.id), block.buffer)
await this._writeChunk(this.#getBlockPath(block.id), block.buffer, this.#dedup)
setBitmap(this.#blockTable, block.id)
}

View File

@@ -8,8 +8,8 @@ const { asyncEach } = require('@vates/async-each')
const { warn } = createLogger('vhd-lib:createVhdDirectoryFromStream')
const buildVhd = Disposable.wrap(async function* (handler, path, inputStream, { concurrency, compression }) {
const vhd = yield VhdDirectory.create(handler, path, { compression })
const buildVhd = Disposable.wrap(async function* (handler, path, inputStream, { concurrency, compression, dedup }) {
const vhd = yield VhdDirectory.create(handler, path, { compression, dedup })
await asyncEach(
parseVhdStream(inputStream),
async function (item) {
@@ -45,10 +45,10 @@ exports.createVhdDirectoryFromStream = async function createVhdDirectoryFromStre
handler,
path,
inputStream,
{ validator, concurrency = 16, compression } = {}
{ validator, concurrency = 16, compression, dedup } = {}
) {
try {
const size = await buildVhd(handler, path, inputStream, { concurrency, compression })
const size = await buildVhd(handler, path, inputStream, { concurrency, compression, dedup })
if (validator !== undefined) {
await validator.call(this, path)
}

View File

@@ -62,7 +62,7 @@ exports.recoverRawContent = async function recoverRawContent(vhdName, rawName, o
}
// @ todo how can I call vhd-cli copy from here
async function convertToVhdDirectory(rawFileName, vhdFileName, path) {
async function convertToVhdDirectory(rawFileName, vhdFileName, path, { dedup = false } = {}) {
fs.mkdirp(path)
const srcVhd = await fs.open(vhdFileName, 'r')
@@ -95,15 +95,17 @@ async function convertToVhdDirectory(rawFileName, vhdFileName, path) {
await fs.read(srcRaw, blockData, 0, blockData.length, offset)
await fs.writeFile(path + '/blocks/0/' + i, Buffer.concat([bitmap, blockData]))
}
await fs.writeFile(path + '/chunk-filters.json', JSON.stringify(['none', dedup]))
await fs.close(srcRaw)
}
exports.convertToVhdDirectory = convertToVhdDirectory
exports.createRandomVhdDirectory = async function createRandomVhdDirectory(path, sizeMB) {
exports.createRandomVhdDirectory = async function createRandomVhdDirectory(path, sizeMB, { dedup = false } = {}) {
fs.mkdirp(path)
const rawFileName = `${path}/temp.raw`
await createRandomFile(rawFileName, sizeMB)
const vhdFileName = `${path}/temp.vhd`
await convertFromRawToVhd(rawFileName, vhdFileName)
await convertToVhdDirectory(rawFileName, vhdFileName, path)
await convertToVhdDirectory(rawFileName, vhdFileName, path, { dedup })
}

View File

@@ -954,6 +954,8 @@ export class Xapi extends EventEmitter {
url,
agent: this.httpAgent,
})
const { hostname } = url
url.hostnameRaw = hostname[0] === '[' ? hostname.slice(1, -1) : hostname
this._url = url
}

View File

@@ -30,14 +30,12 @@ const parseResult = result => {
return result.Value
}
const removeBrackets = hostname => (hostname[0] === '[' ? hostname.slice(1, -1) : hostname)
export default ({ secureOptions, url: { hostname, pathname, port, protocol }, agent }) => {
export default ({ secureOptions, url: { hostnameRaw, pathname, port, protocol }, agent }) => {
const secure = protocol === 'https:'
const client = (secure ? createSecureClient : createClient)({
...(secure ? secureOptions : undefined),
agent,
host: removeBrackets(hostname),
host: hostnameRaw,
pathname,
port,
})

View File

@@ -31,6 +31,7 @@ const DEFAULT_BLOCKED_LIST = {
'job.getAll': true,
'log.get': true,
'metadataBackup.getAllJobs': true,
'mirrorBackup.getAllJobs': true,
'network.getBondModes': true,
'pif.getIpv4ConfigurationModes': true,
'plugin.get': true,

View File

@@ -1,4 +1,3 @@
import defaults from 'lodash/defaults.js'
import findKey from 'lodash/findKey.js'
import forEach from 'lodash/forEach.js'
import identity from 'lodash/identity.js'
@@ -10,9 +9,7 @@ import sum from 'lodash/sum.js'
import uniq from 'lodash/uniq.js'
import zipWith from 'lodash/zipWith.js'
import { BaseError } from 'make-error'
import { limitConcurrency } from 'limit-concurrency-decorator'
import { parseDateTime } from '@xen-orchestra/xapi'
import { synchronized } from 'decorator-synchronized'
export class FaultyGranularity extends BaseError {}
@@ -65,8 +62,6 @@ const computeValues = (dataRow, legendIndex, transformValue = identity) =>
const combineStats = (stats, path, combineValues) => zipWith(...map(stats, path), (...values) => combineValues(values))
const createGetProperty = (obj, property, defaultValue) => defaults(obj, { [property]: defaultValue })[property]
const testMetric = (test, type) =>
typeof test === 'string' ? test === type : typeof test === 'function' ? test(type) : test.exec(type)
@@ -226,31 +221,20 @@ const STATS = {
// data: Item[columns] // Item = { t: Number, values: Number[rows] }
// }
// Local cache
// _statsByObject : {
// [uuid]: {
// [step]: {
// endTimestamp: Number, // the timestamp of the last statistic point
// interval: Number, // step
// stats: {
// [metric1]: Number[],
// [metric2]: {
// [subMetric]: Number[],
// }
// }
// }
// }
// }
export default class XapiStats {
// hostCache => host uid => granularity => {
// timestamp
// value : promise or value
// }
#hostCache = {}
constructor() {
this._statsByObject = {}
}
// Execute one http request on a XenServer for get stats
// Return stats (Json format) or throws got exception
@limitConcurrency(3)
_getJson(xapi, host, timestamp, step) {
return xapi
_updateJsonCache(xapi, host, step, timestamp) {
const hostUuid = host.uuid
this.#hostCache[hostUuid] = this.#hostCache[hostUuid] ?? {}
const promise = xapi
.getResource('/rrd_updates', {
host,
query: {
@@ -262,27 +246,40 @@ export default class XapiStats {
},
})
.then(response => response.text().then(JSON5.parse))
.catch(err => {
delete this.#hostCache[hostUuid][step]
throw err
})
// clear cache when too old
setTimeout(() => {
// only if it has not been updated
if (this.#hostCache[hostUuid]?.[step]?.timestamp === timestamp) {
delete this.#hostCache[hostUuid][step]
}
}, (step + 1) * 1000)
this.#hostCache[hostUuid][step] = {
timestamp,
value: promise,
}
}
// To avoid multiple requests, we keep a cash for the stats and
// only return it if we not exceed a step
_getCachedStats(uuid, step, currentTimeStamp) {
const statsByObject = this._statsByObject
const stats = statsByObject[uuid]?.[step]
if (stats === undefined) {
return
}
if (stats.endTimestamp + step < currentTimeStamp) {
delete statsByObject[uuid][step]
return
}
return stats
_isCacheStale(hostUuid, step, timestamp) {
const byHost = this.#hostCache[hostUuid]?.[step]
// cache is empty or too old
return byHost === undefined || byHost.timestamp + step < timestamp
}
// Execute one http request on a XenServer for get stats
// Return stats (Json format) or throws got exception
_getJson(xapi, host, timestamp, step) {
if (this._isCacheStale(host.uuid, step, timestamp)) {
this._updateJsonCache(xapi, host, step, timestamp)
}
return this.#hostCache[host.uuid][step].value
}
@synchronized.withKey((_, { host }) => host.uuid)
async _getAndUpdateStats(xapi, { host, uuid, granularity }) {
const step = granularity === undefined ? RRD_STEP_SECONDS : RRD_STEP_FROM_STRING[granularity]
@@ -294,65 +291,61 @@ export default class XapiStats {
const currentTimeStamp = await getServerTimestamp(xapi, host.$ref)
const stats = this._getCachedStats(uuid, step, currentTimeStamp)
if (stats !== undefined) {
return stats
}
const maxDuration = step * RRD_POINTS_PER_STEP[step]
// To avoid crossing over the boundary, we ask for one less step
const optimumTimestamp = currentTimeStamp - maxDuration + step
const json = await this._getJson(xapi, host, optimumTimestamp, step)
const actualStep = json.meta.step
if (actualStep !== step) {
throw new FaultyGranularity(`Unable to get the true granularity: ${actualStep}`)
}
let stepStats
if (json.data.length > 0) {
// fetched data is organized from the newest to the oldest
// but this implementation requires it in the other direction
json.data.reverse()
const data = [...json.data]
data.reverse()
json.meta.legend.forEach((legend, index) => {
const [, type, uuid, metricType] = /^AVERAGE:([^:]+):(.+):(.+)$/.exec(legend)
const [, type, uuidInStat, metricType] = /^AVERAGE:([^:]+):(.+):(.+)$/.exec(legend)
const metrics = STATS[type]
if (metrics === undefined) {
return
}
if (uuidInStat !== uuid) {
return
}
const { metric, testResult } = findMetric(metrics, metricType)
if (metric === undefined) {
return
}
const xoObjectStats = createGetProperty(this._statsByObject, uuid, {})
let stepStats = xoObjectStats[actualStep]
if (stepStats === undefined || stepStats.endTimestamp !== json.meta.end) {
stepStats = xoObjectStats[actualStep] = {
stepStats = {
endTimestamp: json.meta.end,
interval: actualStep,
stats: {},
}
}
const path = metric.getPath !== undefined ? metric.getPath(testResult) : [findKey(metrics, metric)]
const lastKey = path.length - 1
let metricStats = createGetProperty(stepStats, 'stats', {})
let metricStats = stepStats.stats
path.forEach((property, key) => {
if (key === lastKey) {
metricStats[property] = computeValues(json.data, index, metric.transformValue)
metricStats[property] = computeValues(data, index, metric.transformValue)
return
}
metricStats = createGetProperty(metricStats, property, {})
metricStats = metricStats[property] = metricStats[property] ?? {}
})
})
}
if (actualStep !== step) {
throw new FaultyGranularity(`Unable to get the true granularity: ${actualStep}`)
}
return (
this._statsByObject[uuid]?.[step] ?? {
stepStats ?? {
endTimestamp: currentTimeStamp,
interval: step,
stats: {},

View File

@@ -24,7 +24,6 @@ import { execa } from 'execa'
export default class BackupNgFileRestore {
constructor(app) {
this._app = app
this._mounts = { __proto__: null }
// clean any LVM volumes that might have not been properly
// unmounted

View File

@@ -589,7 +589,7 @@ export default class XenServers {
const sourceXapi = this.getXapi(sourcePoolId)
const {
_auth: { user, password },
_url: { hostname },
_url: { hostnameRaw },
} = this.getXapi(targetPoolId)
// We don't want the events of the source XAPI to interfere with
@@ -597,7 +597,7 @@ export default class XenServers {
sourceXapi.xo.uninstall()
try {
await sourceXapi.joinPool(hostname, user, password, force)
await sourceXapi.joinPool(hostnameRaw, user, password, force)
} catch (e) {
sourceXapi.xo.install()

View File

@@ -2339,8 +2339,8 @@ const messages = {
xoConfigCloudBackup: 'XO Config Cloud Backup',
xoConfigCloudBackupTips:
'Your encrypted configuration is securely stored inside your Vates account and backed up once a day',
xoCloudConfigEnterPassphrase: 'If you want to encrypt backups, please enter a passphrase:',
xoCloudConfigRestoreEnterPassphrase: 'If the config is encrypted, please enter the passphrase:',
xoCloudConfigEnterPassphrase: 'Passphrase is required to encrypt backups',
xoCloudConfigRestoreEnterPassphrase: 'Enter the passphrase:',
// ----- XOSAN -----
xosanTitle: 'XOSAN',

View File

@@ -533,8 +533,7 @@ const xoItemToRender = {
<span>
<Icon icon='xo-cloud-config' /> <ShortDate timestamp={createdAt} />
</span>
)
,
),
// XO objects.
pool: props => <Pool {...props} />,
@@ -602,6 +601,7 @@ const xoItemToRender = {
</span>{' '}
<span className='tag tag-warning'>{backup.remote.name}</span>{' '}
{backup.size !== undefined && <span className='tag tag-info'>{formatSize(backup.size)}</span>}{' '}
{backup.dedup === true && <span className='tag tag-info'>deduplicated</span>}{' '}
<FormattedDate
value={new Date(backup.timestamp)}
month='long'

View File

@@ -999,7 +999,7 @@ const New = decorate([
<Tooltip content={_('clickForMoreInformation')}>
<a
className='text-info'
href='https://xen-orchestra.com/docs/delta_backups.html#full-backup-interval'
href='https://xen-orchestra.com/docs/incremental_backups.html#key-backup-interval'
rel='noopener noreferrer'
target='_blank'
>

3670
yarn.lock

File diff suppressed because it is too large Load Diff