Compare commits

3 Commits

34 changed files with 1289 additions and 1561 deletions

View File

@@ -65,11 +65,10 @@ module.exports = {
typescript: true,
'eslint-import-resolver-custom-alias': {
alias: {
'@core': '../web-core/lib',
'@': './src',
},
extensions: ['.ts'],
packages: ['@xen-orchestra/lite', '@xen-orchestra/web'],
packages: ['@xen-orchestra/lite'],
},
},
},

View File

@@ -160,10 +160,10 @@ export class ImportVmBackup {
// update the stream with the negative vhd stream
stream = await negativeVhd.stream()
vdis[vdiRef].baseVdi = snapshotCandidate
} catch (error) {
} catch (err) {
// can be a broken VHD chain, a VHD chain with a key backup, ...
// not an unrecoverable error: don't dispose of parentVhd, and fall back to a full restore
warn(`can't use differential restore`, { error })
warn(`can't use differential restore`, err)
disposableDescendants?.dispose()
}
}

View File

@@ -26,7 +26,16 @@ export const IncrementalXapi = class IncrementalXapiVmBackupRunner extends Abstr
}
_mustDoSnapshot() {
return true
const vm = this._vm
const settings = this._settings
return (
settings.unconditionalSnapshot ||
(!settings.offlineBackup && vm.power_state === 'Running') ||
settings.snapshotRetention !== 0 ||
settings.fullInterval !== 1 ||
settings.deltaComputationMode === 'AGAINST_PARENT_VHD'
)
}
async _copy() {
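For reference, the rewritten check above only returns `false` (so the snapshot can be skipped) when none of the listed conditions hold. A hypothetical settings object for that case (values are placeholders, not defaults):

```js
// settings for which _mustDoSnapshot() would return false
const settings = {
  unconditionalSnapshot: false,
  offlineBackup: true, // the VM is shut down for the backup, so a running VM never forces a snapshot
  snapshotRetention: 0, // no snapshots are kept on the VM
  fullInterval: 1, // every backup is a full one
  deltaComputationMode: undefined, // anything but 'AGAINST_PARENT_VHD'
}
```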

View File

@@ -3,7 +3,7 @@ import mapValues from 'lodash/mapValues.js'
import ignoreErrors from 'promise-toolbox/ignoreErrors'
import { asyncEach } from '@vates/async-each'
import { asyncMap } from '@xen-orchestra/async-map'
import { chainVhd, checkVhdChain, openVhd, VhdAbstract } from 'vhd-lib'
import { chainVhd, checkVhdChain, openVhd, VhdAbstract, VhdSynthetic } from 'vhd-lib'
import { createLogger } from '@xen-orchestra/log'
import { decorateClass } from '@vates/decorate-with'
import { defer } from 'golike-defer'
@@ -143,10 +143,8 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
let metadataContent = await this._isAlreadyTransferred(timestamp)
if (metadataContent !== undefined) {
// skip backup while making sure the forked stream does not get stuck
// @todo: should skip backup while making sure the forked stream does not get stuck
Task.info('This backup has already been transferred')
Object.values(deltaExport.streams).forEach(stream => stream.destroy())
return { size: 0 }
}
const basename = formatFilenameDate(timestamp)
@@ -185,6 +183,7 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
const isDifferencing = isVhdDifferencing[`${id}.vhd`]
let parentPath
let parentVhd
if (isDifferencing) {
const vdiDir = dirname(path)
parentPath = (
@@ -206,6 +205,11 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
// TODO remove when this has been done before the export
await checkVhd(handler, parentPath)
if (settings.deltaComputationMode === 'AGAINST_PARENT_VHD') {
const { dispose, value } = await VhdSynthetic.fromVhdChain(handler, parentPath)
parentVhd = value
$defer(() => dispose())
}
}
// don't write it as transferSize += await async function
@@ -215,6 +219,7 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
// no checksum for VHDs, because they will be invalidated by
// merges and chainings
checksum: false,
parentVhd,
validator: tmpPath => checkVhd(handler, tmpPath),
writeBlockConcurrency: this._config.writeBlockConcurrency,
})

View File

@@ -113,13 +113,13 @@ export const MixinRemoteWriter = (BaseClass = Object) =>
)
}
async _isAlreadyTransferred(timestamp) {
_isAlreadyTransferred(timestamp) {
const vmUuid = this._vmUuid
const adapter = this._adapter
const backupDir = getVmBackupDir(vmUuid)
try {
const actualMetadata = JSON.parse(
await adapter._handler.readFile(`${backupDir}/${formatFilenameDate(timestamp)}.json`)
adapter._handler.readFile(`${backupDir}/${formatFilenameDate(timestamp)}.json`)
)
return actualMetadata
} catch (error) {}

View File

@@ -20,7 +20,5 @@ export function split(path) {
return parts
}
// paths are made absolute, otherwise path.relative() would resolve them against the working directory
export const relativeFromFile = (file, path) => relative(dirname(normalize(file)), normalize(path))
export const relativeFromFile = (file, path) => relative(dirname(file), path)
export const resolveFromFile = (file, path) => resolve('/', dirname(file), path).slice(1)
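The comment above describes the pitfall sketched below: when `file` is relative and `path` absolute, a bare path.relative() resolves the relative argument against the process working directory, which is why the comment says the paths are made absolute first. A minimal sketch for a POSIX system ('/tmp' is an arbitrary working directory):

```js
import { dirname, relative, resolve } from 'node:path'

process.chdir('/tmp')

// bare relative(): the relative `file` is resolved against the cwd
console.log(relative(dirname('foo/bar/file.vhd'), '/foo/baz/path.vhd'))
// => '../../../foo/baz/path.vhd' (depends on the cwd)

// making both paths absolute first gives a cwd-independent result
console.log(relative(dirname(resolve('/', 'foo/bar/file.vhd')), resolve('/', '/foo/baz/path.vhd')))
// => '../baz/path.vhd', as asserted by the test file deleted in the next hunk
```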

View File

@@ -1,17 +0,0 @@
import { describe, it } from 'test'
import { strict as assert } from 'assert'
import { relativeFromFile } from './path.js'
describe('relativeFromFile()', function () {
for (const [title, args] of Object.entries({
'file absolute and path absolute': ['/foo/bar/file.vhd', '/foo/baz/path.vhd'],
'file relative and path absolute': ['foo/bar/file.vhd', '/foo/baz/path.vhd'],
'file absolute and path relative': ['/foo/bar/file.vhd', 'foo/baz/path.vhd'],
'file relative and path relative': ['foo/bar/file.vhd', 'foo/baz/path.vhd'],
})) {
it('works with ' + title, function () {
assert.equal(relativeFromFile(...args), '../baz/path.vhd')
})
}
})

View File

@@ -54,10 +54,10 @@ async function handleExistingFile(root, indexPath, path) {
await indexFile(fullPath, indexPath)
}
}
} catch (error) {
if (error.code !== 'EEXIST') {
} catch (err) {
if (err.code !== 'EEXIST') {
// there can be a symbolic link in the tree
warn('handleExistingFile', { error })
warn('handleExistingFile', err)
}
}
}
@@ -106,7 +106,7 @@ export async function watchRemote(remoteId, { root, immutabilityDuration, rebuil
await File.liftImmutability(settingPath)
} catch (error) {
// the file may not exist, and it's not really a problem
info('lifting immutability on current settings', { error })
info('lifting immutability on current settings', error)
}
await fs.writeFile(
settingPath,

View File

@@ -1,15 +1,13 @@
{
"extends": "@vue/tsconfig/tsconfig.dom.json",
"include": ["env.d.ts", "src/**/*", "src/**/*.vue", "../web-core/lib/**/*", "../web-core/lib/**/*.vue"],
"include": ["env.d.ts", "src/**/*", "src/**/*.vue"],
"exclude": ["src/**/__tests__/*"],
"compilerOptions": {
"composite": true,
"noEmit": true,
"baseUrl": ".",
"rootDir": "..",
"paths": {
"@/*": ["./src/*"],
"@core/*": ["../web-core/lib/*"]
"@/*": ["./src/*"]
}
}
}

View File

@@ -23,7 +23,6 @@ export default defineConfig({
resolve: {
alias: {
'@': fileURLToPath(new URL('./src', import.meta.url)),
'@core': fileURLToPath(new URL('../web-core/lib', import.meta.url)),
},
},

View File

@@ -27,16 +27,6 @@ log.error('could not join server', {
})
```
A logging method has the following signature:
```ts
interface LoggingMethod {
(error): void
(message: string, data?: { error?: Error; [property: string]: any }): void
}
```
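For illustration, both call forms of that signature (a minimal sketch; the namespace and messages are arbitrary):

```js
import { createLogger } from '@xen-orchestra/log'

const log = createLogger('my-app:example')

// first form: pass an Error directly
log.warn(new Error('disk not found'))

// second form: a message plus structured data, with the error under the `error` key
log.warn('could not join server', { error: new Error('ECONNREFUSED'), server: 'example.org' })
```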
### Consumer
Then, at the application level, configure how the logs are handled:

View File

@@ -45,16 +45,6 @@ log.error('could not join server', {
})
```
A logging method has the following signature:
```ts
interface LoggingMethod {
(error): void
(message: string, data?: { error?: Error; [property: string]: any }): void
}
```
### Consumer
Then, at the application level, configure how the logs are handled:
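A minimal consumer sketch, assuming the `configure` helper and the console transport exposed by `@xen-orchestra/log` (the filter value is illustrative):

```js
import { configure } from '@xen-orchestra/log/configure'
import transportConsole from '@xen-orchestra/log/transports/console'

// send every log record whose namespace matches the filter to the console
configure([
  {
    filter: process.env.DEBUG,
    transport: transportConsole(),
  },
])
```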

View File

@@ -10,8 +10,7 @@
}
},
"devDependencies": {
"vue": "^3.4.13",
"@vue/tsconfig": "^0.5.1"
"vue": "^3.4.13"
},
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/web-core",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
@@ -26,6 +25,6 @@
},
"license": "AGPL-3.0-or-later",
"engines": {
"node": ">=18"
"node": ">=8.10"
}
}

View File

@@ -1,12 +0,0 @@
{
"extends": "@vue/tsconfig/tsconfig.dom.json",
"include": ["env.d.ts", "lib/**/*", "lib/**/*.vue"],
"exclude": ["lib/**/__tests__/*"],
"compilerOptions": {
"noEmit": true,
"baseUrl": ".",
"paths": {
"@core/*": ["./lib/*"]
}
}
}

View File

@@ -1,22 +1,13 @@
{
"extends": "@vue/tsconfig/tsconfig.dom.json",
"include": [
"env.d.ts",
"typed-router.d.ts",
"src/**/*",
"src/**/*.vue",
"../web-core/lib/**/*",
"../web-core/lib/**/*.vue"
],
"include": ["env.d.ts", "typed-router.d.ts", "src/**/*", "src/**/*.vue"],
"exclude": ["src/**/__tests__/*"],
"compilerOptions": {
"composite": true,
"noEmit": true,
"baseUrl": ".",
"rootDir": "..",
"paths": {
"@/*": ["./src/*"],
"@core/*": ["../web-core/lib/*"]
"@/*": ["./src/*"]
}
}
}

View File

@@ -11,7 +11,6 @@ export default defineConfig({
resolve: {
alias: {
'@': fileURLToPath(new URL('./src', import.meta.url)),
'@core': fileURLToPath(new URL('../web-core/lib', import.meta.url)),
},
},
})

View File

@@ -21,23 +21,12 @@ export default class Vif {
MAC = '',
} = {}
) {
if (device === undefined) {
const allowedDevices = await this.call('VM.get_allowed_VIF_devices', VM)
if (allowedDevices.length === 0) {
const error = new Error('could not find an allowed VIF device')
error.poolUuid = this.pool.uuid
error.vmRef = VM
throw error
}
device = allowedDevices[0]
}
const [powerState, ...rest] = await Promise.all([
this.getField('VM', VM, 'power_state'),
MTU ?? this.getField('network', network, 'MTU'),
device ?? (await this.call('VM.get_allowed_VIF_devices', VM))[0],
MTU ?? (await this.getField('network', network, 'MTU')),
])
;[MTU] = rest
;[device, MTU] = rest
const vifRef = await this.call('VIF.create', {
currently_attached: powerState === 'Suspended' ? currently_attached : undefined,

View File

@@ -8,16 +8,11 @@
> Users must be able to say: “Nice enhancement, I'm eager to test it”
- Disable search engine indexing via a `robots.txt`
- [Stats] Support format used by XAPI 23.31
### Bug fixes
> Users must be able to say: “I had this issue, happy to know it's fixed”
- [Settings/XO Config] Sort backups from newest to oldest
- [Plugins/audit] Don't log `tag.getAllConfigured` calls
- [Remotes] Correctly clear error when the remote is tested with success
### Packages to release
> When modifying a package, add it here with its release type.
@@ -34,12 +29,6 @@
<!--packages-start-->
- @xen-orchestra/backups patch
- @xen-orchestra/fs patch
- @xen-orchestra/xapi patch
- vhd-lib patch
- xo-server minor
- xo-server-audit patch
- xo-web patch
- xo-server patch
<!--packages-end-->

View File

@@ -1,127 +0,0 @@
# Contributing to Xen Orchestra
First off, thanks for taking the time to contribute! ❤️
All types of contributions are encouraged and valued. See the [Table of Contents](#table-of-contents) for different ways to help and details about how this project handles them. Please make sure to read the relevant section before making your contribution. It will make it a lot easier for us maintainers and smooth out the experience for all involved. The community looks forward to your contributions. 🎉
> And if you like the project, but just don't have time to contribute, that's fine. There are other easy ways to support the project and show your appreciation, which we would also be very happy about:
>
> - Star the project
> - Tweet about it
> - Refer this project in your project's readme
> - Mention the project at local meetups and tell your friends/colleagues
## Table of Contents
- [Code of Conduct](#code-of-conduct)
- [I Have a Question](#i-have-a-question)
- [I Want To Contribute](#i-want-to-contribute)
- [Reporting Bugs](#reporting-bugs)
- [Suggesting Enhancements](#suggesting-enhancements)
- [Your First Code Contribution](#your-first-code-contribution)
- [Improving The Documentation](#improving-the-documentation)
- [Styleguides](#styleguides)
- [Commit Messages](#commit-messages)
- [Join The Project Team](#join-the-project-team)
## Code of Conduct
This project and everyone participating in it is governed by the
[Xen Orchestra Code of Conduct](https://github.com/vatesfr/xen-orchestra/blob/master/CODE_OF_CONDUCT.md).
By participating, you are expected to uphold this code. Please report unacceptable behavior
to julien.fontanet@vates.fr.
## I Have a Question
> If you want to ask a question, we assume that you have read the available [Documentation](https://xen-orchestra.com/docs/).
Before you ask a question, it is best to search for existing [topics on our forum](https://xcp-ng.org/forum/category/12/xen-orchestra) that might help you. In case you have found a suitable topic and still need clarification, you can write your question in this thread. It is also advisable to search the internet for answers first.
If you then still feel the need to ask a question and need clarification, we recommend the following:
- Open a new Topic.
- Provide as much context as you can about what you're running into.
- Provide project and platform versions (nodejs, npm, etc), depending on what seems relevant.
The community will then answer you as soon as possible.
## I Want To Contribute
> ### Legal Notice
>
> When contributing to this project, you must agree that you have authored 100% of the content, that you have the necessary rights to the content and that the content you contribute may be provided under the project license.
### Reporting Bugs
#### Before Submitting a Bug Report
A good bug report shouldn't leave others needing to chase you up for more information. Therefore, we ask you to investigate carefully, collect information and describe the issue in detail in your report. Please complete the following steps in advance to help us fix any potential bug as fast as possible.
- Make sure that you are using the latest version.
- Determine if your bug is really a bug and not an error on your side, e.g. using incompatible environment components/versions (make sure that you have read the [documentation](https://xen-orchestra.com/docs/); if you are looking for support, you might want to check [this section](#i-have-a-question)).
- To see if other users have experienced (and potentially already solved) the same issue you are having, check whether a bug report already exists for your bug or error in the [bug tracker](https://github.com/vatesfr/xen-orchestra/issues?q=label%3Abug).
- Also make sure to search the internet (including Stack Overflow) to see if users outside of the GitHub community have discussed the issue.
- Collect information about the bug:
- Stack trace (Traceback)
- OS, Platform and Version (Windows, Linux, macOS, x86, ARM)
- Version of the interpreter, compiler, SDK, runtime environment, package manager, depending on what seems relevant.
- Possibly your input and the output
- Can you reliably reproduce the issue? And can you also reproduce it with older versions?
#### How Do I Submit a Good Bug Report?
> You must never report security-related issues, vulnerabilities or bugs including sensitive information to the issue tracker, or elsewhere in public. Instead, sensitive bugs must be reported on [this dedicated page](https://github.com/vatesfr/xen-orchestra/security/advisories/new).
We use GitHub issues to track bugs and errors. If you run into an issue with the project:
- Open an [Issue](https://github.com/vatesfr/xen-orchestra/issues/new/choose). (Since we can't be sure at this point whether it is a bug or not, we ask you not to talk about a bug yet and not to label the issue.)
- Explain the behavior you would expect and the actual behavior.
- Please provide as much context as possible and describe the _reproduction steps_ that someone else can follow to recreate the issue on their own. This usually includes your code. For good bug reports you should isolate the problem and create a reduced test case.
- Provide the information you collected in the previous section.
### Suggesting Enhancements
This section guides you through submitting an enhancement suggestion for Xen Orchestra, **including completely new features and minor improvements to existing functionality**. Following these guidelines will help maintainers and the community to understand your suggestion and find related suggestions.
#### Before Submitting an Enhancement
- Make sure that you are using the latest version.
- Read the [documentation](https://xen-orchestra.com/docs/) carefully and find out if the functionality is already covered, maybe by an individual configuration.
- Perform a [search](https://github.com/vatesfr/xen-orchestra/issues) to see if the enhancement has already been suggested. If it has, add a comment to the existing issue instead of opening a new one.
- Find out whether your idea fits with the scope and aims of the project. It's up to you to make a strong case to convince the project's developers of the merits of this feature. Keep in mind that we want features that will be useful to the majority of our users and not just a small subset. If you're just targeting a minority of users, consider writing an add-on/plugin library.
#### How Do I Submit a Good Enhancement Suggestion?
Enhancement suggestions are tracked as [GitHub issues](https://github.com/vatesfr/xen-orchestra/issues).
- Use a **clear and descriptive title** for the issue to identify the suggestion.
- Provide a **step-by-step description of the suggested enhancement** in as many details as possible.
- **Describe the current behavior** and **explain which behavior you expected to see instead** and why. At this point you can also tell which alternatives do not work for you.
- You may want to **include screenshots and animated GIFs** which help you demonstrate the steps or point out the part which the suggestion is related to. You can use [this tool](https://www.cockos.com/licecap/) to record GIFs on macOS and Windows, and [this tool](https://github.com/colinkeenan/silentcast) or [this tool](https://github.com/GNOME/byzanz) on Linux.
- **Explain why this enhancement would be useful** to most Xen Orchestra users. You may also want to point out the other projects that solved it better and which could serve as inspiration.
### Your First Code Contribution
<!-- TODO
include Setup of env, IDE and typical getting started instructions?
-->
### Improving The Documentation
<!-- TODO
Updating, improving and correcting the documentation
-->
## Styleguides
### Commit Messages
<!-- TODO
-->
## Attribution
This guide is based on the **contributing-gen**. [Make your own](https://github.com/bttger/contributing-gen)!

View File

@@ -1,6 +1,4 @@
{
"name": "xen-orchestra",
"version": "0.0.0",
"devDependencies": {
"@babel/core": "^7.0.0",
"@babel/eslint-parser": "^7.13.8",
@@ -96,7 +94,7 @@
},
"private": true,
"scripts": {
"build": "TURBO_TELEMETRY_DISABLED=1 turbo run build --scope xo-server --scope xo-server-'*' --scope xo-web",
"build": "turbo run build --scope xo-server --scope xo-server-'*' --scope xo-web",
"build:xo-lite": "turbo run build --scope @xen-orchestra/lite",
"clean": "scripts/run-script.js --parallel clean",
"dev": "scripts/run-script.js --parallel --concurrency 0 --verbose dev",

View File

@@ -84,6 +84,9 @@ exports.VhdAbstract = class VhdAbstract {
readBlockAllocationTable() {
throw new Error(`reading block allocation table is not implemented`)
}
readBlockHashes() {
throw new Error(`reading block hashes table is not implemented`)
}
/**
* @typedef {Object} BitmapBlock
@@ -104,6 +107,10 @@ exports.VhdAbstract = class VhdAbstract {
throw new Error(`reading ${onlyBitmap ? 'bitmap of block' : 'block'} ${blockId} is not implemented`)
}
getBlockHash(blockId) {
throw new Error(`reading block hash ${blockId} is not implemented`)
}
/**
* coalesce the block with id blockId from the child vhd into
* this vhd

View File

@@ -4,6 +4,7 @@ const { unpackHeader, unpackFooter, sectorsToBytes } = require('./_utils')
const { createLogger } = require('@xen-orchestra/log')
const { fuFooter, fuHeader, checksumStruct } = require('../_structs')
const { test, set: setBitmap } = require('../_bitmap')
const { hashBlock } = require('../hashBlock')
const { VhdAbstract } = require('./VhdAbstract')
const assert = require('assert')
const { synchronized } = require('decorator-synchronized')
@@ -75,6 +76,7 @@ function getCompressor(compressorType) {
exports.VhdDirectory = class VhdDirectory extends VhdAbstract {
#uncheckedBlockTable
#blockHashes = {}
#header
footer
#compressor
@@ -140,6 +142,17 @@ exports.VhdDirectory = class VhdDirectory extends VhdAbstract {
this.#blockTable = buffer
}
async readBlockHashes() {
try {
const { buffer } = await this._readChunk('hashes')
this.#blockHashes = JSON.parse(buffer)
} catch (err) {
if (err.code !== 'ENOENT') {
throw err
}
}
}
containsBlock(blockId) {
return test(this.#blockTable, blockId)
}
@@ -177,6 +190,11 @@ exports.VhdDirectory = class VhdDirectory extends VhdAbstract {
const blockSuffix = blockId - blockPrefix * 1e3
return `blocks/${blockPrefix}/${blockSuffix}`
}
getBlockHash(blockId) {
if (this.#blockHashes !== undefined) {
return this.#blockHashes[blockId]
}
}
_getFullBlockPath(blockId) {
return this.#getChunkPath(this.#getBlockPath(blockId))
@@ -209,6 +227,10 @@ exports.VhdDirectory = class VhdDirectory extends VhdAbstract {
throw new Error(`reading 'bitmap of block' ${blockId} in a VhdDirectory is not implemented`)
}
const { buffer } = await this._readChunk(this.#getBlockPath(blockId))
const hash = this.getBlockHash(blockId)
if (hash) {
assert.strictEqual(hash, hashBlock(buffer))
}
return {
id: blockId,
bitmap: buffer.slice(0, this.bitmapSize),
@@ -244,7 +266,7 @@ exports.VhdDirectory = class VhdDirectory extends VhdAbstract {
assert.notStrictEqual(this.#blockTable, undefined, 'Block allocation table has not been read')
assert.notStrictEqual(this.#blockTable.length, 0, 'Block allocation table is empty')
return this._writeChunk('bat', this.#blockTable)
return Promise.all([this._writeChunk('bat', this.#blockTable), this._writeChunk('hashes', JSON.stringify(this.#blockHashes))])
}
// only works if data are in the same handler
@@ -265,8 +287,11 @@ exports.VhdDirectory = class VhdDirectory extends VhdAbstract {
await this._handler.rename(childBlockPath, this._getFullBlockPath(blockId))
if (!blockExists) {
setBitmap(this.#blockTable, blockId)
this.#blockHashes[blockId] = child.getBlockHash(blockId)
await this.writeBlockAllocationTable()
}
// @todo block hash changes may be lost if the VHD merge fails
// should migrate to writing the BAT from time to time, in sync with the metadata
} catch (error) {
if (error.code === 'ENOENT' && isResumingMerge === true) {
// when resuming, the blocks moved since the last merge state write are
@@ -287,6 +312,7 @@ exports.VhdDirectory = class VhdDirectory extends VhdAbstract {
async writeEntireBlock(block) {
await this._writeChunk(this.#getBlockPath(block.id), block.buffer)
setBitmap(this.#blockTable, block.id)
this.#blockHashes[block.id] = hashBlock(block.buffer)
}
async _readParentLocatorData(id) {

View File

@@ -96,6 +96,10 @@ const VhdSynthetic = class VhdSynthetic extends VhdAbstract {
assert(false, `no such block ${blockId}`)
}
async getBlockHash(blockId) {
return this.#getVhdWithBlock(blockId).getBlockHash(blockId)
}
async readBlock(blockId, onlyBitmap = false) {
// only read the content of the first vhd containing this block
return await this.#getVhdWithBlock(blockId).readBlock(blockId, onlyBitmap)

View File

@@ -1,6 +1,6 @@
'use strict'
const { relativeFromFile } = require('@xen-orchestra/fs/path')
const { dirname, relative } = require('path')
const { openVhd } = require('./openVhd')
const { DISK_TYPES } = require('./_constants')
@@ -21,7 +21,7 @@ module.exports = async function chain(parentHandler, parentPath, childHandler, c
}
await childVhd.readBlockAllocationTable()
const parentName = relativeFromFile(childPath, parentPath)
const parentName = relative(dirname(childPath), parentPath)
header.parentUuid = parentVhd.footer.uuid
header.parentUnicodeName = parentName
await childVhd.setUniqueParentLocator(parentName)

View File

@@ -1,6 +1,7 @@
'use strict'
const { createLogger } = require('@xen-orchestra/log')
const { hashBlock } = require('./hashBlock.js')
const { parseVhdStream } = require('./parseVhdStream.js')
const { VhdDirectory } = require('./Vhd/VhdDirectory.js')
const { Disposable } = require('promise-toolbox')
@@ -8,7 +9,7 @@ const { asyncEach } = require('@vates/async-each')
const { warn } = createLogger('vhd-lib:createVhdDirectoryFromStream')
const buildVhd = Disposable.wrap(async function* (handler, path, inputStream, { concurrency, compression }) {
const buildVhd = Disposable.wrap(async function* (handler, path, inputStream, { concurrency, compression, parentVhd }) {
const vhd = yield VhdDirectory.create(handler, path, { compression })
await asyncEach(
parseVhdStream(inputStream),
@@ -24,6 +25,10 @@ const buildVhd = Disposable.wrap(async function* (handler, path, inputStream, {
await vhd.writeParentLocator({ ...item, data: item.buffer })
break
case 'block':
if (parentVhd !== undefined && hashBlock(item.buffer) === (await parentVhd.getBlockHash(item.id))) {
// already in parent
return
}
await vhd.writeEntireBlock(item)
break
case 'bat':
@@ -45,10 +50,10 @@ exports.createVhdDirectoryFromStream = async function createVhdDirectoryFromStre
handler,
path,
inputStream,
{ validator, concurrency = 16, compression } = {}
{ validator, concurrency = 16, compression, parentVhd } = {}
) {
try {
const size = await buildVhd(handler, path, inputStream, { concurrency, compression })
const size = await buildVhd(handler, path, inputStream, { concurrency, compression, parentVhd })
if (validator !== undefined) {
await validator.call(this, path)
}
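A hypothetical call showing how the new `parentVhd` option would be wired in (handler, path and stream are placeholders, and the export from the package root is assumed); the parent would typically be the synthetic VHD opened over the parent chain, as in the remote-writer hunk above:

```js
const { createVhdDirectoryFromStream } = require('vhd-lib')

// inside an async function: blocks whose hash matches the parent's hash for the
// same block id are skipped instead of being written again
const size = await createVhdDirectoryFromStream(handler, 'path/to/data.vhd', inputStream, {
  compression: 'brotli', // assumed value
  parentVhd,
})
```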

View File

@@ -0,0 +1,12 @@
'use strict'
const { createHash } = require('node:crypto')
// using xxhash (as for XVA) would produce smaller hashes; the collision risk would stay
// low for this dedup since we compare a tuple (index, hash), but it would become notable
// if we implement dedup on top of this later
// at most, a 2 TB full VHD will use 32 MB for its hashes,
// and this file is compressed like the VHD blocks
exports.hashBlock = function (buffer) {
return createHash('sha256').update(buffer).digest('hex')
}
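A small usage sketch for the helper above (the require path is relative to the vhd-lib package, and 2 MiB is the standard VHD block size):

```js
const { hashBlock } = require('./hashBlock.js')

const block = Buffer.alloc(2 * 1024 * 1024) // a zero-filled 2 MiB VHD block
const digest = hashBlock(block)
console.log(digest.length) // 64 hex characters (SHA-256)

// two identical blocks hash identically, which is what the dedup against the parent relies on
console.log(hashBlock(Buffer.alloc(2 * 1024 * 1024)) === digest) // true
```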

View File

@@ -72,7 +72,6 @@ const DEFAULT_BLOCKED_LIST = {
'system.getServerTimezone': true,
'system.getServerVersion': true,
'system.getVersion': true,
'tag.getAllConfigured': true,
'test.getPermissionsForUser': true,
'user.getAll': true,
'user.getAuthenticationTokens': true,

View File

@@ -27,7 +27,7 @@ async function sendToNagios(app, jobName, vmBackupInfo) {
jobName
)
} catch (error) {
warn('sendToNagios:', { error })
warn('sendToNagios:', error)
}
}

View File

@@ -45,17 +45,7 @@ const RRD_POINTS_PER_STEP = {
// Utils
// -------------------------------------------------------------------
function parseNumber(value) {
// Starting from XAPI 23.31, numbers in the JSON payload are encoded as
// strings to support NaN, Infinity and -Infinity
if (typeof value === 'string') {
const asNumber = +value
if (isNaN(asNumber) && value !== 'NaN') {
throw new Error('cannot parse number: ' + value)
}
value = asNumber
}
function convertNanToNull(value) {
return isNaN(value) ? null : value
}
@@ -68,7 +58,7 @@ async function getServerTimestamp(xapi, hostRef) {
// -------------------------------------------------------------------
const computeValues = (dataRow, legendIndex, transformValue = identity) =>
map(dataRow, ({ values }) => transformValue(parseNumber(values[legendIndex])))
map(dataRow, ({ values }) => transformValue(convertNanToNull(values[legendIndex])))
const combineStats = (stats, path, combineValues) => zipWith(...map(stats, path), (...values) => combineValues(values))
@@ -255,15 +245,7 @@ export default class XapiStats {
start: timestamp,
},
})
.then(response => response.text())
.then(data => {
try {
// starting from XAPI 23.31, the response is valid JSON
return JSON.parse(data)
} catch (_) {
return JSON5.parse(data)
}
})
.then(response => response.text().then(JSON5.parse))
.catch(err => {
delete this.#hostCache[hostUuid][step]
throw err
@@ -317,7 +299,7 @@ export default class XapiStats {
// To avoid crossing over the boundary, we ask for one less step
const optimumTimestamp = currentTimeStamp - maxDuration + step
const json = await this._getJson(xapi, host, optimumTimestamp, step)
const actualStep = parseNumber(json.meta.step)
const actualStep = json.meta.step
if (actualStep !== step) {
throw new FaultyGranularity(`Unable to get the true granularity: ${actualStep}`)
@@ -344,10 +326,9 @@ export default class XapiStats {
return
}
const endTimestamp = parseNumber(json.meta.end)
if (stepStats === undefined || stepStats.endTimestamp !== endTimestamp) {
if (stepStats === undefined || stepStats.endTimestamp !== json.meta.end) {
stepStats = {
endTimestamp,
endTimestamp: json.meta.end,
interval: actualStep,
stats: {},
}
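For context on the two parsing paths shown in this file: plain `JSON.parse` rejects the NaN/Infinity values that RRD stats can contain, which is what `JSON5` (and, per the comment above, the string-encoded numbers of XAPI 23.31) works around. A quick illustration:

```js
const JSON5 = require('json5')

console.log(JSON5.parse('{ value: NaN }')) // { value: NaN }: JSON5 accepts NaN and Infinity

try {
  JSON.parse('{ "value": NaN }')
} catch (err) {
  console.log(err.message) // plain JSON rejects NaN
}
```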

View File

@@ -75,7 +75,7 @@ export const reportOnSupportPanel = async ({ files = [], formatMessage = identit
ADDITIONAL_FILES.map(({ fetch, name }) =>
timeout.call(fetch(), ADDITIONAL_FILES_FETCH_TIMEOUT).then(
file => formData.append('attachments', createBlobFromString(file), name),
error => logger.warn(`cannot get ${name}`, { error })
error => logger.warn(`cannot get ${name}`, error)
)
)
)

View File

@@ -1099,9 +1099,7 @@ export const SelectXoCloudConfig = makeSubscriptionSelect(
subscriber =>
subscribeCloudXoConfigBackups(configs => {
const xoObjects = groupBy(
map(configs, config => ({ ...config, type: 'xoConfig' }))
// from newest to oldest
.sort((a, b) => b.createdAt - a.createdAt),
map(configs, config => ({ ...config, type: 'xoConfig' })),
'xoaId'
)
subscriber({

View File

@@ -5,9 +5,10 @@ import decorate from 'apply-decorators'
import Icon from 'icon'
import React from 'react'
import { confirm } from 'modal'
import { getApiApplianceInfo, subscribeCloudXoConfig, subscribeCloudXoConfigBackups } from 'xo'
import { groupBy, sortBy } from 'lodash'
import { injectState, provideState } from 'reaclette'
import { SelectXoCloudConfig } from 'select-objects'
import { subscribeCloudXoConfig, subscribeCloudXoConfigBackups } from 'xo'
import BackupXoConfigModal from './backup-xo-config-modal'
import RestoreXoConfigModal from './restore-xo-config-modal'
@@ -87,7 +88,15 @@ const CloudConfig = decorate([
},
},
computed: {
applianceId: async () => {
const { id } = await getApiApplianceInfo()
return id
},
groupedConfigs: ({ applianceId, sortedConfigs }) =>
sortBy(groupBy(sortedConfigs, 'xoaId'), config => (config[0].xoaId === applianceId ? -1 : 1)),
isConfigDefined: ({ config }) => config != null,
sortedConfigs: (_, { cloudXoConfigBackups }) =>
cloudXoConfigBackups?.sort((config, nextConfig) => config.createdAt - nextConfig.createdAt),
},
}),
injectState,

View File

@@ -33,7 +33,7 @@ const formatError = error => (typeof error === 'string' ? error : JSON.stringify
const _changeUrlElement = (value, { remote, element }) =>
editRemote(remote, {
url: format({ ...parse(remote.url), [element]: value === null ? undefined : value }),
url: format({ ...remote, [element]: value === null ? undefined : value }),
})
const _showError = remote => alert(_('remoteConnectionFailed'), <pre>{formatError(remote.error)}</pre>)
const _editRemoteName = (name, { remote }) => editRemote(remote, { name })

yarn.lock: 2448 changes (file diff suppressed because it is too large)