Compare commits

...

105 Commits

Author SHA1 Message Date
Julien Fontanet
a174f8fcfc feat(backup/VmBackup): regularly report speed in task info 2022-01-24 15:42:51 +01:00
Florent Beauchamp
97d94b7952 feat(vhd-lib): merge blocks in parallel for VhdDirectory, throttle mergestate writes 2022-01-24 10:37:34 +01:00
Florent Beauchamp
96eb793298 feat(fs): s3#rmtree parallelize object deletion 2022-01-24 10:37:34 +01:00
Florent Beauchamp
b4f15de7be feat(fs): s3#copy don't use multi part copy for objects smaller than 5GB 2022-01-24 10:37:34 +01:00
Mathieu
ae5726b836 fix(xo-server-audit/generateFingerprint): handle the case when db is empty (#6071)
Fixes #6040
2022-01-21 12:47:56 +01:00
Florent BEAUCHAMP
692e72a78a fix(vhd-lib): fixes asserts on existing merge state (#6099)
Introduced by 5a933bad9
2022-01-21 12:40:45 +01:00
Pierre Donias
ff24364bb6 fix(CHANGELOG): fix and homogenize changelog (#6102) 2022-01-20 15:40:49 +01:00
Florent BEAUCHAMP
b60a1958b6 fix(fs#S3#{list,rmtree}): fix support of more than 1000 files (#6103)
Bug found when working on #6100
2022-01-20 14:31:13 +01:00
Florent Beauchamp
f6a2b505db feat(backups): execute cleanup on each related vm dir after a backup deletion 2022-01-19 10:46:15 +01:00
Florent Beauchamp
38aacdbd7d feat(xo-web): delete all the backups at once and let xo-server handle the cleanup 2022-01-19 10:46:15 +01:00
Florent Beauchamp
089b877cc5 fix(backups): use handler.unlink for deleting metadat instead of VhdAbstract.unlink 2022-01-19 10:46:15 +01:00
Julien Fontanet
81e55dcf77 fix(backups/listAllVmBackups): ignore empty dirs 2022-01-19 10:43:00 +01:00
Julien Fontanet
58dd44bf5d fix(backups/listAllVmBackups): ignore hidden and lock files 2022-01-19 10:41:20 +01:00
Julien Fontanet
3aa6669fd9 chore(vhd-lib): move alias utils to aliases submodule
Introduced in e15be7ebd
2022-01-18 15:33:31 +01:00
Florent BEAUCHAMP
c10601d905 fix(backups/_MixinBackupWriter#afterBackup): execute merge on S3 (#6095)
Introduced by 47f9da216
2022-01-18 11:05:55 +01:00
Florent BEAUCHAMP
e15be7ebd3 fix(backups/_cleanVm): fixes for aliases cleaning (#6094)
Introduced in 249f638495
2022-01-18 10:07:56 +01:00
Julien Fontanet
b465a91cd3 fix(value-matcher/README): __all → __and 2022-01-18 08:58:24 +01:00
Julien Fontanet
f304a46bea fix(vhd-lib/VhdDirectory): missing readChunkFilters in readHeaderAndFooter
Introduced by 249f63849
2022-01-17 10:18:54 +01:00
Pierre Donias
6756faa1cc feat(xo-server,xo-web): disable Load Balancer during Rolling Pool Update (#6089)
Fixes #5711
Follow-up of #6057
2022-01-17 10:08:32 +01:00
Julien Fontanet
73fd7c7d54 fix(backups/_cleanVm): temporary disable aliases checking 2022-01-17 09:52:42 +01:00
Julien Fontanet
60eda9ec69 chore(vhd-lib): remove contentVhdStreamWithLength export from main module 2022-01-16 22:44:41 +01:00
Julien Fontanet
a979c29a15 chore(vhd-lib): remove createReadableRawStream
Use `VhdAbstract#rawContent()` instead.
2022-01-16 22:34:04 +01:00
Julien Fontanet
8f25082917 fix(xo-vmdk-to-vhd): avoid requiring whole vhd-lib
Introduced by 9375b1c8b

Fixes #6093
2022-01-16 22:31:38 +01:00
Nicolas Raynaud
9375b1c8bd feat: support VDI export in VMDK (#5982)
Co-authored-by: Rajaa.BARHTAOUI <rajaa.barhtaoui@gmail.com>
Co-authored-by: Julien Fontanet <julien.fontanet@isonoe.net>
Co-authored-by: Florent BEAUCHAMP <flo850@free.fr>
2022-01-16 18:40:08 +01:00
Julien Fontanet
422a22a767 chore: update dev deps 2022-01-14 14:51:38 +01:00
Florent BEAUCHAMP
249f638495 feat(backups/_cleanVm): check VHD aliases (#6043) 2022-01-13 16:07:28 +01:00
Florent BEAUCHAMP
6cf5e10195 feat(fs/S3#_writeFile): retry on remote internal error (#6086) 2022-01-13 15:46:12 +01:00
Florent BEAUCHAMP
b78a946458 feat(proxy): implement reverse proxies (#6072) 2022-01-13 14:54:10 +01:00
Julien Fontanet
e8a5694d51 feat(backups/_cleanVm): clean orphan mergeState (#6087)
Fixes zammad#4778
2022-01-13 10:41:39 +01:00
Julien Fontanet
514fa72ee2 fix(package.json/jest): vhd-lib no longer has a build step
Introduced by 3a74c71f1
2022-01-12 22:50:49 +01:00
Julien Fontanet
e9ca13aa12 fix(backups/cleanVm): handle zstd-compressed XVAs
Related to zammad#4300
2022-01-12 11:31:09 +01:00
Julien Fontanet
57f1ec6716 chore(backups/_cleanVm/listVhds): make vhds directly a Set 2022-01-11 15:31:56 +01:00
Julien Fontanet
02e32cc9b9 chore(backups/_cleanVm/listVhds): minor simplification
This also removes the incorrect handling of an optional dir in `INTERRUPTED_VHDS_REG`.
2022-01-11 15:09:18 +01:00
Julien Fontanet
902abd5d94 chore: update deps 2022-01-06 13:59:31 +01:00
Julien Fontanet
53380802ec feat(xo-server): limit VM migration concurrency (#6076)
Related to #6065
2022-01-06 09:32:42 +01:00
Julien Fontanet
af5d8d02b6 feat: release 5.66.2 2022-01-05 11:30:29 +01:00
Julien Fontanet
7abba76f03 feat(CHANGELOG): integrate released changes 2022-01-05 10:36:05 +01:00
Julien Fontanet
79b22057d9 feat(xo-web): 5.91.2 2022-01-05 10:34:30 +01:00
Julien Fontanet
366daef718 feat(xo-server): 5.86.3 2022-01-05 10:33:30 +01:00
Julien Fontanet
a5ff0ba799 feat(@xen-orchestra/proxy): 0.17.3 2022-01-05 10:32:42 +01:00
Julien Fontanet
c2c6febb88 feat(@xen-orchestra/backups): 0.18.3 2022-01-05 10:18:02 +01:00
Julien Fontanet
f119c72a7f feat(xo-vmdk-to-vhd): 2.0.3 2022-01-05 10:16:47 +01:00
Julien Fontanet
8aee897d23 feat(vhd-lib): 3.0.0 2022-01-05 10:15:45 +01:00
Florent BEAUCHAMP
729db5c662 fix(backups): race condition in checkBaseVdi preventing delta backup (#6075)
Fixes zammad#4751, zammad#4729, zammad#4665 and zammad#4300
2022-01-05 09:58:06 +01:00
Julien Fontanet
61c46df7bf chore(xo-server): dont pass (unused) httpServer to app 2022-01-03 16:04:18 +01:00
Julien Fontanet
9b1a04338d chore(xo-server): attach express before creating app 2022-01-03 15:46:30 +01:00
Julien Fontanet
d307134d22 chore(xapi/_assertHealthyVdiChain): clearer warnings in case of missing VDI 2021-12-28 18:14:32 +01:00
Julien Fontanet
5bc44363f9 fix(xo-vmdk-to-vhd): fix createReadableSparseStream import
Introduced by 3a74c71f1

Fixes #6068
2021-12-23 23:40:58 +01:00
Julien Fontanet
68c4fac3ab chore: update deps 2021-12-23 13:25:48 +01:00
Julien Fontanet
6ad9245019 feat: release 5.66.1 2021-12-23 13:25:08 +01:00
Julien Fontanet
763cf771fb feat(CHANGELOG): integrate released changes 2021-12-23 12:18:50 +01:00
Julien Fontanet
3160b08637 feat(xo-web): 5.91.1 2021-12-23 12:18:14 +01:00
Julien Fontanet
f8949958a3 feat(xo-server): 5.86.2 2021-12-23 12:17:54 +01:00
Julien Fontanet
8b7ac07d2d feat(@xen-orchestra/proxy): 0.17.2 2021-12-23 12:17:25 +01:00
Julien Fontanet
044df9adba feat(@xen-orchestra/backups): 0.18.2 2021-12-23 12:16:53 +01:00
Julien Fontanet
040139f4cc fix(backups/cleanVm): computeVhdSize can return undefined 2021-12-23 12:09:11 +01:00
Julien Fontanet
7b73bb9df0 chore: format with Prettier 2021-12-23 12:06:11 +01:00
Julien Fontanet
24c8370daa fix(xo-server-test): add missing ESLint config 2021-12-23 11:58:14 +01:00
Julien Fontanet
029c4921d7 fix(backups/RemoteAdapter#isMergeableParent): #useVhdDirectory is a function (#6070)
Fixes zammad#4646
Fixes https://xcp-ng.org/forum/topic/5371/delta-backup-changes-in-5-66

Introduced by 5d605d1bd
2021-12-23 11:57:51 +01:00
Julien Fontanet
3a74c71f1a chore(vhd-lib): remove build step
BREAKING:
- removes `dist/` in the path of sub-modules
- requires Node >=12
2021-12-23 10:31:29 +01:00
Julien Fontanet
6022a1bbaa feat(normalize-packages): delete unused Babel configs 2021-12-23 09:26:00 +01:00
Julien Fontanet
4e88c993f7 chore: update dev deps 2021-12-22 11:07:25 +01:00
Julien Fontanet
c9a61f467c fix(xo-web/Dashboard/Health): handle no default_SR
Fixes zammad#4640

Introduced by 7bacd781c
2021-12-22 10:33:18 +01:00
Julien Fontanet
e6a5f42f63 feat: release 5.66.0 2021-12-21 18:00:39 +01:00
Julien Fontanet
a373823eea feat(xo-server): 5.86.1 2021-12-21 17:58:02 +01:00
Julien Fontanet
b5e010eac8 feat(@xen-orchestra/proxy): 0.17.1 2021-12-21 17:57:47 +01:00
Julien Fontanet
50ffe58655 feat(@xen-orchestra/backups): 0.18.1 2021-12-21 17:56:55 +01:00
Julien Fontanet
07eb3b59b3 feat(@xen-orchestra/mixins): 0.1.2 2021-12-21 17:56:52 +01:00
Julien Fontanet
5177b5e142 chore(backups/RemoteAdapter): remove default value for vhdDirectoryCompression
Introduced by 3c984e21c
2021-12-21 17:51:23 +01:00
Julien Fontanet
3c984e21cd fix({proxy,xo-server}): add backup.vhdDirectoryCompression setting
Introduced by 5d605d1bd
2021-12-21 17:49:43 +01:00
Julien Fontanet
aa2b27e22b fix(mixins/Config#get): fix missing entry error message 2021-12-21 17:37:07 +01:00
Julien Fontanet
14a7f00c90 chore(CHANGELOG): remove non-breakable spaces 2021-12-21 17:31:51 +01:00
Julien Fontanet
56f98601bd feat(CHANGELOG): integrate released changes 2021-12-21 17:24:19 +01:00
Julien Fontanet
027a8c675e feat(@xen-orchestra/proxy): 0.17.0 2021-12-21 17:22:29 +01:00
Julien Fontanet
bdaba9a767 feat(xo-server): 5.86.0 2021-12-21 17:22:07 +01:00
Julien Fontanet
4e9090f60d feat(@xen-orchestra/backups): 0.18.0 2021-12-21 17:21:37 +01:00
Julien Fontanet
73b445d371 feat(xo-vmdk-to-vhd): 2.0.2 2021-12-21 17:21:10 +01:00
Julien Fontanet
75bfc283af feat(vhd-lib): 2.1.0 2021-12-21 17:20:36 +01:00
Julien Fontanet
727de19b89 feat(@xen-orchestra/xapi): 0.8.5 2021-12-21 17:20:06 +01:00
Florent BEAUCHAMP
5d605d1bd7 feat(backups): compress VHDs on S3 (#5932) 2021-12-21 17:18:27 +01:00
Julien Fontanet
ffdd1dfd6f fix(xo-vmdk-to-vhd): avoid requiring whole vhd-lib
This library is used in the browser and a lot of parts of `vhd-lib` are not intended to be used in (or bundled for) the browser.
2021-12-21 17:10:33 +01:00
Julien Fontanet
d45418eb29 fix(backups/cleanVm): metadata.vhds is an object, not an array
Introduced by 93069159d
2021-12-21 16:23:03 +01:00
Julien Fontanet
6ccc9d1ade fix(xapi/VM_create): support NVRAM field (#6062)
Fixes #6054
Fixes https://xcp-ng.org/forum/topic/5319/bug-uefi-boot-parameters-not-preserved-with-delta-backups
2021-12-20 16:30:41 +01:00
Florent BEAUCHAMP
93069159dd fix(backups/cleanVm): don't warn on size change due to merged VHDs (#6010) 2021-12-20 14:57:54 +01:00
Julien Fontanet
8c4780131f feat: release 5.65.3 2021-12-20 10:50:51 +01:00
Julien Fontanet
02ae8bceda fix(backups/cleanVm): dont fail on broken metadata 2021-12-20 09:49:27 +01:00
Julien Fontanet
bb10bbc945 chore(backups/cleanVm): remove deleted files from jsons 2021-12-20 09:46:09 +01:00
Florent BEAUCHAMP
478d88e97f fix(fs/s3#_rmtree): infinite loop (#6067) 2021-12-17 16:01:57 +01:00
Florent BEAUCHAMP
6fb397a729 fix(vhd-lib): parseVhdStream int overflow when rebuilding the bat (#6066)
BAT should contain sector address, not byte address

We were not really rebuilding the BAT, since we were using the data read in the old bat and write it as is in the new one
2021-12-17 14:28:48 +01:00
Julien Fontanet
18dae34778 feat(vhd-lib/parseVhdStream): new public method (#6063)
Extracted from `createVhdDirectoryFromStream`

Co-authored-by: Florent Beauchamp <flo850@free.fr>
2021-12-17 10:08:29 +01:00
Julien Fontanet
243566e936 fix(xen-api): named import for @vates/coalesce-calls
Introduced by 87f4fd675
2021-12-16 14:00:49 +01:00
Julien Fontanet
87f4fd675d fix(xen-api): fix coalesceCalls
Introduced by dec6b59a9
2021-12-16 13:26:31 +01:00
Julien Fontanet
dec6b59a9f chore(xen-api): use @vates/coalesce-calls 2021-12-16 12:03:07 +01:00
Rajaa.BARHTAOUI
e51baedf7f feat: technical release (#6060) 2021-12-16 12:01:57 +01:00
Julien Fontanet
530da14e24 feat(@vates/decorate-with): 1.0.0 2021-12-16 11:49:29 +01:00
Julien Fontanet
02da7c272f feat(decorate-with): perInstance helper 2021-12-16 11:48:48 +01:00
Pierre Donias
a07c5418e9 feat(xo-server,xo-web): disable HA during Rolling Pool Update (#6057)
See #5711
2021-12-16 10:29:13 +01:00
Mathieu
c080db814b feat(xo-web/home/backed up VMs): filter out VMs in disabled backup jobs (#6037)
See xoa-support#4294
2021-12-16 10:06:45 +01:00
Julien Fontanet
3abe13c006 chore(backups/RemoteAdapter#deleteVmBackups): report unsupported backup modes
It was removed in 7e302fd1c
2021-12-16 10:05:08 +01:00
Julien Fontanet
fb331c0a2c fix(backups/RemoteAdapter#deleteVmBackups): dont delete undefined
Fixes https://xcp-ng.org/forum/topic/5331/backup-smart-mode-broken/6
Introduced by 7e302fd1c
2021-12-16 10:03:16 +01:00
Julien Fontanet
19ea78afc5 fix(xo-server): fix job matching for smart mode
Fixes https://xcp-ng.org/forum/topic/5331/backup-smart-mode-broken
Fixes #6058

Introduced by cf9f0da6e

XO VM objects have a `other` field instead of `other_config`.
2021-12-15 23:25:04 +01:00
Julien Fontanet
2096c782e3 feat(xo-server/api): new method backupNg.deleteVmBackups
Related to 7e302fd1c
2021-12-15 17:36:47 +01:00
Julien Fontanet
79a6a8a10c feat(proxy/api): new method backup.deleteVmBackups
Related to 7e302fd1c
2021-12-15 17:27:08 +01:00
Julien Fontanet
5a933bad93 fix(vhd-lib/merge): dont fail on invalid state file
Fixes zammad#4227
2021-12-15 16:36:18 +01:00
Julien Fontanet
7e302fd1cb feat(backups/RemoteAdapter): new method deleteVmBackups()
It's usually best to delete multiple backups at once instead of one by one because it allows some optimizations, for instance when merging unused VHDs.

This was already possible in private methods but not exposed in the public API.
2021-12-15 16:34:35 +01:00
127 changed files with 4316 additions and 2775 deletions

View File

@@ -4,7 +4,6 @@ about: Create a report to help us improve
title: ''
labels: 'status: triaging :triangular_flag_on_post:, type: bug :bug:'
assignees: ''
---
**Describe the bug**
@@ -12,6 +11,7 @@ A clear and concise description of what the bug is.
**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
@@ -24,10 +24,11 @@ A clear and concise description of what you expected to happen.
If applicable, add screenshots to help explain your problem.
**Desktop (please complete the following information):**
- Node: [e.g. 16.12.1]
- xo-server: [e.g. 5.82.3]
- xo-web: [e.g. 5.87.0]
- hypervisor: [e.g. XCP-ng 8.2.0]
- Node: [e.g. 16.12.1]
- xo-server: [e.g. 5.82.3]
- xo-web: [e.g. 5.87.0]
- hypervisor: [e.g. XCP-ng 8.2.0]
**Additional context**
Add any other context about the problem here.

View File

@@ -70,6 +70,25 @@ decorateMethodsWith(Foo, {
})
```
### `perInstance(fn, ...args)`
Helper to decorate the method by instance instead of for the whole class.
This is often necessary for caching or deduplicating calls.
```js
import { perInstance } from '@vates/decorateWith'
class Foo {
@decorateWith(perInstance, lodash.memoize)
bar() {
// body
}
}
```
Because it's a normal function, it can also be used with `decorateMethodsWith`, with `compose` or even by itself.
## Contributions
Contributions are _very_ welcomed, either on the documentation or on

View File

@@ -51,3 +51,22 @@ decorateMethodsWith(Foo, {
])
})
```
### `perInstance(fn, ...args)`
Helper to decorate the method by instance instead of for the whole class.
This is often necessary for caching or deduplicating calls.
```js
import { perInstance } from '@vates/decorateWith'
class Foo {
@decorateWith(perInstance, lodash.memoize)
bar() {
// body
}
}
```
Because it's a normal function, it can also be used with `decorateMethodsWith`, with `compose` or even by itself.

View File

@@ -19,3 +19,15 @@ exports.decorateMethodsWith = function decorateMethodsWith(klass, map) {
}
return klass
}
exports.perInstance = function perInstance(fn, decorator, ...args) {
const map = new WeakMap()
return function () {
let decorated = map.get(this)
if (decorated === undefined) {
decorated = decorator(fn, ...args)
map.set(this, decorated)
}
return decorated.apply(this, arguments)
}
}

View File

@@ -20,7 +20,7 @@
"url": "https://vates.fr"
},
"license": "ISC",
"version": "0.1.0",
"version": "1.0.0",
"engines": {
"node": ">=8.10"
},

View File

@@ -30,7 +30,7 @@
"rimraf": "^3.0.0"
},
"dependencies": {
"@vates/decorate-with": "^0.1.0",
"@vates/decorate-with": "^1.0.0",
"@xen-orchestra/log": "^0.3.0",
"golike-defer": "^0.5.1",
"object-hash": "^2.0.1"

View File

@@ -7,8 +7,8 @@
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"dependencies": {
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/backups": "^0.16.2",
"@xen-orchestra/fs": "^0.19.2",
"@xen-orchestra/backups": "^0.18.3",
"@xen-orchestra/fs": "^0.19.3",
"filenamify": "^4.1.0",
"getopts": "^2.2.5",
"lodash": "^4.17.15",

View File

@@ -3,9 +3,10 @@ const Disposable = require('promise-toolbox/Disposable.js')
const fromCallback = require('promise-toolbox/fromCallback.js')
const fromEvent = require('promise-toolbox/fromEvent.js')
const pDefer = require('promise-toolbox/defer.js')
const groupBy = require('lodash/groupBy.js')
const { dirname, join, normalize, resolve } = require('path')
const { createLogger } = require('@xen-orchestra/log')
const { Constants, createVhdDirectoryFromStream, openVhd, VhdAbstract, VhdSynthetic } = require('vhd-lib')
const { Constants, createVhdDirectoryFromStream, openVhd, VhdAbstract, VhdDirectory, VhdSynthetic } = require('vhd-lib')
const { deduped } = require('@vates/disposable/deduped.js')
const { execFile } = require('child_process')
const { readdir, stat } = require('fs-extra')
@@ -67,10 +68,11 @@ const debounceResourceFactory = factory =>
}
class RemoteAdapter {
constructor(handler, { debounceResource = res => res, dirMode } = {}) {
constructor(handler, { debounceResource = res => res, dirMode, vhdDirectoryCompression } = {}) {
this._debounceResource = debounceResource
this._dirMode = dirMode
this._handler = handler
this._vhdDirectoryCompression = vhdDirectoryCompression
}
get handler() {
@@ -190,6 +192,22 @@ class RemoteAdapter {
return files
}
// check if we will be allowed to merge a a vhd created in this adapter
// with the vhd at path `path`
async isMergeableParent(packedParentUid, path) {
return await Disposable.use(openVhd(this.handler, path), vhd => {
// this baseUuid is not linked with this vhd
if (!vhd.footer.uuid.equals(packedParentUid)) {
return false
}
const isVhdDirectory = vhd instanceof VhdDirectory
return isVhdDirectory
? this.#useVhdDirectory() && this.#getCompressionType() === vhd.compressionType
: !this.#useVhdDirectory()
})
}
fetchPartitionFiles(diskId, partitionId, paths) {
const { promise, reject, resolve } = pDefer()
Disposable.use(
@@ -212,8 +230,8 @@ class RemoteAdapter {
async deleteDeltaVmBackups(backups) {
const handler = this._handler
// unused VHDs will be detected by `cleanVm`
await asyncMapSettled(backups, ({ _filename }) => VhdAbstract.unlink(handler, _filename))
// this will delete the json, unused VHDs will be detected by `cleanVm`
await asyncMapSettled(backups, ({ _filename }) => handler.unlink(_filename))
}
async deleteMetadataBackup(backupId) {
@@ -243,17 +261,40 @@ class RemoteAdapter {
)
}
async deleteVmBackup(filename) {
const metadata = JSON.parse(String(await this._handler.readFile(filename)))
metadata._filename = filename
deleteVmBackup(file) {
return this.deleteVmBackups([file])
}
if (metadata.mode === 'delta') {
await this.deleteDeltaVmBackups([metadata])
} else if (metadata.mode === 'full') {
await this.deleteFullVmBackups([metadata])
} else {
throw new Error(`no deleter for backup mode ${metadata.mode}`)
async deleteVmBackups(files) {
const { delta, full, ...others } = groupBy(await asyncMap(files, file => this.readVmBackupMetadata(file)), 'mode')
const unsupportedModes = Object.keys(others)
if (unsupportedModes.length !== 0) {
throw new Error('no deleter for backup modes: ' + unsupportedModes.join(', '))
}
await Promise.all([
delta !== undefined && this.deleteDeltaVmBackups(delta),
full !== undefined && this.deleteFullVmBackups(full),
])
const dirs = new Set(files.map(file => dirname(file)))
for (const dir of dirs) {
// don't merge in main process, unused VHDs will be merged in the next backup run
await this.cleanVm(dir, { remove: true, onLog: warn })
}
}
#getCompressionType() {
return this._vhdDirectoryCompression
}
#useVhdDirectory() {
return this.handler.type === 's3'
}
#useAlias() {
return this.#useVhdDirectory()
}
getDisk = Disposable.factory(this.getDisk)
@@ -312,13 +353,10 @@ class RemoteAdapter {
return yield this._getPartition(devicePath, await this._findPartition(devicePath, partitionId))
}
// this function will be the one where we plug the logic of the storage format by fs type/user settings
// if the file is named .vhd => vhd
// if the file is named alias.vhd => alias to a vhd
// if we use alias on this remote, we have to name the file alias.vhd
getVhdFileName(baseName) {
if (this._handler.type === 's3') {
return `${baseName}.alias.vhd` // we want an alias to a vhddirectory
if (this.#useAlias()) {
return `${baseName}.alias.vhd`
}
return `${baseName}.vhd`
}
@@ -327,9 +365,14 @@ class RemoteAdapter {
const handler = this._handler
const backups = { __proto__: null }
await asyncMap(await handler.list(BACKUP_DIR), async vmUuid => {
const vmBackups = await this.listVmBackups(vmUuid)
backups[vmUuid] = vmBackups
await asyncMap(await handler.list(BACKUP_DIR), async entry => {
// ignore hidden and lock files
if (entry[0] !== '.' && !entry.endsWith('.lock')) {
const vmBackups = await this.listVmBackups(entry)
if (vmBackups.length !== 0) {
backups[entry] = vmBackups
}
}
})
return backups
@@ -470,10 +513,11 @@ class RemoteAdapter {
async writeVhd(path, input, { checksum = true, validator = noop } = {}) {
const handler = this._handler
if (path.endsWith('.alias.vhd')) {
if (this.#useVhdDirectory()) {
const dataPath = `${dirname(path)}/data/${uuidv4()}.vhd`
await createVhdDirectoryFromStream(handler, dataPath, input, {
concurrency: 16,
compression: this.#getCompressionType(),
async validator() {
await input.task
return validator.apply(this, arguments)

View File

@@ -251,6 +251,20 @@ exports.VmBackup = class VmBackup {
const timestamp = Date.now()
const progress = {
handle: setInterval(() => {
const { size } = sizeContainer
const timestamp = Date.now()
Task.info('transfer', {
speed: (size - progress.size) / (timestamp - progress.timestamp),
})
progress.size = size
progress.timestamp = timestamp
}, 5e3 * 60),
size: sizeContainer.size,
timestamp,
}
await this._callWriters(
writer =>
writer.run({
@@ -261,6 +275,8 @@ exports.VmBackup = class VmBackup {
'writer.run()'
)
clearInterval(progress.handle)
const { size } = sizeContainer
const end = Date.now()
const duration = end - timestamp

View File

@@ -70,6 +70,7 @@ class BackupWorker {
yield new RemoteAdapter(handler, {
debounceResource: this.debounceResource,
dirMode: this.#config.dirMode,
vhdDirectoryCompression: this.#config.vhdDirectoryCompression,
})
} finally {
await handler.forget()

View File

@@ -9,6 +9,8 @@ const crypto = require('crypto')
const { RemoteAdapter } = require('./RemoteAdapter')
const { VHDFOOTER, VHDHEADER } = require('./tests.fixtures.js')
const { VhdFile, Constants, VhdDirectory, VhdAbstract } = require('vhd-lib')
const { checkAliases } = require('./_cleanVm')
const { dirname, basename } = require('path')
let tempDir, adapter, handler, jobId, vdiId, basePath
@@ -35,7 +37,11 @@ const uniqueId = () => crypto.randomBytes(16).toString('hex')
async function generateVhd(path, opts = {}) {
let vhd
const dataPath = opts.useAlias ? path + '.data' : path
let dataPath = path
if (opts.useAlias) {
await handler.mkdir(dirname(path) + '/data/')
dataPath = dirname(path) + '/data/' + basename(path)
}
if (opts.mode === 'directory') {
await handler.mkdir(dataPath)
vhd = new VhdDirectory(handler, dataPath)
@@ -162,7 +168,7 @@ test('it remove backup meta data referencing a missing vhd in delta backup', asy
`${basePath}/deleted.vhd`, // in metadata but not in vhds
`${basePath}/orphan.vhd`,
`${basePath}/child.vhd`,
// abandonned.json is not here
// abandonned.vhd is not here anymore
],
}),
{ flags: 'w' }
@@ -178,7 +184,7 @@ test('it merges delta of non destroyed chain', async () => {
`metadata.json`,
JSON.stringify({
mode: 'delta',
size: 209920,
size: 12000, // a size too small
vhds: [
`${basePath}/grandchild.vhd`, // grand child should not be merged
`${basePath}/child.vhd`,
@@ -204,20 +210,25 @@ test('it merges delta of non destroyed chain', async () => {
},
})
let loggued = ''
let loggued = []
const onLog = message => {
loggued += message + '\n'
loggued.push(message)
}
await adapter.cleanVm('/', { remove: true, onLog })
expect(loggued).toEqual(`the parent /${basePath}/orphan.vhd of the child /${basePath}/child.vhd is unused\n`)
loggued = ''
expect(loggued[0]).toEqual(`the parent /${basePath}/orphan.vhd of the child /${basePath}/child.vhd is unused`)
expect(loggued[1]).toEqual(`incorrect size in metadata: 12000 instead of 209920`)
loggued = []
await adapter.cleanVm('/', { remove: true, merge: true, onLog })
const [unused, merging] = loggued.split('\n')
const [unused, merging] = loggued
expect(unused).toEqual(`the parent /${basePath}/orphan.vhd of the child /${basePath}/child.vhd is unused`)
expect(merging).toEqual(`merging /${basePath}/child.vhd into /${basePath}/orphan.vhd`)
// merging is already tested in vhd-lib, don't retest it here (and theses vhd are as empty as my stomach at 12h12)
const metadata = JSON.parse(await handler.readFile(`metadata.json`))
// size should be the size of children + grand children after the merge
expect(metadata.size).toEqual(209920)
// merging is already tested in vhd-lib, don't retest it here (and theses vhd are as empty as my stomach at 12h12)
// only check deletion
const remainingVhds = await handler.list(basePath)
expect(remainingVhds.length).toEqual(2)
@@ -231,11 +242,7 @@ test('it finish unterminated merge ', async () => {
JSON.stringify({
mode: 'delta',
size: 209920,
vhds: [
`${basePath}/orphan.vhd`, // grand child should not be merged
`${basePath}/child.vhd`,
// orphan is not here, he should be merged in child
],
vhds: [`${basePath}/orphan.vhd`, `${basePath}/child.vhd`],
})
)
@@ -261,7 +268,6 @@ test('it finish unterminated merge ', async () => {
})
)
// a unfinished merging
await adapter.cleanVm('/', { remove: true, merge: true })
// merging is already tested in vhd-lib, don't retest it here (and theses vhd are as empty as my stomach at 12h12)
@@ -274,12 +280,17 @@ test('it finish unterminated merge ', async () => {
// each of the vhd can be a file, a directory, an alias to a file or an alias to a directory
// the message an resulting files should be identical to the output with vhd files which is tested independantly
describe('tests mulitple combination ', () => {
describe('tests multiple combination ', () => {
for (const useAlias of [true, false]) {
for (const vhdMode of ['file', 'directory']) {
test(`alias : ${useAlias}, mode: ${vhdMode}`, async () => {
// a broken VHD
const brokenVhdDataPath = basePath + useAlias ? 'broken.data' : 'broken.vhd'
if (useAlias) {
await handler.mkdir(basePath + '/data')
}
const brokenVhdDataPath = basePath + (useAlias ? '/data/broken.vhd' : '/broken.vhd')
if (vhdMode === 'directory') {
await handler.mkdir(brokenVhdDataPath)
} else {
@@ -300,6 +311,7 @@ describe('tests mulitple combination ', () => {
parentUid: crypto.randomBytes(16),
},
})
// an ancestor of a vhd present in metadata
const ancestor = await generateVhd(`${basePath}/ancestor.vhd`, {
useAlias,
@@ -362,22 +374,29 @@ describe('tests mulitple combination ', () => {
],
})
)
await adapter.cleanVm('/', { remove: true, merge: true })
const metadata = JSON.parse(await handler.readFile(`metadata.json`))
// size should be the size of children + grand children + clean after the merge
expect(metadata.size).toEqual(vhdMode === 'file' ? 314880 : undefined)
// broken vhd, non referenced, abandonned should be deleted ( alias and data)
// ancestor and child should be merged
// grand child and clean vhd should not have changed
const survivors = await handler.list(basePath)
// console.log(survivors)
if (useAlias) {
const dataSurvivors = await handler.list(basePath + '/data')
// the goal of the alias : do not move a full folder
expect(survivors).toContain('ancestor.vhd.data')
expect(survivors).toContain('grandchild.vhd.data')
expect(survivors).toContain('cleanAncestor.vhd.data')
expect(dataSurvivors).toContain('ancestor.vhd')
expect(dataSurvivors).toContain('grandchild.vhd')
expect(dataSurvivors).toContain('cleanAncestor.vhd')
expect(survivors).toContain('clean.vhd.alias.vhd')
expect(survivors).toContain('child.vhd.alias.vhd')
expect(survivors).toContain('grandchild.vhd.alias.vhd')
expect(survivors.length).toEqual(6)
expect(survivors.length).toEqual(4) // the 3 ok + data
expect(dataSurvivors.length).toEqual(3) // the 3 ok + data
} else {
expect(survivors).toContain('clean.vhd')
expect(survivors).toContain('child.vhd')
@@ -388,3 +407,31 @@ describe('tests mulitple combination ', () => {
}
}
})
test('it cleans orphan merge states ', async () => {
await handler.writeFile(`${basePath}/.orphan.vhd.merge.json`, '')
await adapter.cleanVm('/', { remove: true })
expect(await handler.list(basePath)).toEqual([])
})
test('check Aliases should work alone', async () => {
await handler.mkdir('vhds')
await handler.mkdir('vhds/data')
await generateVhd(`vhds/data/ok.vhd`)
await VhdAbstract.createAlias(handler, 'vhds/ok.alias.vhd', 'vhds/data/ok.vhd')
await VhdAbstract.createAlias(handler, 'vhds/missingData.alias.vhd', 'vhds/data/nonexistent.vhd')
await generateVhd(`vhds/data/missingalias.vhd`)
await checkAliases(['vhds/missingData.alias.vhd', 'vhds/ok.alias.vhd'], 'vhds/data', { remove: true, handler })
// only ok have suvived
const alias = (await handler.list('vhds')).filter(f => f.endsWith('.vhd'))
expect(alias.length).toEqual(1)
const data = await handler.list('vhds/data')
expect(data.length).toEqual(1)
})

View File

@@ -2,6 +2,7 @@ const assert = require('assert')
const sum = require('lodash/sum')
const { asyncMap } = require('@xen-orchestra/async-map')
const { Constants, mergeVhd, openVhd, VhdAbstract, VhdFile } = require('vhd-lib')
const { isVhdAlias, resolveVhdAlias } = require('vhd-lib/aliases')
const { dirname, resolve } = require('path')
const { DISK_TYPES } = Constants
const { isMetadataFile, isVhdFile, isXvaFile, isXvaSumFile } = require('./_backupType.js')
@@ -10,6 +11,24 @@ const { limitConcurrency } = require('limit-concurrency-decorator')
const { Task } = require('./Task.js')
const { Disposable } = require('promise-toolbox')
// checking the size of a vhd directory is costly
// 1 Http Query per 1000 blocks
// we only check size of all the vhd are VhdFiles
function shouldComputeVhdsSize(vhds) {
return vhds.every(vhd => vhd instanceof VhdFile)
}
const computeVhdsSize = (handler, vhdPaths) =>
Disposable.use(
vhdPaths.map(vhdPath => openVhd(handler, vhdPath)),
async vhds => {
if (shouldComputeVhdsSize(vhds)) {
const sizes = await asyncMap(vhds, vhd => vhd.getSize())
return sum(sizes)
}
}
)
// chain is an array of VHDs from child to parent
//
// the whole chain will be merged into parent, parent will be renamed to child
@@ -64,7 +83,6 @@ async function mergeVhdChain(chain, { handler, onLog, remove, merge }) {
)
clearInterval(handle)
await Promise.all([
VhdAbstract.rename(handler, parent, child),
asyncMap(children.slice(0, -1), child => {
@@ -82,10 +100,11 @@ async function mergeVhdChain(chain, { handler, onLog, remove, merge }) {
const noop = Function.prototype
const INTERRUPTED_VHDS_REG = /^(?:(.+)\/)?\.(.+)\.merge.json$/
const INTERRUPTED_VHDS_REG = /^\.(.+)\.merge.json$/
const listVhds = async (handler, vmDir) => {
const vhds = []
const interruptedVhds = new Set()
const vhds = new Set()
const aliases = {}
const interruptedVhds = new Map()
await asyncMap(
await handler.list(`${vmDir}/vdis`, {
@@ -100,25 +119,77 @@ const listVhds = async (handler, vmDir) => {
async vdiDir => {
const list = await handler.list(vdiDir, {
filter: file => isVhdFile(file) || INTERRUPTED_VHDS_REG.test(file),
prependDir: true,
})
aliases[vdiDir] = list.filter(vhd => isVhdAlias(vhd)).map(file => `${vdiDir}/${file}`)
list.forEach(file => {
const res = INTERRUPTED_VHDS_REG.exec(file)
if (res === null) {
vhds.push(file)
vhds.add(`${vdiDir}/${file}`)
} else {
const [, dir, file] = res
interruptedVhds.add(`${dir}/${file}`)
interruptedVhds.set(`${vdiDir}/${res[1]}`, `${vdiDir}/${file}`)
}
})
}
)
)
return { vhds, interruptedVhds }
return { vhds, interruptedVhds, aliases }
}
async function checkAliases(aliasPaths, targetDataRepository, { handler, onLog = noop, remove = false }) {
const aliasFound = []
for (const path of aliasPaths) {
const target = await resolveVhdAlias(handler, path)
if (!isVhdFile(target)) {
onLog(`Alias ${path} references a non vhd target: ${target}`)
if (remove) {
await handler.unlink(target)
await handler.unlink(path)
}
continue
}
try {
const { dispose } = await openVhd(handler, target)
try {
await dispose()
} catch (e) {
// error during dispose should not trigger a deletion
}
} catch (error) {
onLog(`target ${target} of alias ${path} is missing or broken`, { error })
if (remove) {
try {
await VhdAbstract.unlink(handler, path)
} catch (e) {
if (e.code !== 'ENOENT') {
onLog(`Error while deleting target ${target} of alias ${path}`, { error: e })
}
}
}
continue
}
aliasFound.push(resolve('/', target))
}
const entries = await handler.list(targetDataRepository, {
ignoreMissing: true,
prependDir: true,
})
entries.forEach(async entry => {
if (!aliasFound.includes(entry)) {
onLog(`the Vhd ${entry} is not referenced by a an alias`)
if (remove) {
await VhdAbstract.unlink(handler, entry)
}
}
})
}
exports.checkAliases = checkAliases
const defaultMergeLimiter = limitConcurrency(1)
exports.cleanVm = async function cleanVm(
@@ -129,17 +200,16 @@ exports.cleanVm = async function cleanVm(
const handler = this._handler
const vhds = new Set()
const vhdsToJSons = new Set()
const vhdParents = { __proto__: null }
const vhdChildren = { __proto__: null }
const vhdsList = await listVhds(handler, vmDir)
const { vhds, interruptedVhds, aliases } = await listVhds(handler, vmDir)
// remove broken VHDs
await asyncMap(vhdsList.vhds, async path => {
await asyncMap(vhds, async path => {
try {
await Disposable.use(openVhd(handler, path, { checkSecondFooter: !vhdsList.interruptedVhds.has(path) }), vhd => {
vhds.add(path)
await Disposable.use(openVhd(handler, path, { checkSecondFooter: !interruptedVhds.has(path) }), vhd => {
if (vhd.footer.diskType === DISK_TYPES.DIFFERENCING) {
const parent = resolve('/', dirname(path), vhd.header.parentUnicodeName)
vhdParents[path] = parent
@@ -154,6 +224,7 @@ exports.cleanVm = async function cleanVm(
}
})
} catch (error) {
vhds.delete(path)
onLog(`error while checking the VHD with path ${path}`, { error })
if (error?.code === 'ERR_ASSERTION' && remove) {
onLog(`deleting broken ${path}`)
@@ -162,7 +233,28 @@ exports.cleanVm = async function cleanVm(
}
})
// @todo : add check for data folder of alias not referenced in a valid alias
// remove interrupted merge states for missing VHDs
for (const interruptedVhd of interruptedVhds.keys()) {
if (!vhds.has(interruptedVhd)) {
const statePath = interruptedVhds.get(interruptedVhd)
interruptedVhds.delete(interruptedVhd)
onLog('orphan merge state', {
mergeStatePath: statePath,
missingVhdPath: interruptedVhd,
})
if (remove) {
onLog(`deleting orphan merge state ${statePath}`)
await handler.unlink(statePath)
}
}
}
// check if alias are correct
// check if all vhd in data subfolder have a corresponding alias
await asyncMap(Object.keys(aliases), async dir => {
await checkAliases(aliases[dir], `${dir}/data`, { handler, onLog, remove })
})
// remove VHDs with missing ancestors
{
@@ -202,7 +294,7 @@ exports.cleanVm = async function cleanVm(
await Promise.all(deletions)
}
const jsons = []
const jsons = new Set()
const xvas = new Set()
const xvaSums = []
const entries = await handler.list(vmDir, {
@@ -210,7 +302,7 @@ exports.cleanVm = async function cleanVm(
})
entries.forEach(path => {
if (isMetadataFile(path)) {
jsons.push(path)
jsons.add(path)
} else if (isXvaFile(path)) {
xvas.add(path)
} else if (isXvaSumFile(path)) {
@@ -232,22 +324,25 @@ exports.cleanVm = async function cleanVm(
// compile the list of unused XVAs and VHDs, and remove backup metadata which
// reference a missing XVA/VHD
await asyncMap(jsons, async json => {
const metadata = JSON.parse(await handler.readFile(json))
let metadata
try {
metadata = JSON.parse(await handler.readFile(json))
} catch (error) {
onLog(`failed to read metadata file ${json}`, { error })
jsons.delete(json)
return
}
const { mode } = metadata
let size
if (mode === 'full') {
const linkedXva = resolve('/', vmDir, metadata.xva)
if (xvas.has(linkedXva)) {
unusedXvas.delete(linkedXva)
size = await handler.getSize(linkedXva).catch(error => {
onLog(`failed to get size of ${json}`, { error })
})
} else {
onLog(`the XVA linked to the metadata ${json} is missing`)
if (remove) {
onLog(`deleting incomplete backup ${json}`)
jsons.delete(json)
await handler.unlink(json)
}
}
@@ -263,46 +358,18 @@ exports.cleanVm = async function cleanVm(
// possible (existing disks) even if one disk is missing
if (missingVhds.length === 0) {
linkedVhds.forEach(_ => unusedVhds.delete(_))
// checking the size of a vhd directory is costly
// 1 Http Query per 1000 blocks
// we only check size of all the vhd are VhdFiles
const shouldComputeSize = linkedVhds.every(vhd => vhd instanceof VhdFile)
if (shouldComputeSize) {
try {
await Disposable.use(Disposable.all(linkedVhds.map(vhdPath => openVhd(handler, vhdPath))), async vhds => {
const sizes = await asyncMap(vhds, vhd => vhd.getSize())
size = sum(sizes)
})
} catch (error) {
onLog(`failed to get size of ${json}`, { error })
}
}
linkedVhds.forEach(path => {
vhdsToJSons[path] = json
})
} else {
onLog(`Some VHDs linked to the metadata ${json} are missing`, { missingVhds })
if (remove) {
onLog(`deleting incomplete backup ${json}`)
jsons.delete(json)
await handler.unlink(json)
}
}
}
const metadataSize = metadata.size
if (size !== undefined && metadataSize !== size) {
onLog(`incorrect size in metadata: ${metadataSize ?? 'none'} instead of ${size}`)
// don't update if the the stored size is greater than found files,
// it can indicates a problem
if (fixMetadata && (metadataSize === undefined || metadataSize < size)) {
try {
metadata.size = size
await handler.writeFile(json, JSON.stringify(metadata), { flags: 'w' })
} catch (error) {
onLog(`failed to update size in backup metadata ${json}`, { error })
}
}
}
})
// TODO: parallelize by vm/job/vdi
@@ -350,9 +417,9 @@ exports.cleanVm = async function cleanVm(
})
// merge interrupted VHDs
vhdsList.interruptedVhds.forEach(parent => {
for (const parent of interruptedVhds.keys()) {
vhdChainsToMerge[parent] = [vhdChildren[parent], parent]
})
}
Object.values(vhdChainsToMerge).forEach(chain => {
if (chain !== undefined) {
@@ -361,9 +428,15 @@ exports.cleanVm = async function cleanVm(
})
}
const doMerge = () => {
const promise = asyncMap(toMerge, async chain => limitedMergeVhdChain(chain, { handler, onLog, remove, merge }))
return merge ? promise.then(sizes => ({ size: sum(sizes) })) : promise
const metadataWithMergedVhd = {}
const doMerge = async () => {
await asyncMap(toMerge, async chain => {
const merged = await limitedMergeVhdChain(chain, { handler, onLog, remove, merge })
if (merged !== undefined) {
const metadataPath = vhdsToJSons[chain[0]] // all the chain should have the same metada file
metadataWithMergedVhd[metadataPath] = true
}
})
}
await Promise.all([
@@ -388,6 +461,52 @@ exports.cleanVm = async function cleanVm(
}),
])
// update size for delta metadata with merged VHD
// check for the other that the size is the same as the real file size
await asyncMap(jsons, async metadataPath => {
const metadata = JSON.parse(await handler.readFile(metadataPath))
let fileSystemSize
const merged = metadataWithMergedVhd[metadataPath] !== undefined
const { mode, size, vhds, xva } = metadata
try {
if (mode === 'full') {
// a full backup : check size
const linkedXva = resolve('/', vmDir, xva)
fileSystemSize = await handler.getSize(linkedXva)
} else if (mode === 'delta') {
const linkedVhds = Object.keys(vhds).map(key => resolve('/', vmDir, vhds[key]))
fileSystemSize = await computeVhdsSize(handler, linkedVhds)
// the size is not computed in some cases (e.g. VhdDirectory)
if (fileSystemSize === undefined) {
return
}
// don't warn if the size has changed after a merge
if (!merged && fileSystemSize !== size) {
onLog(`incorrect size in metadata: ${size ?? 'none'} instead of ${fileSystemSize}`)
}
}
} catch (error) {
onLog(`failed to get size of ${metadataPath}`, { error })
return
}
// systematically update size after a merge
if ((merged || fixMetadata) && size !== fileSystemSize) {
metadata.size = fileSystemSize
try {
await handler.writeFile(metadataPath, JSON.stringify(metadata), { flags: 'w' })
} catch (error) {
onLog(`failed to update size in backup metadata ${metadataPath} after merge`, { error })
}
}
})
return {
// boolean whether some VHDs were merged (or should be merged)
merge: toMerge.length !== 0,

View File

@@ -1,11 +1,24 @@
const assert = require('assert')
const isGzipFile = async (handler, fd) => {
const COMPRESSED_MAGIC_NUMBERS = [
// https://tools.ietf.org/html/rfc1952.html#page-5
const magicNumber = Buffer.allocUnsafe(2)
Buffer.from('1F8B', 'hex'),
assert.strictEqual((await handler.read(fd, magicNumber, 0)).bytesRead, magicNumber.length)
return magicNumber[0] === 31 && magicNumber[1] === 139
// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#zstandard-frames
Buffer.from('28B52FFD', 'hex'),
]
const MAGIC_NUMBER_MAX_LENGTH = Math.max(...COMPRESSED_MAGIC_NUMBERS.map(_ => _.length))
const isCompressedFile = async (handler, fd) => {
const header = Buffer.allocUnsafe(MAGIC_NUMBER_MAX_LENGTH)
assert.strictEqual((await handler.read(fd, header, 0)).bytesRead, header.length)
for (const magicNumber of COMPRESSED_MAGIC_NUMBERS) {
if (magicNumber.compare(header, 0, magicNumber.length) === 0) {
return true
}
}
return false
}
// TODO: better check?
@@ -43,8 +56,8 @@ async function isValidXva(path) {
return false
}
return (await isGzipFile(handler, fd))
? true // gzip files cannot be validated at this time
return (await isCompressedFile(handler, fd))
? true // compressed files cannot be validated at this time
: await isValidTar(handler, size, fd)
} finally {
handler.closeFile(fd).catch(noop)

View File

@@ -8,7 +8,7 @@
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"version": "0.16.2",
"version": "0.18.3",
"engines": {
"node": ">=14.6"
},
@@ -20,7 +20,7 @@
"@vates/disposable": "^0.1.1",
"@vates/parse-duration": "^0.1.1",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/fs": "^0.19.2",
"@xen-orchestra/fs": "^0.19.3",
"@xen-orchestra/log": "^0.3.0",
"@xen-orchestra/template": "^0.1.0",
"compare-versions": "^4.0.1",
@@ -36,11 +36,11 @@
"proper-lockfile": "^4.1.2",
"pump": "^3.0.0",
"uuid": "^8.3.2",
"vhd-lib": "^2.0.3",
"vhd-lib": "^3.0.0",
"yazl": "^2.5.1"
},
"peerDependencies": {
"@xen-orchestra/xapi": "^0.8.4"
"@xen-orchestra/xapi": "^0.8.5"
},
"license": "AGPL-3.0-or-later",
"author": {

View File

@@ -24,6 +24,7 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
async checkBaseVdis(baseUuidToSrcVdi) {
const { handler } = this._adapter
const backup = this._backup
const adapter = this._adapter
const backupDir = getVmBackupDir(backup.vm.uuid)
const vdisDir = `${backupDir}/vdis/${backup.job.id}`
@@ -35,13 +36,18 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
filter: _ => _[0] !== '.' && _.endsWith('.vhd'),
prependDir: true,
})
const packedBaseUuid = packUuid(baseUuid)
await asyncMap(vhds, async path => {
try {
await checkVhdChain(handler, path)
await Disposable.use(
openVhd(handler, path),
vhd => (found = found || vhd.footer.uuid.equals(packUuid(baseUuid)))
)
// Warning, this should not be written as found = found || await adapter.isMergeableParent(packedBaseUuid, path)
//
// since all the checks of a path are done in parallel, found would be containing
// only the last answer of isMergeableParent which is probably not the right one
// this led to the support tickets https://help.vates.fr/#ticket/zoom/4751 , 4729, 4665 and 4300
const isMergeable = await adapter.isMergeableParent(packedBaseUuid, path)
found = found || isMergeable
} catch (error) {
warn('checkBaseVdis', { error })
await ignoreErrors.call(VhdAbstract.unlink(handler, path))

View File

@@ -44,13 +44,14 @@ exports.MixinBackupWriter = (BaseClass = Object) =>
async afterBackup() {
const { disableMergeWorker } = this._backup.config
const { merge } = await this._cleanVm({ remove: true, merge: disableMergeWorker })
await this.#lock.dispose()
// merge worker only compatible with local remotes
const { handler } = this._adapter
if (merge && !disableMergeWorker && typeof handler._getRealPath === 'function') {
const willMergeInWorker = !disableMergeWorker && typeof handler._getRealPath === 'function'
const { merge } = await this._cleanVm({ remove: true, merge: !willMergeInWorker })
await this.#lock.dispose()
if (merge && willMergeInWorker) {
const taskFile =
join(MergeWorker.CLEAN_VM_QUEUE, formatFilenameDate(new Date())) +
'-' +

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "@xen-orchestra/fs",
"version": "0.19.2",
"version": "0.19.3",
"license": "AGPL-3.0-or-later",
"description": "The File System for Xen Orchestra backups.",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/fs",
@@ -21,7 +21,9 @@
"@sindresorhus/df": "^3.1.1",
"@sullux/aws-sdk": "^1.0.5",
"@vates/coalesce-calls": "^0.1.0",
"@vates/decorate-with": "^1.0.0",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/log": "^0.3.0",
"aws-sdk": "^2.686.0",
"decorator-synchronized": "^0.6.0",
"execa": "^5.0.0",

View File

@@ -2,9 +2,13 @@ import aws from '@sullux/aws-sdk'
import assert from 'assert'
import http from 'http'
import https from 'https'
import pRetry from 'promise-toolbox/retry'
import { createLogger } from '@xen-orchestra/log'
import { decorateWith } from '@vates/decorate-with'
import { parse } from 'xo-remote-parser'
import RemoteHandlerAbstract from './abstract'
import { asyncEach } from '@vates/async-each'
// endpoints https://docs.aws.amazon.com/general/latest/gr/s3.html
@@ -14,6 +18,9 @@ const MAX_PART_SIZE = 1024 * 1024 * 1024 * 5 // 5GB
const MAX_PARTS_COUNT = 10000
const MAX_OBJECT_SIZE = 1024 * 1024 * 1024 * 1024 * 5 // 5TB
const IDEAL_FRAGMENT_SIZE = Math.ceil(MAX_OBJECT_SIZE / MAX_PARTS_COUNT) // the smallest fragment size that still allows a 5TB upload in 10000 fragments, about 524MB
const { warn } = createLogger('xo:fs:s3')
export default class S3Handler extends RemoteHandlerAbstract {
constructor(remote, _opts) {
super(remote)
@@ -57,10 +64,11 @@ export default class S3Handler extends RemoteHandlerAbstract {
return { Bucket: this._bucket, Key: this._dir + file }
}
async _copy(oldPath, newPath) {
async _multipartCopy(oldPath, newPath) {
const size = await this._getSize(oldPath)
const CopySource = `/${this._bucket}/${this._dir}${oldPath}`
const multipartParams = await this._s3.createMultipartUpload({ ...this._createParams(newPath) })
const param2 = { ...multipartParams, CopySource: `/${this._bucket}/${this._dir}${oldPath}` }
const param2 = { ...multipartParams, CopySource }
try {
const parts = []
let start = 0
@@ -78,6 +86,22 @@ export default class S3Handler extends RemoteHandlerAbstract {
}
}
async _copy(oldPath, newPath) {
const CopySource = `/${this._bucket}/${this._dir}${oldPath}`
try {
await this._s3.copyObject({
...this._createParams(newPath),
CopySource,
})
} catch (e) {
// object > 5GB must be copied part by part
if (e.code === 'EntityTooLarge') {
return this._multipartCopy(oldPath, newPath)
}
throw e
}
}
async _isNotEmptyDir(path) {
const result = await this._s3.listObjectsV2({
Bucket: this._bucket,
@@ -117,6 +141,21 @@ export default class S3Handler extends RemoteHandlerAbstract {
}
}
// some objectstorage provider like backblaze, can answer a 500/503 routinely
// in this case we should retry, and let their load balancing do its magic
// https://www.backblaze.com/b2/docs/calling.html#error_handling
@decorateWith(pRetry.wrap, {
delays: [100, 200, 500, 1000, 2000],
when: e => e.code === 'InternalError',
onRetry(error) {
warn('retrying writing file', {
attemptNumber: this.attemptNumber,
delay: this.delay,
error,
file: this.arguments[0],
})
},
})
async _writeFile(file, data, options) {
return this._s3.putObject({ ...this._createParams(file), Body: data })
}
@@ -156,7 +195,7 @@ export default class S3Handler extends RemoteHandlerAbstract {
Delimiter: '/', // will only return path until delimiters
})
if (result.isTruncated) {
if (result.IsTruncated) {
const error = new Error('more than 1000 objects, unsupported in this implementation')
error.dir = dir
throw error
@@ -246,16 +285,22 @@ export default class S3Handler extends RemoteHandlerAbstract {
Prefix: this._dir + path + '/',
ContinuationToken: NextContinuationToken,
})
NextContinuationToken = result.isTruncated ? null : result.NextContinuationToken
for (const { Key } of result.Contents) {
// _unlink will add the prefix, but Key contains everything
// also we don't need to check if we delete a directory, since the list only return files
await this._s3.deleteObject({
Bucket: this._bucket,
Key,
})
}
} while (NextContinuationToken !== null)
NextContinuationToken = result.IsTruncated ? result.NextContinuationToken : undefined
await asyncEach(
result.Contents,
async ({ Key }) => {
// _unlink will add the prefix, but Key contains everything
// also we don't need to check if we delete a directory, since the list only return files
await this._s3.deleteObject({
Bucket: this._bucket,
Key,
})
},
{
concurrency: 16,
}
)
} while (NextContinuationToken !== undefined)
}
async _write(file, buffer, position) {

View File

@@ -19,7 +19,7 @@
"node": ">=6"
},
"dependencies": {
"bind-property-descriptor": "^1.0.0",
"bind-property-descriptor": "^2.0.0",
"lodash": "^4.17.21"
},
"scripts": {

View File

@@ -32,7 +32,7 @@ module.exports = class Config {
get(path) {
const value = get(this._config, path)
if (value === undefined) {
throw new TypeError('missing config entry: ' + value)
throw new TypeError('missing config entry: ' + path)
}
return value
}

View File

@@ -14,7 +14,7 @@
"url": "https://vates.fr"
},
"license": "AGPL-3.0-or-later",
"version": "0.1.1",
"version": "0.1.2",
"engines": {
"node": ">=12"
},
@@ -22,7 +22,7 @@
"@vates/parse-duration": "^0.1.1",
"@xen-orchestra/emit-async": "^0.1.0",
"@xen-orchestra/log": "^0.3.0",
"app-conf": "^0.9.0",
"app-conf": "^1.0.0",
"lodash": "^4.17.21"
},
"scripts": {

View File

@@ -29,7 +29,7 @@
"@iarna/toml": "^2.2.0",
"@vates/read-chunk": "^0.1.2",
"ansi-colors": "^4.1.1",
"app-conf": "^0.9.0",
"app-conf": "^1.0.0",
"content-type": "^1.0.4",
"cson-parser": "^4.0.7",
"getopts": "^2.2.3",

View File

@@ -20,6 +20,7 @@ keepAliveInterval = 10e3
dirMode = 0o700
disableMergeWorker = false
snapshotNameLabelTpl = '[XO Backup {job.name}] {vm.name_label}'
vhdDirectoryCompression = 'brotli'
[backups.defaultSettings]
reportWhen = 'failure'
@@ -87,3 +88,20 @@ ignoreNobakVdis = false
maxUncoalescedVdis = 1
watchEvents = ['network', 'PIF', 'pool', 'SR', 'task', 'VBD', 'VDI', 'VIF', 'VM']
#compact mode
[reverseProxies]
# '/http/' = 'http://localhost:8081/'
#The target can have a path ( like `http://target/sub/directory/`),
# parameters (`?param=one`) and hash (`#jwt:32154`) that are automatically added to all queries transfered by the proxy.
# If a parameter is present in the configuration and in the query, only the config parameter is transferred.
# '/another' = http://hiddenServer:8765/path/
# And use the extended mode when required
# The additionnal options of a proxy's configuraiton's section are used to instantiate the `https` Agent(respectively the `http`).
# A notable option is `rejectUnauthorized` which allow to connect to a HTTPS backend with an invalid/ self signed certificate
#[reverseProxies.'/https/']
# target = 'https://localhost:8080/'
# rejectUnauthorized = false

View File

@@ -1,7 +1,7 @@
{
"private": true,
"name": "@xen-orchestra/proxy",
"version": "0.15.5",
"version": "0.17.3",
"license": "AGPL-3.0-or-later",
"description": "XO Proxy used to remotely execute backup jobs",
"keywords": [
@@ -22,30 +22,31 @@
"xo-proxy": "dist/index.mjs"
},
"engines": {
"node": ">=14.13"
"node": ">=14.18"
},
"dependencies": {
"@iarna/toml": "^2.2.0",
"@koa/router": "^10.0.0",
"@vates/compose": "^2.1.0",
"@vates/decorate-with": "^0.1.0",
"@vates/decorate-with": "^1.0.0",
"@vates/disposable": "^0.1.1",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/backups": "^0.16.2",
"@xen-orchestra/fs": "^0.19.2",
"@xen-orchestra/backups": "^0.18.3",
"@xen-orchestra/fs": "^0.19.3",
"@xen-orchestra/log": "^0.3.0",
"@xen-orchestra/mixin": "^0.1.0",
"@xen-orchestra/mixins": "^0.1.1",
"@xen-orchestra/mixins": "^0.1.2",
"@xen-orchestra/self-signed": "^0.1.0",
"@xen-orchestra/xapi": "^0.8.4",
"@xen-orchestra/xapi": "^0.8.5",
"ajv": "^8.0.3",
"app-conf": "^0.9.0",
"app-conf": "^1.0.0",
"async-iterator-to-stream": "^1.1.0",
"fs-extra": "^10.0.0",
"get-stream": "^6.0.0",
"getopts": "^2.2.3",
"golike-defer": "^0.5.1",
"http-server-plus": "^0.11.0",
"http2-proxy": "^5.0.53",
"json-rpc-protocol": "^0.13.1",
"jsonrpc-websocket-client": "^0.7.2",
"koa": "^2.5.1",
@@ -57,7 +58,7 @@
"promise-toolbox": "^0.20.0",
"source-map-support": "^0.5.16",
"stoppable": "^1.0.6",
"xdg-basedir": "^4.0.0",
"xdg-basedir": "^5.1.0",
"xen-api": "^0.35.1",
"xo-common": "^0.7.0"
},

View File

@@ -14,7 +14,7 @@ import { createLogger } from '@xen-orchestra/log'
const { debug, warn } = createLogger('xo:proxy:api')
const ndJsonStream = asyncIteratorToStream(async function*(responseId, iterable) {
const ndJsonStream = asyncIteratorToStream(async function* (responseId, iterable) {
try {
let cursor, iterator
try {
@@ -45,14 +45,14 @@ export default class Api {
constructor(app, { appVersion, httpServer }) {
this._ajv = new Ajv({ allErrors: true })
this._methods = { __proto__: null }
const router = new Router({ prefix: '/api/v1' }).post('/', async ctx => {
const PREFIX = '/api/v1'
const router = new Router({ prefix: PREFIX }).post('/', async ctx => {
// Before Node 13.0 there was an inactivity timeout of 2 mins, which may
// not be enough for the API.
ctx.req.setTimeout(0)
const profile = await app.authentication.findProfile({
authenticationToken: ctx.cookies.get('authenticationToken')
authenticationToken: ctx.cookies.get('authenticationToken'),
})
if (profile === undefined) {
ctx.status = 401
@@ -102,6 +102,7 @@ export default class Api {
// breaks, send some data every 10s to keep it opened.
const stopTimer = clearInterval.bind(
undefined,
// @to check : can this add space inside binary data ?
setInterval(() => stream.push(' '), keepAliveInterval)
)
stream.on('end', stopTimer).on('error', stopTimer)
@@ -118,12 +119,19 @@ export default class Api {
.use(router.routes())
.use(router.allowedMethods())
httpServer.on('request', koa.callback())
const callback = koa.callback()
httpServer.on('request', (req, res) => {
// only answers to query to the root url of this mixin
// do it before giving the request to Koa to ensure it's not modified
if (req.url.startsWith(PREFIX)) {
callback(req, res)
}
})
this.addMethods({
system: {
getMethodsInfo: [
function*() {
function* () {
const methods = this._methods
for (const name in methods) {
const { description, params = {} } = methods[name]
@@ -131,25 +139,25 @@ export default class Api {
}
}.bind(this),
{
description: 'returns the signatures of all available API methods'
}
description: 'returns the signatures of all available API methods',
},
],
getServerVersion: [
() => appVersion,
{
description: 'returns the version of xo-server'
}
description: 'returns the version of xo-server',
},
],
listMethods: [
function*() {
function* () {
const methods = this._methods
for (const name in methods) {
yield name
}
}.bind(this),
{
description: 'returns the name of all available API methods'
}
description: 'returns the name of all available API methods',
},
],
methodSignature: [
({ method: name }) => {
@@ -164,14 +172,14 @@ export default class Api {
{
description: 'returns the signature of an API method',
params: {
method: { type: 'string' }
}
}
]
method: { type: 'string' },
},
},
],
},
test: {
range: [
function*({ start = 0, stop, step }) {
function* ({ start = 0, stop, step }) {
if (step === undefined) {
step = start > stop ? -1 : 1
}
@@ -189,11 +197,11 @@ export default class Api {
params: {
start: { optional: true, type: 'number' },
step: { optional: true, type: 'number' },
stop: { type: 'number' }
}
}
]
}
stop: { type: 'number' },
},
},
],
},
})
}
@@ -220,7 +228,7 @@ export default class Api {
return required
}),
type: 'object'
type: 'object',
})
const m = params => {

View File

@@ -1,6 +1,6 @@
import assert from 'assert'
import fse from 'fs-extra'
import xdg from 'xdg-basedir'
import { xdgConfig } from 'xdg-basedir'
import { createLogger } from '@xen-orchestra/log'
import { execFileSync } from 'child_process'
@@ -20,7 +20,7 @@ export default class Authentication {
// save this token in the automatically handled conf file
fse.outputFileSync(
// this file must take precedence over normal user config
`${xdg.config}/${appName}/config.z-auto.json`,
`${xdgConfig}/${appName}/config.z-auto.json`,
JSON.stringify({ authenticationToken: token }),
{ mode: 0o600 }
)

View File

@@ -164,6 +164,17 @@ export default class Backups {
},
},
],
deleteVmBackups: [
({ filenames, remote }) =>
Disposable.use(this.getAdapter(remote), adapter => adapter.deleteVmBackups(filenames)),
{
description: 'delete VM backups',
params: {
filenames: { type: 'array', items: { type: 'string' } },
remote: { type: 'object' },
},
},
],
fetchPartitionFiles: [
({ disk: diskId, remote, partition: partitionId, paths }) =>
Disposable.use(this.getAdapter(remote), adapter => adapter.fetchPartitionFiles(diskId, partitionId, paths)),
@@ -403,6 +414,7 @@ export default class Backups {
return new RemoteAdapter(yield app.remotes.getHandler(remote), {
debounceResource: app.debounceResource.bind(app),
dirMode: app.config.get('backups.dirMode'),
vhdDirectoryCompression: app.config.get('backups.vhdDirectoryCompression'),
})
}

View File

@@ -0,0 +1,120 @@
import { urlToHttpOptions } from 'url'
import proxy from 'http2-proxy'
function removeSlash(str) {
return str.replace(/^\/|\/$/g, '')
}
function mergeUrl(relative, base) {
const res = new URL(base)
const relativeUrl = new URL(relative, base)
res.pathname = relativeUrl.pathname
relativeUrl.searchParams.forEach((value, name) => {
// we do not allow to modify params already specified by config
if (!res.searchParams.has(name)) {
res.searchParams.append(name, value)
}
})
res.hash = relativeUrl.hash.length > 0 ? relativeUrl.hash : res.hash
return res
}
export function backendToLocalPath(basePath, target, backendUrl) {
// keep redirect url relative to local server
const localPath = `${basePath}/${backendUrl.pathname.substring(target.pathname.length)}${backendUrl.search}${
backendUrl.hash
}`
return localPath
}
export function localToBackendUrl(basePath, target, localPath) {
let localPathWithoutBase = removeSlash(localPath).substring(basePath.length)
localPathWithoutBase = './' + removeSlash(localPathWithoutBase)
const url = mergeUrl(localPathWithoutBase, target)
return url
}
export default class ReverseProxy {
constructor(app, { httpServer }) {
app.config.watch('reverseProxies', proxies => {
this._proxies = Object.keys(proxies)
.sort((a, b) => b.length - a.length)
.map(path => {
let config = proxies[path]
if (typeof config === 'string') {
config = { target: config }
}
config.path = '/proxy/v1/' + removeSlash(path) + '/'
return config
})
})
httpServer.on('request', (req, res) => this._proxy(req, res))
httpServer.on('upgrade', (req, socket, head) => this._upgrade(req, socket, head))
}
_getConfigFromRequest(req) {
return this._proxies.find(({ path }) => req.url.startsWith(path))
}
_proxy(req, res) {
const config = this._getConfigFromRequest(req)
if (config === undefined) {
res.writeHead(404)
res.end('404')
return
}
const url = new URL(config.target)
const targetUrl = localToBackendUrl(config.path, url, req.originalUrl || req.url)
proxy.web(req, res, {
...urlToHttpOptions(targetUrl),
...config.options,
onReq: (req, { headers }) => {
headers['x-forwarded-for'] = req.socket.remoteAddress
headers['x-forwarded-proto'] = req.socket.encrypted ? 'https' : 'http'
if (req.headers.host !== undefined) {
headers['x-forwarded-host'] = req.headers.host
}
},
onRes: (req, res, proxyRes) => {
// rewrite redirect to pass through this proxy
if (proxyRes.statusCode === 301 || proxyRes.statusCode === 302) {
// handle relative/ absolute location
const redirectTargetLocation = new URL(proxyRes.headers.location, url)
// this proxy should only allow communication between known hosts. Don't open it too much
if (redirectTargetLocation.hostname !== url.hostname || redirectTargetLocation.protocol !== url.protocol) {
throw new Error(`Can't redirect from ${url.hostname} to ${redirectTargetLocation.hostname} `)
}
res.writeHead(proxyRes.statusCode, {
...proxyRes.headers,
location: backendToLocalPath(config.path, url, redirectTargetLocation),
})
res.end()
return
}
// pass through the answer of the remote server
res.writeHead(proxyRes.statusCode, proxyRes.headers)
// pass through content
proxyRes.pipe(res)
},
})
}
_upgrade(req, socket, head) {
const config = this._getConfigFromRequest(req)
if (config === undefined) {
return
}
const { path, target, options } = config
const targetUrl = localToBackendUrl(path, target, req.originalUrl || req.url)
proxy.ws(req, socket, head, {
...urlToHttpOptions(targetUrl),
...options,
})
}
}

View File

@@ -0,0 +1,19 @@
-----BEGIN CERTIFICATE-----
MIIDETCCAfkCFHXO1U7YJHI61bPNhYDvyBNJYH4LMA0GCSqGSIb3DQEBCwUAMEUx
CzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRl
cm5ldCBXaWRnaXRzIFB0eSBMdGQwHhcNMjIwMTEwMTI0MTU4WhcNNDkwNTI3MTI0
MTU4WjBFMQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UE
CgwYSW50ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMIIBIjANBgkqhkiG9w0BAQEFAAOC
AQ8AMIIBCgKCAQEA1jMLdHuZu2R1fETyB2iRect1alwv76clp/7A8tx4zNaVA9qB
BcHbI83mkozuyrXpsEUblTvvcWkheBPAvWD4gj0eWSDSiuf0edcIS6aky+Lr/n0T
W/vL5kVNrgPTlsO8OyQcXjDeuUOR1xDWIa8G71Ynd6wtATB7oXe7kaV/Z6b2fENr
4wlW0YEDnMHik59c9jXDshhQYDlErwZsSyHuLwkC7xuYO26SUW9fPcHJA3uOfxeG
BrCxMuSMOJtdmslRWhLCjbk0PT12OYCCRlvuTvPHa8N57GEQbi4xAu+XATgO1DUm
Dq/oCSj0TcWUXXOykN/PAC2cjIyqkU2e7orGaQIDAQABMA0GCSqGSIb3DQEBCwUA
A4IBAQCTshhF3V5WVhnpFGHd+tPfeHmUVrUnbC+xW7fSeWpamNmTjHb7XB6uDR0O
DGswhEitbbSOsCiwz4/zpfE3/3+X07O8NPbdHTVHCei6D0uyegEeWQ2HoocfZs3X
8CORe8TItuvQAevV17D0WkGRoJGVAOiKo+izpjI55QXQ+FjkJ0bfl1iksnUJk0+I
ZNmRRNjNyOxo7NAzomSBHfJ5rDE+E440F2uvXIE9OIwHRiq6FGvQmvGijPeeP5J0
LzcSK98jfINFSsA/Wn5vWE+gfH9ySD2G3r2cDTS904T77PNiYH+cNSP6ujtmNzvK
Bgoa3jXZPRBi82TUOb2jj5DB33bg
-----END CERTIFICATE-----

View File

@@ -0,0 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEA1jMLdHuZu2R1fETyB2iRect1alwv76clp/7A8tx4zNaVA9qB
BcHbI83mkozuyrXpsEUblTvvcWkheBPAvWD4gj0eWSDSiuf0edcIS6aky+Lr/n0T
W/vL5kVNrgPTlsO8OyQcXjDeuUOR1xDWIa8G71Ynd6wtATB7oXe7kaV/Z6b2fENr
4wlW0YEDnMHik59c9jXDshhQYDlErwZsSyHuLwkC7xuYO26SUW9fPcHJA3uOfxeG
BrCxMuSMOJtdmslRWhLCjbk0PT12OYCCRlvuTvPHa8N57GEQbi4xAu+XATgO1DUm
Dq/oCSj0TcWUXXOykN/PAC2cjIyqkU2e7orGaQIDAQABAoIBAQC65uVq6WLWGa1O
FtbdUggGL1svyGrngYChGvB/uZMKoX57U1DbljDCCCrV23WNmbfkYBjWWervmZ1j
qlC2roOJGQ1/Fd3A6O7w1YnegPUxFrt3XunijE55iiVi3uHknryDGlpKcfgVzfjW
oVFHKPMzKYjcqnbGn+hwlwoq5y7JYFTOa57/dZbyommbodRyy9Dpn0OES0grQqwR
VD1amrQ7XJhukcxQgYPuDc/jM3CuowoBsv9f+Q2zsPgr6CpHxxLLIs+kt8NQJT9v
neg/pm8ojcwOa9qoILdtu6ue7ee3VE9cFnB1vutxS1+MPeI5wgTJjaYrgPCMxXBM
2LdJJEmBAoGBAPA6LpuU1vv5R3x66hzenSk4LS1fj24K0WuBdTwFvzQmCr70oKdo
Yywxt+ZkBw5aEtzQlB8GewolHobDJrzxMorU+qEXX3jP2BIPDVQl2orfjr03Yyus
s5mYS/Qa6Zf1yObrjulTNm8oTn1WaG3TIvi8c5DyG2OK28N/9oMI1XGRAoGBAORD
YKyII/S66gZsJSf45qmrhq1hHuVt1xae5LUPP6lVD+MCCAmuoJnReV8fc9h7Dvgd
YPMINkWUTePFr3o4p1mh2ZC7ldczgDn6X4TldY2J3Zg47xJa5hL0L6JL4NiCGRIE
FV5rLJxkGh/DDBfmC9hQQ6Yg6cHvyewso5xVnBtZAoGAI+OdWPMIl0ZrrqYyWbPM
aP8SiMfRBtCo7tW9bQUyxpi0XEjxw3Dt+AlJfysMftFoJgMnTedK9H4NLHb1T579
PQ6KjwyN39+1WSVUiXDKUJsLmSswLrMzdcvx9PscUO6QYCdrB2K+LCcqasFBAr9b
ZyvIXCw/eUSihneUnYjxUnECgYAoPgCzKiU8ph9QFozOaUExNH4/3tl1lVHQOR8V
FKUik06DtP35xwGlXJrLPF5OEhPnhjZrYk0/IxBAUb/ICmjmknQq4gdes0Ot9QgW
A+Yfl+irR45ObBwXx1kGgd4YDYeh93pU9QweXj+Ezfw50mLQNgZXKYJMoJu2uX/2
tdkZsQKBgQCTfDcW8qBntI6V+3Gh+sIThz+fjdv5+qT54heO4EHadc98ykEZX0M1
sCWJiAQWM/zWXcsTndQDgDsvo23jpoulVPDitSEISp5gSe9FEN2njsVVID9h1OIM
f30s5kwcJoiV9kUCya/BFtuS7kbuQfAyPU0v3I+lUey6VCW6A83OTg==
-----END RSA PRIVATE KEY-----

View File

@@ -0,0 +1,60 @@
import { createServer as createHttpsServer } from 'https'
import { createServer as createHttpServer } from 'http'
import { WebSocketServer } from 'ws'
import fs from 'fs'

// Test fixture: an HTTPS server (port 8080) and a plain HTTP server (port 8081)
// sharing the same request and upgrade handlers.
// Requires `key.pem` and `cert.pem` in the current working directory.
const httpsServer = createHttpsServer({
  key: fs.readFileSync('key.pem'),
  cert: fs.readFileSync('cert.pem'),
})
const httpServer = createHttpServer()

// noServer mode: upgrade requests are routed manually by `upgrade()` below
const wss = new WebSocketServer({ noServer: true, perMessageDeflate: false })

// Accept WebSocket upgrades only on the /foo path; echo every message back.
function upgrade(request, socket, head) {
  // request.url is a path+query (e.g. `/foo?x=1`), not an absolute URL:
  // the URL constructor needs a base or it throws ERR_INVALID_URL.
  const { pathname } = new URL(request.url, 'http://localhost')
  // web socket server only on /foo url
  if (pathname === '/foo') {
    wss.handleUpgrade(request, socket, head, function done(ws) {
      wss.emit('connection', ws, request)
      ws.on('message', function message(data) {
        // echo the received payload unchanged
        ws.send(data)
      })
    })
  } else {
    // reject upgrades on any other path
    socket.destroy()
  }
}

// Plain HTTP handler exercising direct responses and redirect chains.
function httpHandler(req, res) {
  switch (req.url) {
    case '/index.html':
      res.end('hi')
      return
    case '/redirect':
      // relative redirect target
      res.writeHead(301, {
        Location: 'index.html',
      })
      res.end()
      return
    case '/chainRedirect':
      // two-hop redirect: /chainRedirect -> /redirect -> index.html
      res.writeHead(301, {
        Location: '/redirect',
      })
      res.end()
      return
    default:
      res.writeHead(404)
      res.end()
  }
}

httpsServer.on('upgrade', upgrade)
httpServer.on('upgrade', upgrade)
httpsServer.on('request', httpHandler)
httpServer.on('request', httpHandler)
httpsServer.listen(8080)
httpServer.listen(8081)

View File

@@ -0,0 +1,123 @@
import ReverseProxy, { backendToLocalPath, localToBackendUrl } from '../dist/app/mixins/reverseProxy.mjs'
import { deepEqual, strictEqual } from 'assert'
// Build a minimal application stub whose `config.get()` always returns the
// provided reverse-proxy configuration, regardless of the requested key.
function makeApp(reverseProxies) {
  const config = {
    get: () => reverseProxies,
  }
  return { config }
}
// App stub with one fully-specified proxy (`https`) and one shorthand string
// proxy (`http`).
const app = makeApp({
  https: {
    target: 'https://localhost:8080/remotePath/?baseParm=1#one=2&another=3',
    oneOption: true,
  },
  http: 'http://localhost:8080/remotePath/?baseParm=1#one=2&another=3',
})

// --- localToBackendUrl ------------------------------------------------------
// Each case maps a local proxy path to the backend URL it should resolve to.
const expectedLocalToRemote = {
  https: [
    {
      local: '/proxy/v1/https/',
      remote: 'https://localhost:8080/remotePath/?baseParm=1#one=2&another=3',
    },
    {
      local: '/proxy/v1/https/sub',
      remote: 'https://localhost:8080/remotePath/sub?baseParm=1#one=2&another=3',
    },
    {
      local: '/proxy/v1/https/sub/index.html',
      remote: 'https://localhost:8080/remotePath/sub/index.html?baseParm=1#one=2&another=3',
    },
    {
      local: '/proxy/v1/https/sub?param=1',
      remote: 'https://localhost:8080/remotePath/sub?baseParm=1&param=1#one=2&another=3',
    },
    {
      local: '/proxy/v1/https/sub?baseParm=willbeoverwritten&param=willstay',
      remote: 'https://localhost:8080/remotePath/sub?baseParm=1&param=willstay#one=2&another=3',
    },
    {
      local: '/proxy/v1/https/sub?param=1#another=willoverwrite',
      remote: 'https://localhost:8080/remotePath/sub?baseParm=1&param=1#another=willoverwrite',
    },
  ],
}

const proxy = new ReverseProxy(app, { httpServer: { on: () => {} } })

Object.values(expectedLocalToRemote).forEach(cases => {
  cases.forEach(({ local, remote }) => {
    const { path, target } = proxy._getConfigFromRequest({ url: local })
    strictEqual(localToBackendUrl(path, new URL(target), local).href, remote, 'error converting to backend')
  })
})

// --- backendToLocalPath -----------------------------------------------------
// `remote` may be absolute or relative; it is resolved against the proxy target.
const expectedRemoteToLocal = {
  https: [
    {
      local: '/proxy/v1/https/',
      remote: 'https://localhost:8080/remotePath/',
    },
    {
      local: '/proxy/v1/https/sub/index.html',
      remote: '/remotePath/sub/index.html',
    },
    {
      local: '/proxy/v1/https/?baseParm=1#one=2&another=3',
      remote: '?baseParm=1#one=2&another=3',
    },
    {
      local: '/proxy/v1/https/sub?baseParm=1#one=2&another=3',
      remote: 'https://localhost:8080/remotePath/sub?baseParm=1#one=2&another=3',
    },
  ],
}

Object.values(expectedRemoteToLocal).forEach(cases => {
  cases.forEach(({ local, remote }) => {
    const { path } = proxy._getConfigFromRequest({ url: local })
    const targetUrl = new URL('https://localhost:8080/remotePath/?baseParm=1#one=2&another=3')
    strictEqual(backendToLocalPath(path, targetUrl, new URL(remote, targetUrl)), local, 'error converting to local')
  })
})

// --- _getConfigFromRequest --------------------------------------------------
// Requests outside a configured proxy prefix must yield `undefined`.
const expectedConfig = [
  {
    local: '/proxy/v1/http/other',
    config: {
      target: 'http://localhost:8080/remotePath/?baseParm=1#one=2&another=3',
      options: {},
      path: '/proxy/v1/http',
    },
  },
  {
    local: '/proxy/v1/http',
    config: undefined,
  },
  {
    local: '/proxy/v1/other',
    config: undefined,
  },
  {
    local: '/proxy/v1/https/',
    config: {
      target: 'https://localhost:8080/remotePath/?baseParm=1#one=2&another=3',
      options: {
        oneOption: true,
      },
      path: '/proxy/v1/https',
    },
  },
]

expectedConfig.forEach(({ local, config }) => {
  deepEqual(proxy._getConfigFromRequest({ url: local }), config)
})

View File

@@ -45,7 +45,7 @@
"strip-indent": "^3.0.0",
"xdg-basedir": "^4.0.0",
"xo-lib": "^0.11.1",
"xo-vmdk-to-vhd": "^2.0.1"
"xo-vmdk-to-vhd": "^2.0.3"
},
"devDependencies": {
"@babel/cli": "^7.0.0",

View File

@@ -1,6 +1,6 @@
{
"name": "@xen-orchestra/xapi",
"version": "0.8.4",
"version": "0.8.5",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/xapi",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
@@ -38,7 +38,7 @@
"prepublishOnly": "yarn run build"
},
"dependencies": {
"@vates/decorate-with": "^0.1.0",
"@vates/decorate-with": "^1.0.0",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/log": "^0.3.0",
"d3-time-format": "^3.0.0",

View File

@@ -60,7 +60,7 @@ module.exports = class Vm {
try {
vdi = await this[vdiRefOrUuid.startsWith('OpaqueRef:') ? 'getRecord' : 'getRecordByUuid']('VDI', vdiRefOrUuid)
} catch (error) {
warn(error)
warn('_assertHealthyVdiChain, could not fetch VDI', { error })
return
}
cache[vdi.$ref] = vdi
@@ -81,7 +81,7 @@ module.exports = class Vm {
try {
vdi = await this.getRecord('VDI', vdiRef)
} catch (error) {
warn(error)
warn('_assertHealthyVdiChain, could not fetch VDI', { error })
return
}
cache[vdiRef] = vdi
@@ -167,7 +167,7 @@ module.exports = class Vm {
memory_static_min,
name_description,
name_label,
// NVRAM, // experimental
NVRAM,
order,
other_config = {},
PCI_bus = '',
@@ -256,6 +256,7 @@ module.exports = class Vm {
is_vmss_snapshot,
name_description,
name_label,
NVRAM,
order,
reference_label,
shutdown_delay,

View File

@@ -1,14 +1,91 @@
# ChangeLog
## **5.65.2** (2021-12-10)
## **5.66.2** (2022-01-05)
<img id="latest" src="https://badgen.net/badge/channel/latest/yellow" alt="Channel: latest" />
### Bug fixes
- [Backup] Fix `handler.rmTree` is not a function (Forum [5256](https://xcp-ng.org/forum/topic/5256/s3-backup-try-it/29) PR [#6041](https://github.com/vatesfr/xen-orchestra/pull/6041) )
- [Backup] Fix `EEXIST` in logs when multiple merge tasks are created at the same time ([Forum #5301](https://xcp-ng.org/forum/topic/5301/warnings-errors-in-journalctl))
- [Backup] Fix missing backup on restore (Forum [5256](https://xcp-ng.org/forum/topic/5256/s3-backup-try-it/29) (PR [#6048](https://github.com/vatesfr/xen-orchestra/pull/6048))
- [Import/Disk] Fix `JSON.parse` and `createReadableSparseStream is not a function` errors [#6068](https://github.com/vatesfr/xen-orchestra/issues/6068)
- [Backup] Fix delta backup are almost always full backup instead of differentials [Forum#5256](https://xcp-ng.org/forum/topic/5256/s3-backup-try-it/69) [Forum#5371](https://xcp-ng.org/forum/topic/5371/delta-backup-changes-in-5-66) (PR [#6075](https://github.com/vatesfr/xen-orchestra/pull/6075))
### Released packages
- vhd-lib 3.0.0
- xo-vmdk-to-vhd 2.0.3
- @xen-orchestra/backups 0.18.3
- @xen-orchestra/proxy 0.17.3
- xo-server 5.86.3
- xo-web 5.91.2
## **5.66.1** (2021-12-23)
### Bug fixes
- [Dashboard/Health] Fix `error has occured` when a pool has no default SR
- [Delta Backup] Fix unnecessary full backup when not using S3 [Forum#5371](https://xcp-ng.org/forum/topic/5371/delta-backup-changes-in-5-66) (PR [#6070](https://github.com/vatesfr/xen-orchestra/pull/6070))
- [Backup] Fix incorrect warnings `incorrect size [...] instead of undefined`
### Released packages
- @xen-orchestra/backups 0.18.2
- @xen-orchestra/proxy 0.17.2
- xo-server 5.86.2
- xo-web 5.91.1
## **5.66.0** (2021-12-21)
### Enhancements
- [About] Show commit instead of version numbers for source users (PR [#6045](https://github.com/vatesfr/xen-orchestra/pull/6045))
- [Health] Display default SRs that aren't shared [#5871](https://github.com/vatesfr/xen-orchestra/issues/5871) (PR [#6033](https://github.com/vatesfr/xen-orchestra/pull/6033))
- [Pool,VM/advanced] Ability to change the suspend SR [#4163](https://github.com/vatesfr/xen-orchestra/issues/4163) (PR [#6044](https://github.com/vatesfr/xen-orchestra/pull/6044))
- [Home/VMs/Backup filter] Filter out VMs in disabled backup jobs (PR [#6037](https://github.com/vatesfr/xen-orchestra/pull/6037))
- [Rolling Pool Update] Automatically disable High Availability during the update [#5711](https://github.com/vatesfr/xen-orchestra/issues/5711) (PR [#6057](https://github.com/vatesfr/xen-orchestra/pull/6057))
- [Delta Backup on S3] Compress blocks by default ([Brotli](https://en.wikipedia.org/wiki/Brotli)) which reduces remote usage and increase backup speed (PR [#5932](https://github.com/vatesfr/xen-orchestra/pull/5932))
### Bug fixes
- [Tables/actions] Fix collapsed actions being clickable despite being disabled (PR [#6023](https://github.com/vatesfr/xen-orchestra/pull/6023))
- [Backup] Remove incorrect size warning following a merge [Forum#5727](https://xcp-ng.org/forum/topic/4769/warnings-showing-in-system-logs-following-each-backup-job/4) (PR [#6010](https://github.com/vatesfr/xen-orchestra/pull/6010))
- [Delta Backup] Preserve UEFI boot parameters [#6054](https://github.com/vatesfr/xen-orchestra/issues/6054) [Forum#5319](https://xcp-ng.org/forum/topic/5319/bug-uefi-boot-parameters-not-preserved-with-delta-backups)
### Released packages
- @xen-orchestra/mixins 0.1.2
- @xen-orchestra/xapi 0.8.5
- vhd-lib 2.1.0
- xo-vmdk-to-vhd 2.0.2
- @xen-orchestra/backups 0.18.1
- @xen-orchestra/proxy 0.17.1
- xo-server 5.86.1
- xo-web 5.91.0
## **5.65.3** (2021-12-20)
<img id="stable" src="https://badgen.net/badge/channel/stable/green" alt="Channel: stable" />
### Bug fixes
- [Continuous Replication] Fix `could not find the base VM`
- [Backup/Smart mode] Always ignore replicated VMs created by the current job
- [Backup] Fix `Unexpected end of JSON input` during merge step
- [Backup] Fix stuck jobs when using S3 remotes (PR [#6067](https://github.com/vatesfr/xen-orchestra/pull/6067))
### Released packages
- @xen-orchestra/fs 0.19.3
- vhd-lib 2.0.4
- @xen-orchestra/backups 0.17.1
- xo-server 5.85.1
## **5.65.2** (2021-12-10)
### Bug fixes
- [Backup] Fix `handler.rmTree` is not a function [Forum#5256](https://xcp-ng.org/forum/topic/5256/s3-backup-try-it/29) (PR [#6041](https://github.com/vatesfr/xen-orchestra/pull/6041))
- [Backup] Fix `EEXIST` in logs when multiple merge tasks are created at the same time [Forum#5301](https://xcp-ng.org/forum/topic/5301/warnings-errors-in-journalctl)
- [Backup] Fix missing backup on restore [Forum#5256](https://xcp-ng.org/forum/topic/5256/s3-backup-try-it/29) (PR [#6048](https://github.com/vatesfr/xen-orchestra/pull/6048))
### Released packages
@@ -22,10 +99,10 @@
### Bug fixes
- [Delta Backup Restoration] Fix assertion error [Forum #5257](https://xcp-ng.org/forum/topic/5257/problems-building-from-source/16)
- [Delta Backup Restoration] `TypeError: this disposable has already been disposed` [Forum #5257](https://xcp-ng.org/forum/topic/5257/problems-building-from-source/20)
- [Backups] Fix: `Error: Chaining alias is forbidden xo-vm-backups/..alias.vhd to xo-vm-backups/....alias.vhd` when backuping a file to s3 [Forum #5226](https://xcp-ng.org/forum/topic/5256/s3-backup-try-it)
- [Delta Backup Restoration] `VDI_IO_ERROR(Device I/O errors)` [Forum #5727](https://xcp-ng.org/forum/topic/5257/problems-building-from-source/4) (PR [#6031](https://github.com/vatesfr/xen-orchestra/pull/6031))
- [Delta Backup Restoration] Fix assertion error [Forum#5257](https://xcp-ng.org/forum/topic/5257/problems-building-from-source/16)
- [Delta Backup Restoration] `TypeError: this disposable has already been disposed` [Forum#5257](https://xcp-ng.org/forum/topic/5257/problems-building-from-source/20)
- [Backups] Fix: `Error: Chaining alias is forbidden xo-vm-backups/..alias.vhd to xo-vm-backups/....alias.vhd` when backuping a file to s3 [Forum#5226](https://xcp-ng.org/forum/topic/5256/s3-backup-try-it)
- [Delta Backup Restoration] `VDI_IO_ERROR(Device I/O errors)` [Forum#5727](https://xcp-ng.org/forum/topic/5257/problems-building-from-source/4) (PR [#6031](https://github.com/vatesfr/xen-orchestra/pull/6031))
- [Delta Backup] Fix `Cannot read property 'uuid' of undefined` when a VDI has been removed from a backed up VM (PR [#6034](https://github.com/vatesfr/xen-orchestra/pull/6034))
### Released packages
@@ -42,7 +119,7 @@
### Highlights
- [VM] Ability to export a snapshot's memory (PR [#6015](https://github.com/vatesfr/xen-orchestra/pull/6015))
- [Cloud config] Ability to create a network cloud config template and reuse it in the VM creation [#5931](https://github.com/vatesfr/xen-orchestra/issues/5931) (PR [#5979](https://github.com/vatesfr/xen-orchestra/pull/5979))
- [Cloud config] Ability to create a network cloud config template and reuse it in the VM creation [#5931](https://github.com/vatesfr/xen-orchestra/issues/5931) (PR [#5979](https://github.com/vatesfr/xen-orchestra/pull/5979))
- [Backup/logs] identify XAPI errors (PR [#6001](https://github.com/vatesfr/xen-orchestra/pull/6001))
- [lite] Highlight selected VM (PR [#5939](https://github.com/vatesfr/xen-orchestra/pull/5939))
@@ -70,8 +147,6 @@
## **5.64.0** (2021-10-29)
<img id="stable" src="https://badgen.net/badge/channel/stable/green" alt="Channel: stable" />
## Highlights
- [Netbox] Support older versions of Netbox and prevent "active is not a valid choice" error [#5898](https://github.com/vatesfr/xen-orchestra/issues/5898) (PR [#5946](https://github.com/vatesfr/xen-orchestra/pull/5946))
@@ -79,8 +154,8 @@
- [Host] Handle evacuation failure during host shutdown (PR [#5966](https://github.com/vatesfr/xen-orchestra/pull/#5966))
- [Menu] Notify user when proxies need to be upgraded (PR [#5930](https://github.com/vatesfr/xen-orchestra/pull/5930))
- [Servers] Ability to use an HTTP proxy between XO and a server (PR [#5958](https://github.com/vatesfr/xen-orchestra/pull/5958))
- [VM/export] Ability to copy the export URL (PR [#5948](https://github.com/vatesfr/xen-orchestra/pull/5948))
- [Pool/advanced] Ability to define network for importing/exporting VMs/VDIs (PR [#5957](https://github.com/vatesfr/xen-orchestra/pull/5957))
- [VM/export] Ability to copy the export URL (PR [#5948](https://github.com/vatesfr/xen-orchestra/pull/5948))
- [Pool/advanced] Ability to define network for importing/exporting VMs/VDIs (PR [#5957](https://github.com/vatesfr/xen-orchestra/pull/5957))
- [Host/advanced] Add button to enable/disable the host (PR [#5952](https://github.com/vatesfr/xen-orchestra/pull/5952))
- [Backups] Enable merge worker by default
@@ -116,8 +191,8 @@
### Highlights
- [Backup] Go back to previous page instead of going to the overview after editing a job: keeps current filters and page (PR [#5913](https://github.com/vatesfr/xen-orchestra/pull/5913))
- [Health] Do not take into consideration duplicated MAC addresses from CR VMs (PR [#5916](https://github.com/vatesfr/xen-orchestra/pull/5916))
- [Health] Ability to filter duplicated MAC addresses by running VMs (PR [#5917](https://github.com/vatesfr/xen-orchestra/pull/5917))
- [Health] Do not take into consideration duplicated MAC addresses from CR VMs (PR [#5916](https://github.com/vatesfr/xen-orchestra/pull/5916))
- [Health] Ability to filter duplicated MAC addresses by running VMs (PR [#5917](https://github.com/vatesfr/xen-orchestra/pull/5917))
- [Tables] Move the search bar and pagination to the top of the table (PR [#5914](https://github.com/vatesfr/xen-orchestra/pull/5914))
- [Netbox] Handle nested prefixes by always assigning an IP to the smallest prefix it matches (PR [#5908](https://github.com/vatesfr/xen-orchestra/pull/5908))
@@ -265,7 +340,7 @@
### Bug fixes
- [SDN Controller] Private network creation failure when the tunnels were created on different devices [Forum #4620](https://xcp-ng.org/forum/topic/4620/no-pif-found-in-center) (PR [#5793](https://github.com/vatesfr/xen-orchestra/pull/5793))
- [SDN Controller] Private network creation failure when the tunnels were created on different devices [Forum#4620](https://xcp-ng.org/forum/topic/4620/no-pif-found-in-center) (PR [#5793](https://github.com/vatesfr/xen-orchestra/pull/5793))
### Released packages
@@ -384,7 +459,7 @@
- [Proxy] _Redeploy_ now works when the bound VM is missing
- [VM template] Fix confirmation modal doesn't appear on deleting a default template (PR [#5644](https://github.com/vatesfr/xen-orchestra/pull/5644))
- [OVA VM Import] Fix imported VMs all having the same MAC addresses
- [Disk import] Fix `an error has occurred` when importing wrong format or corrupted files [#5663](https://github.com/vatesfr/xen-orchestra/issues/5663) (PR [#5683](https://github.com/vatesfr/xen-orchestra/pull/5683))
- [Disk import] Fix `an error has occurred` when importing wrong format or corrupted files [#5663](https://github.com/vatesfr/xen-orchestra/issues/5663) (PR [#5683](https://github.com/vatesfr/xen-orchestra/pull/5683))
### Released packages

View File

@@ -7,17 +7,24 @@
> Users must be able to say: “Nice enhancement, I'm eager to test it”
- [About] Show commit instead of version numbers for source users (PR [#6045](https://github.com/vatesfr/xen-orchestra/pull/6045))
- [Health] Display default SRs that aren't shared [#5871](https://github.com/vatesfr/xen-orchestra/issues/5871) (PR [#6033](https://github.com/vatesfr/xen-orchestra/pull/6033))
- [Pool,VM/advanced] Ability to change the suspend SR [#4163](https://github.com/vatesfr/xen-orchestra/issues/4163) (PR [#6044](https://github.com/vatesfr/xen-orchestra/pull/6044))
- Limit number of concurrent VM migrations per pool to `3`. Can be changed in `xo-server`'s configuration file: `xapiOptions.vmMigrationConcurrency` [#6065](https://github.com/vatesfr/xen-orchestra/issues/6065) (PR [#6076](https://github.com/vatesfr/xen-orchestra/pull/6076))
- [Proxy] Now ships a reverse proxy (PR [#6072](https://github.com/vatesfr/xen-orchestra/pull/6072))
- [Delta Backup] When using S3 remote, retry uploading VHD parts on Internal Error to support [Backblaze](https://www.backblaze.com/b2/docs/calling.html#error_handling) [Forum#5397](https://xcp-ng.org/forum/topic/5397/delta-backups-failing-aws-s3-uploadpartcopy-cpu-too-busy/5) (PR [#6086](https://github.com/vatesfr/xen-orchestra/pull/6086))
- [Backup] Add sanity check of aliases on S3 remotes (PR [#6043](https://github.com/vatesfr/xen-orchestra/pull/6043))
- [Export/Disks] Allow the export of disks in VMDK format (PR [#5982](https://github.com/vatesfr/xen-orchestra/pull/5982))
- [Rolling Pool Update] Automatically pause load balancer plugin during the update [#5711](https://github.com/vatesfr/xen-orchestra/issues/5711)
- [Backup] Speedup merge and cleanup speed for S3 backup by a factor 10 (PR [#6100](https://github.com/vatesfr/xen-orchestra/pull/6100))
### Bug fixes
> Users must be able to say: “I had this issue, happy to know it's fixed”
- [Tables/actions] Fix collapsed actions being clickable despite being disabled (PR [#6023](https://github.com/vatesfr/xen-orchestra/pull/6023))
- [Continuous Replication] Fix `could not find the base VM`
- [Backup/Smart mode] Always ignore replicated VMs created by the current job
- [Backup] Detect and clear orphan merge states, fix `ENOENT` errors (PR [#6087](https://github.com/vatesfr/xen-orchestra/pull/6087))
- [Backup] Ensure merges are also executed after backup on S3, maintaining the size of the VHD chain under control [Forum#45743](https://xcp-ng.org/forum/post/45743) (PR [#6095](https://github.com/vatesfr/xen-orchestra/pull/6095))
- [Backup] Delete backups immediately instead of waiting for the next backup (PR [#6081](https://github.com/vatesfr/xen-orchestra/pull/6081))
- [Backup] Delete S3 backups completely, even if there are more than 1000 files (PR [#6103](https://github.com/vatesfr/xen-orchestra/pull/6103))
- [Backup] Fix merge resuming (PR [#6099](https://github.com/vatesfr/xen-orchestra/pull/6099))
- [Plugin/Audit] Fix `key cannot be 'null' or 'undefined'` error when no audit log in the database [#6040](https://github.com/vatesfr/xen-orchestra/issues/6040) (PR [#6071](https://github.com/vatesfr/xen-orchestra/pull/6071))
### Packages to release
@@ -36,7 +43,12 @@
>
> In case of conflict, the highest (lowest in previous list) `$version` wins.
- @xen-orchestra/fs minor
- vhd-lib minor
- xo-vmdk-to-vhd minor
- @xen-orchestra/backups minor
- @xen-orchestra/backups-cli minor
- @xen-orchestra/proxy minor
- xo-server-audit patch
- xo-server minor
- xo-web minor

View File

@@ -46,7 +46,6 @@
],
"^(value-matcher)$": "$1/src",
"^(vhd-cli)$": "$1/src",
"^(vhd-lib)$": "$1/src",
"^(xo-[^/]+)$": [
"$1/src",
"$1"

View File

@@ -68,7 +68,7 @@ predicate([false, { foo: 'bar', baz: 42 }, null, 42]) // true
predicate('foo') // false
```
### `{ __all: Pattern[] }`
### `{ __and: Pattern[] }`
All patterns must match.

View File

@@ -50,7 +50,7 @@ predicate([false, { foo: 'bar', baz: 42 }, null, 42]) // true
predicate('foo') // false
```
### `{ __all: Pattern[] }`
### `{ __and: Pattern[] }`
All patterns must match.

View File

@@ -24,12 +24,12 @@
"node": ">=8.10"
},
"dependencies": {
"@xen-orchestra/fs": "^0.19.2",
"@xen-orchestra/fs": "^0.19.3",
"cli-progress": "^3.1.0",
"exec-promise": "^0.7.0",
"getopts": "^2.2.3",
"human-format": "^0.11.0",
"vhd-lib": "^2.0.3"
"vhd-lib": "^3.0.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",

View File

@@ -1 +0,0 @@
module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))

View File

@@ -1,16 +1,16 @@
/* eslint-env jest */
import rimraf from 'rimraf'
import tmp from 'tmp'
import fs from 'fs-extra'
import { getSyncedHandler } from '@xen-orchestra/fs'
import { Disposable, pFromCallback } from 'promise-toolbox'
const rimraf = require('rimraf')
const tmp = require('tmp')
const fs = require('fs-extra')
const { getSyncedHandler } = require('@xen-orchestra/fs')
const { Disposable, pFromCallback } = require('promise-toolbox')
import { openVhd } from '../index'
import { checkFile, createRandomFile, convertFromRawToVhd, createRandomVhdDirectory } from '../tests/utils'
import { VhdAbstract } from './VhdAbstract'
import { BLOCK_UNUSED, FOOTER_SIZE, HEADER_SIZE, PLATFORMS, SECTOR_SIZE } from '../_constants'
import { unpackHeader, unpackFooter } from './_utils'
const { openVhd } = require('../index')
const { checkFile, createRandomFile, convertFromRawToVhd, createRandomVhdDirectory } = require('../tests/utils')
const { VhdAbstract } = require('./VhdAbstract')
const { BLOCK_UNUSED, FOOTER_SIZE, HEADER_SIZE, PLATFORMS, SECTOR_SIZE } = require('../_constants')
const { unpackHeader, unpackFooter } = require('./_utils')
let tempDir

View File

@@ -1,5 +1,11 @@
import { computeBatSize, computeSectorOfBitmap, computeSectorsPerBlock, sectorsToBytes } from './_utils'
import {
const {
computeBatSize,
computeFullBlockSize,
computeSectorOfBitmap,
computeSectorsPerBlock,
sectorsToBytes,
} = require('./_utils')
const {
ALIAS_MAX_PATH_LENGTH,
PLATFORMS,
SECTOR_SIZE,
@@ -7,20 +13,20 @@ import {
FOOTER_SIZE,
HEADER_SIZE,
BLOCK_UNUSED,
} from '../_constants'
import assert from 'assert'
import path from 'path'
import asyncIteratorToStream from 'async-iterator-to-stream'
import { checksumStruct, fuFooter, fuHeader } from '../_structs'
import { isVhdAlias, resolveAlias } from '../_resolveAlias'
} = require('../_constants')
const assert = require('assert')
const path = require('path')
const asyncIteratorToStream = require('async-iterator-to-stream')
const { checksumStruct, fuFooter, fuHeader } = require('../_structs')
const { isVhdAlias, resolveVhdAlias } = require('../aliases')
export class VhdAbstract {
exports.VhdAbstract = class VhdAbstract {
get bitmapSize() {
return sectorsToBytes(this.sectorsOfBitmap)
}
get fullBlockSize() {
return sectorsToBytes(this.sectorsOfBitmap + this.sectorsPerBlock)
return computeFullBlockSize(this.header.blockSize)
}
get sectorsOfBitmap() {
@@ -192,7 +198,7 @@ export class VhdAbstract {
}
static async unlink(handler, path) {
const resolved = await resolveAlias(handler, path)
const resolved = await resolveVhdAlias(handler, path)
try {
await handler.unlink(resolved)
} catch (err) {

View File

@@ -1,12 +1,13 @@
/* eslint-env jest */
import rimraf from 'rimraf'
import tmp from 'tmp'
import { getHandler } from '@xen-orchestra/fs'
import { Disposable, pFromCallback } from 'promise-toolbox'
const rimraf = require('rimraf')
const tmp = require('tmp')
const fs = require('fs-extra')
const { getHandler, getSyncedHandler } = require('@xen-orchestra/fs')
const { Disposable, pFromCallback } = require('promise-toolbox')
import { openVhd } from '../openVhd'
import { createRandomFile, convertFromRawToVhd, convertToVhdDirectory } from '../tests/utils'
const { openVhd, VhdDirectory } = require('../')
const { createRandomFile, convertFromRawToVhd, convertToVhdDirectory } = require('../tests/utils')
let tempDir = null
@@ -65,3 +66,49 @@ test('Can coalesce block', async () => {
expect(parentBlockData).toEqual(childBlockData)
})
})
test('compressed blocks and metadata works', async () => {
  const initalSize = 4
  const rawFileName = `${tempDir}/randomfile`
  const vhdName = `${tempDir}/parent.vhd`
  await createRandomFile(rawFileName, initalSize)
  await convertFromRawToVhd(rawFileName, vhdName)
  await Disposable.use(async function* () {
    const handler = yield getSyncedHandler({ url: `file://${tempDir}` })
    const vhd = yield openVhd(handler, 'parent.vhd')
    await vhd.readBlockAllocationTable()

    // copy the source VHD into a gzip-compressed VhdDirectory
    const compressedVhd = yield VhdDirectory.create(handler, 'compressed.vhd', { compression: 'gzip' })
    compressedVhd.header = vhd.header
    compressedVhd.footer = vhd.footer
    for await (const block of vhd.blocks()) {
      await compressedVhd.writeEntireBlock(block)
    }
    // fix: the original `await Promise.all[(…, …, …)]` indexed `Promise.all`
    // with a comma expression instead of calling it with an array
    await Promise.all([
      compressedVhd.writeHeader(),
      compressedVhd.writeFooter(),
      compressedVhd.writeBlockAllocationTable(),
    ])

    // compressed vhd have a metadata file
    expect(await fs.exists(`${tempDir}/compressed.vhd/metadata.json`)).toEqual(true)
    const metadata = JSON.parse(await handler.readFile('compressed.vhd/metadata.json'))
    expect(metadata.compression.type).toEqual('gzip')
    expect(metadata.compression.options.level).toEqual(1)

    // compressed vhd should not be broken
    await compressedVhd.readHeaderAndFooter()
    await compressedVhd.readBlockAllocationTable()

    // check that footer and header are not modified
    expect(compressedVhd.footer).toEqual(vhd.footer)
    expect(compressedVhd.header).toEqual(vhd.header)

    // their block content should not have changed
    let counter = 0
    for await (const block of compressedVhd.blocks()) {
      const source = await vhd.readBlock(block.id)
      expect(source.data.equals(block.data)).toEqual(true)
      counter++
    }
    // neither the number of blocks
    expect(counter).toEqual(2)
  })
})

View File

@@ -1,15 +1,67 @@
import { unpackHeader, unpackFooter, sectorsToBytes } from './_utils'
import { createLogger } from '@xen-orchestra/log'
import { fuFooter, fuHeader, checksumStruct } from '../_structs'
import { test, set as setBitmap } from '../_bitmap'
import { VhdAbstract } from './VhdAbstract'
import assert from 'assert'
const { unpackHeader, unpackFooter, sectorsToBytes } = require('./_utils')
const { createLogger } = require('@xen-orchestra/log')
const { fuFooter, fuHeader, checksumStruct } = require('../_structs')
const { test, set: setBitmap } = require('../_bitmap')
const { VhdAbstract } = require('./VhdAbstract')
const assert = require('assert')
const promisify = require('promise-toolbox/promisify')
const zlib = require('zlib')
const { debug } = createLogger('vhd-lib:VhdDirectory')
const NULL_COMPRESSOR = {
compress: buffer => buffer,
decompress: buffer => buffer,
baseOptions: {},
}
const COMPRESSORS = {
gzip: {
compress: (
gzip => buffer =>
gzip(buffer, { level: zlib.constants.Z_BEST_SPEED })
)(promisify(zlib.gzip)),
decompress: promisify(zlib.gunzip),
},
brotli: {
compress: (
brotliCompress => buffer =>
brotliCompress(buffer, {
params: {
[zlib.constants.BROTLI_PARAM_QUALITY]: zlib.constants.BROTLI_MIN_QUALITY,
},
})
)(promisify(zlib.brotliCompress)),
decompress: promisify(zlib.brotliDecompress),
},
}
// inject identifiers
for (const id of Object.keys(COMPRESSORS)) {
COMPRESSORS[id].id = id
}
function getCompressor(compressorType) {
if (compressorType === undefined) {
return NULL_COMPRESSOR
}
const compressor = COMPRESSORS[compressorType]
if (compressor === undefined) {
throw new Error(`Compression type ${compressorType} is not supported`)
}
return compressor
}
// ===================================================================
// Directory format
// <path>
// ├─ chunk-filters.json
// │ Ordered array of filters that have been applied before writing chunks.
// │ These filters need to be applied in reverse order to read them.
// │
// ├─ header // raw content of the header
// ├─ footer // raw content of the footer
// ├─ bat // bit array. A zero bit indicates at a position that this block is not present
@@ -18,10 +70,15 @@ const { debug } = createLogger('vhd-lib:VhdDirectory')
// └─ <the first to {blockId.length -3} numbers of blockId >
// └─ <the three last numbers of blockID > // block content.
export class VhdDirectory extends VhdAbstract {
exports.VhdDirectory = class VhdDirectory extends VhdAbstract {
#uncheckedBlockTable
#header
footer
#compressor
get compressionType() {
return this.#compressor.id
}
set header(header) {
this.#header = header
@@ -57,9 +114,9 @@ export class VhdDirectory extends VhdAbstract {
}
}
static async create(handler, path, { flags = 'wx+' } = {}) {
static async create(handler, path, { flags = 'wx+', compression } = {}) {
await handler.mkdir(path)
const vhd = new VhdDirectory(handler, path, { flags })
const vhd = new VhdDirectory(handler, path, { flags, compression })
return {
dispose: () => {},
value: vhd,
@@ -71,6 +128,7 @@ export class VhdDirectory extends VhdAbstract {
this._handler = handler
this._path = path
this._opts = opts
this.#compressor = getCompressor(opts?.compression)
}
async readBlockAllocationTable() {
@@ -90,8 +148,9 @@ export class VhdDirectory extends VhdAbstract {
// here we can implement compression and / or crypto
const buffer = await this._handler.readFile(this._getChunkPath(partName))
const uncompressed = await this.#compressor.decompress(buffer)
return {
buffer: Buffer.from(buffer),
buffer: uncompressed,
}
}
@@ -101,9 +160,9 @@ export class VhdDirectory extends VhdAbstract {
'r',
`Can't write a chunk ${partName} in ${this._path} with read permission`
)
// here we can implement compression and / or crypto
return this._handler.outputFile(this._getChunkPath(partName), buffer, this._opts)
const compressed = await this.#compressor.compress(buffer)
return this._handler.outputFile(this._getChunkPath(partName), compressed, this._opts)
}
// put block in subdirectories to limit impact when doing directory listing
@@ -114,8 +173,20 @@ export class VhdDirectory extends VhdAbstract {
}
async readHeaderAndFooter() {
const { buffer: bufHeader } = await this._readChunk('header')
const { buffer: bufFooter } = await this._readChunk('footer')
await this.#readChunkFilters()
let bufHeader, bufFooter
try {
bufHeader = (await this._readChunk('header')).buffer
bufFooter = (await this._readChunk('footer')).buffer
} catch (error) {
// emit an AssertionError if the VHD is broken to stay as close as possible to the VhdFile API
if (error.code === 'ENOENT') {
assert(false, 'Header And Footer should exists')
} else {
throw error
}
}
const footer = unpackFooter(bufFooter)
const header = unpackHeader(bufHeader, footer)
@@ -150,12 +221,13 @@ export class VhdDirectory extends VhdAbstract {
await this._writeChunk('footer', rawFooter)
}
writeHeader() {
async writeHeader() {
const { header } = this
const rawHeader = fuHeader.pack(header)
header.checksum = checksumStruct(rawHeader, fuHeader)
debug(`Write header (checksum=${header.checksum}). (data=${rawHeader.toString('hex')})`)
return this._writeChunk('header', rawHeader)
await this._writeChunk('header', rawHeader)
await this.#writeChunkFilters()
}
writeBlockAllocationTable() {
@@ -167,9 +239,13 @@ export class VhdDirectory extends VhdAbstract {
// only works if data are in the same handler
// and if the full block is modified in child ( which is the case whit xcp)
// and if the compression type is same on both sides
async coalesceBlock(child, blockId) {
if (!(child instanceof VhdDirectory) || this._handler !== child._handler) {
if (
!(child instanceof VhdDirectory) ||
this._handler !== child._handler ||
child.compressionType !== this.compressionType
) {
return super.coalesceBlock(child, blockId)
}
await this._handler.copy(
@@ -192,4 +268,24 @@ export class VhdDirectory extends VhdAbstract {
await this._writeChunk('parentLocatorEntry' + id, data)
this.header.parentLocatorEntry[id].platformDataOffset = 0
}
async #writeChunkFilters() {
const compressionType = this.compressionType
const path = this._path + '/chunk-filters.json'
if (compressionType === undefined) {
await this._handler.unlink(path)
} else {
await this._handler.writeFile(path, JSON.stringify([compressionType]))
}
}
async #readChunkFilters() {
const chunkFilters = await this._handler.readFile(this._path + '/chunk-filters.json').then(JSON.parse, error => {
if (error.code === 'ENOENT') {
return []
}
throw error
})
this.#compressor = getCompressor(chunkFilters[0])
}
}

View File

@@ -1,25 +1,25 @@
/* eslint-env jest */
import execa from 'execa'
import fs from 'fs-extra'
import getStream from 'get-stream'
import rimraf from 'rimraf'
import tmp from 'tmp'
import { getHandler } from '@xen-orchestra/fs'
import { Disposable, pFromCallback } from 'promise-toolbox'
import { randomBytes } from 'crypto'
const execa = require('execa')
const fs = require('fs-extra')
const getStream = require('get-stream')
const rimraf = require('rimraf')
const tmp = require('tmp')
const { getHandler } = require('@xen-orchestra/fs')
const { Disposable, pFromCallback } = require('promise-toolbox')
const { randomBytes } = require('crypto')
import { VhdFile } from './VhdFile'
import { openVhd } from '../openVhd'
const { VhdFile } = require('./VhdFile')
const { openVhd } = require('../openVhd')
import { SECTOR_SIZE } from '../_constants'
import {
const { SECTOR_SIZE } = require('../_constants')
const {
checkFile,
createRandomFile,
convertFromRawToVhd,
convertToVhdDirectory,
recoverRawContent,
} from '../tests/utils'
} = require('../tests/utils')
let tempDir = null

View File

@@ -1,11 +1,18 @@
import { BLOCK_UNUSED, FOOTER_SIZE, HEADER_SIZE, PLATFORMS, SECTOR_SIZE, PARENT_LOCATOR_ENTRIES } from '../_constants'
import { computeBatSize, sectorsToBytes, unpackHeader, unpackFooter, BUF_BLOCK_UNUSED } from './_utils'
import { createLogger } from '@xen-orchestra/log'
import { fuFooter, fuHeader, checksumStruct } from '../_structs'
import { set as mapSetBit } from '../_bitmap'
import { VhdAbstract } from './VhdAbstract'
import assert from 'assert'
import getFirstAndLastBlocks from '../_getFirstAndLastBlocks'
const {
BLOCK_UNUSED,
FOOTER_SIZE,
HEADER_SIZE,
PLATFORMS,
SECTOR_SIZE,
PARENT_LOCATOR_ENTRIES,
} = require('../_constants')
const { computeBatSize, sectorsToBytes, unpackHeader, unpackFooter, BUF_BLOCK_UNUSED } = require('./_utils')
const { createLogger } = require('@xen-orchestra/log')
const { fuFooter, fuHeader, checksumStruct } = require('../_structs')
const { set: mapSetBit } = require('../_bitmap')
const { VhdAbstract } = require('./VhdAbstract')
const assert = require('assert')
const getFirstAndLastBlocks = require('../_getFirstAndLastBlocks')
const { debug } = createLogger('vhd-lib:VhdFile')
@@ -43,7 +50,7 @@ const { debug } = createLogger('vhd-lib:VhdFile')
// - parentLocatorSize(i) = header.parentLocatorEntry[i].platformDataSpace * sectorSize
// - sectorSize = 512
export class VhdFile extends VhdAbstract {
exports.VhdFile = class VhdFile extends VhdAbstract {
#uncheckedBlockTable
#header
footer

View File

@@ -1,14 +1,14 @@
/* eslint-env jest */
import rimraf from 'rimraf'
import tmp from 'tmp'
import { Disposable, pFromCallback } from 'promise-toolbox'
import { getSyncedHandler } from '@xen-orchestra/fs'
const rimraf = require('rimraf')
const tmp = require('tmp')
const { Disposable, pFromCallback } = require('promise-toolbox')
const { getSyncedHandler } = require('@xen-orchestra/fs')
import { SECTOR_SIZE, PLATFORMS } from '../_constants'
import { createRandomFile, convertFromRawToVhd } from '../tests/utils'
import { openVhd, chainVhd } from '..'
import { VhdSynthetic } from './VhdSynthetic'
const { SECTOR_SIZE, PLATFORMS } = require('../_constants')
const { createRandomFile, convertFromRawToVhd } = require('../tests/utils')
const { openVhd, chainVhd } = require('..')
const { VhdSynthetic } = require('./VhdSynthetic')
let tempDir = null

View File

@@ -1,12 +1,12 @@
import * as UUID from 'uuid'
import cloneDeep from 'lodash/cloneDeep.js'
import { asyncMap } from '@xen-orchestra/async-map'
import { VhdAbstract } from './VhdAbstract'
import { DISK_TYPES, FOOTER_SIZE, HEADER_SIZE } from '../_constants'
const UUID = require('uuid')
const cloneDeep = require('lodash/cloneDeep.js')
const { asyncMap } = require('@xen-orchestra/async-map')
const { VhdAbstract } = require('./VhdAbstract')
const { DISK_TYPES, FOOTER_SIZE, HEADER_SIZE } = require('../_constants')
import assert from 'assert'
const assert = require('assert')
export class VhdSynthetic extends VhdAbstract {
exports.VhdSynthetic = class VhdSynthetic extends VhdAbstract {
#vhds = []
get header() {

View File

@@ -0,0 +1,67 @@
const assert = require('assert')
const { BLOCK_UNUSED, SECTOR_SIZE } = require('../_constants')
const { fuFooter, fuHeader, checksumStruct, unpackField } = require('../_structs')
const checkFooter = require('../checkFooter')
const checkHeader = require('../_checkHeader')
const computeBatSize = entries => sectorsToBytes(sectorsRoundUpNoZero(entries * 4))
exports.computeBatSize = computeBatSize
const computeSectorsPerBlock = blockSize => blockSize / SECTOR_SIZE
exports.computeSectorsPerBlock = computeSectorsPerBlock
// one bit per sector
const computeBlockBitmapSize = blockSize => computeSectorsPerBlock(blockSize) >>> 3
exports.computeBlockBitmapSize = computeBlockBitmapSize
const computeFullBlockSize = blockSize => blockSize + SECTOR_SIZE * computeSectorOfBitmap(blockSize)
exports.computeFullBlockSize = computeFullBlockSize
const computeSectorOfBitmap = blockSize => sectorsRoundUpNoZero(computeBlockBitmapSize(blockSize))
exports.computeSectorOfBitmap = computeSectorOfBitmap
// Sectors conversions.
const sectorsRoundUpNoZero = bytes => Math.ceil(bytes / SECTOR_SIZE) || 1
exports.sectorsRoundUpNoZero = sectorsRoundUpNoZero
const sectorsToBytes = sectors => sectors * SECTOR_SIZE
exports.sectorsToBytes = sectorsToBytes
const assertChecksum = (name, buf, struct) => {
const actual = unpackField(struct.fields.checksum, buf)
const expected = checksumStruct(buf, struct)
assert.strictEqual(actual, expected, `invalid ${name} checksum ${actual}, expected ${expected}`)
}
exports.assertChecksum = assertChecksum
// unused block as buffer containing a uint32BE
const BUF_BLOCK_UNUSED = Buffer.allocUnsafe(4)
BUF_BLOCK_UNUSED.writeUInt32BE(BLOCK_UNUSED, 0)
exports.BUF_BLOCK_UNUSED = BUF_BLOCK_UNUSED
/**
* Check and parse the header buffer to build an header object
*
* @param {Buffer} bufHeader
* @param {Object} footer
* @returns {Object} the parsed header
*/
exports.unpackHeader = (bufHeader, footer) => {
assertChecksum('header', bufHeader, fuHeader)
const header = fuHeader.unpack(bufHeader)
checkHeader(header, footer)
return header
}
/**
* Check and parse the footer buffer to build a footer object
*
* @param {Buffer} bufHeader
* @param {Object} footer
* @returns {Object} the parsed footer
*/
exports.unpackFooter = bufFooter => {
assertChecksum('footer', bufFooter, fuFooter)
const footer = fuFooter.unpack(bufFooter)
checkFooter(footer)
return footer
}

View File

@@ -0,0 +1,7 @@
const MASK = 0x80
exports.set = (map, bit) => {
map[bit >> 3] |= MASK >> (bit & 7)
}
exports.test = (map, bit) => ((map[bit >> 3] << (bit & 7)) & MASK) !== 0

View File

@@ -1,8 +1,8 @@
import assert from 'assert'
const assert = require('assert')
import { HEADER_COOKIE, HEADER_VERSION, SECTOR_SIZE } from './_constants'
const { HEADER_COOKIE, HEADER_VERSION, SECTOR_SIZE } = require('./_constants')
export default (header, footer) => {
module.exports = (header, footer) => {
assert.strictEqual(header.cookie, HEADER_COOKIE)
assert.strictEqual(header.dataOffset, undefined)
assert.strictEqual(header.headerVersion, HEADER_VERSION)

View File

@@ -1,6 +1,6 @@
import { SECTOR_SIZE } from './_constants'
const { SECTOR_SIZE } = require('./_constants')
export default function computeGeometryForSize(size) {
module.exports = function computeGeometryForSize(size) {
const totalSectors = Math.min(Math.ceil(size / 512), 65535 * 16 * 255)
let sectorsPerTrackCylinder
let heads

View File

@@ -0,0 +1,40 @@
exports.BLOCK_UNUSED = 0xffffffff
// This lib has been extracted from the Xen Orchestra project.
exports.CREATOR_APPLICATION = 'xo '
// Sizes in bytes.
exports.FOOTER_SIZE = 512
exports.HEADER_SIZE = 1024
exports.SECTOR_SIZE = 512
exports.DEFAULT_BLOCK_SIZE = 0x00200000 // from the spec
exports.FOOTER_COOKIE = 'conectix'
exports.HEADER_COOKIE = 'cxsparse'
exports.DISK_TYPES = {
__proto__: null,
FIXED: 2,
DYNAMIC: 3,
DIFFERENCING: 4,
}
exports.PARENT_LOCATOR_ENTRIES = 8
exports.PLATFORMS = {
__proto__: null,
NONE: 0,
WI2R: 0x57693272,
WI2K: 0x5769326b,
W2RU: 0x57327275,
W2KU: 0x57326b75,
MAC: 0x4d616320,
MACX: 0x4d616358,
}
exports.FILE_FORMAT_VERSION = 1 << 16
exports.HEADER_VERSION = 1 << 16
exports.ALIAS_MAX_PATH_LENGTH = 1024

View File

@@ -1,5 +1,5 @@
/* eslint-env jest */
import { createFooter } from './_createFooterHeader'
const { createFooter } = require('./_createFooterHeader')
test('createFooter() does not crash', () => {
createFooter(104448, Math.floor(Date.now() / 1000), {

View File

@@ -1,9 +1,9 @@
import { v4 as generateUuid } from 'uuid'
const { v4: generateUuid } = require('uuid')
import { checksumStruct, fuFooter, fuHeader } from './_structs'
import {
const { checksumStruct, fuFooter, fuHeader } = require('./_structs')
const {
CREATOR_APPLICATION,
DEFAULT_BLOCK_SIZE as VHD_BLOCK_SIZE_BYTES,
DEFAULT_BLOCK_SIZE: VHD_BLOCK_SIZE_BYTES,
DISK_TYPES,
FILE_FORMAT_VERSION,
FOOTER_COOKIE,
@@ -12,9 +12,9 @@ import {
HEADER_SIZE,
HEADER_VERSION,
PLATFORMS,
} from './_constants'
} = require('./_constants')
export function createFooter(size, timestamp, geometry, dataOffset, diskType = DISK_TYPES.FIXED) {
exports.createFooter = function createFooter(size, timestamp, geometry, dataOffset, diskType = DISK_TYPES.FIXED) {
const footer = fuFooter.pack({
cookie: FOOTER_COOKIE,
features: 2,
@@ -33,7 +33,7 @@ export function createFooter(size, timestamp, geometry, dataOffset, diskType = D
return footer
}
export function createHeader(
exports.createHeader = function createHeader(
maxTableEntries,
tableOffset = HEADER_SIZE + FOOTER_SIZE,
blockSize = VHD_BLOCK_SIZE_BYTES

View File

@@ -1,10 +1,10 @@
import assert from 'assert'
const assert = require('assert')
import { BLOCK_UNUSED } from './_constants'
const { BLOCK_UNUSED } = require('./_constants')
// get the identifiers and first sectors of the first and last block
// in the file
export default bat => {
module.exports = bat => {
const n = bat.length
if (n === 0) {
return

View File

@@ -0,0 +1 @@
module.exports = Function.prototype

View File

@@ -1,5 +1,5 @@
import { dirname, resolve } from 'path'
const { dirname, resolve } = require('path')
const resolveRelativeFromFile = (file, path) => resolve('/', dirname(file), path).slice(1)
export { resolveRelativeFromFile as default }
module.exports = resolveRelativeFromFile

View File

@@ -1,7 +1,7 @@
import assert from 'assert'
import fu from 'struct-fu'
const assert = require('assert')
const fu = require('struct-fu')
import { FOOTER_SIZE, HEADER_SIZE, PARENT_LOCATOR_ENTRIES } from './_constants'
const { FOOTER_SIZE, HEADER_SIZE, PARENT_LOCATOR_ENTRIES } = require('./_constants')
const SIZE_OF_32_BITS = Math.pow(2, 32)
@@ -17,7 +17,7 @@ const uint64Undefinable = fu.derive(
_ => (_[0] === 0xffffffff && _[1] === 0xffffffff ? undefined : _[0] * SIZE_OF_32_BITS + _[1])
)
export const fuFooter = fu.struct([
const fuFooter = fu.struct([
fu.char('cookie', 8), // 0
fu.uint32('features'), // 8
fu.uint32('fileFormatVersion'), // 12
@@ -40,9 +40,10 @@ export const fuFooter = fu.struct([
fu.char('hidden'), // 85 TODO: should probably be merged in reserved
fu.char('reserved', 426), // 86
])
exports.fuFooter = fuFooter
assert.strictEqual(fuFooter.size, FOOTER_SIZE)
export const fuHeader = fu.struct([
const fuHeader = fu.struct([
fu.char('cookie', 8),
uint64Undefinable('dataOffset'),
uint64('tableOffset'),
@@ -67,15 +68,18 @@ export const fuHeader = fu.struct([
),
fu.char('reserved2', 256),
])
exports.fuHeader = fuHeader
assert.strictEqual(fuHeader.size, HEADER_SIZE)
export const packField = (field, value, buf) => {
const packField = (field, value, buf) => {
const { offset } = field
field.pack(value, buf, typeof offset !== 'object' ? { bytes: offset, bits: 0 } : offset)
}
exports.packField = packField
export const unpackField = (field, buf) => {
exports.unpackField = (field, buf) => {
const { offset } = field
return field.unpack(buf, typeof offset !== 'object' ? { bytes: offset, bits: 0 } : offset)
@@ -83,7 +87,7 @@ export const unpackField = (field, buf) => {
// Returns the checksum of a raw struct.
// The raw struct (footer or header) is altered with the new sum.
export function checksumStruct(buf, struct) {
exports.checksumStruct = function checksumStruct(buf, struct) {
const checksumField = struct.fields.checksum
let sum = 0

View File

@@ -1,12 +1,12 @@
/* eslint-env jest */
import rimraf from 'rimraf'
import tmp from 'tmp'
import { getSyncedHandler } from '@xen-orchestra/fs'
import { Disposable, pFromCallback } from 'promise-toolbox'
const rimraf = require('rimraf')
const tmp = require('tmp')
const { getSyncedHandler } = require('@xen-orchestra/fs')
const { Disposable, pFromCallback } = require('promise-toolbox')
import { isVhdAlias, resolveAlias } from './_resolveAlias'
import { ALIAS_MAX_PATH_LENGTH } from './_constants'
const { isVhdAlias, resolveVhdAlias } = require('./aliases')
const { ALIAS_MAX_PATH_LENGTH } = require('./_constants')
let tempDir
@@ -28,7 +28,7 @@ test('is vhd alias recognize only *.alias.vhd files', () => {
})
test('resolve return the path in argument for a non alias file ', async () => {
expect(await resolveAlias(null, 'filename.vhd')).toEqual('filename.vhd')
expect(await resolveVhdAlias(null, 'filename.vhd')).toEqual('filename.vhd')
})
test('resolve get the path of the target file for an alias', async () => {
await Disposable.use(async function* () {
@@ -36,12 +36,12 @@ test('resolve get the path of the target file for an alias', async () => {
const handler = yield getSyncedHandler({ url: `file://${tempDir}` })
const alias = `alias.alias.vhd`
await handler.writeFile(alias, 'target.vhd')
await expect(await resolveAlias(handler, alias)).toEqual(`target.vhd`)
await expect(await resolveVhdAlias(handler, alias)).toEqual(`target.vhd`)
// different directory
await handler.mkdir(`sub`)
await handler.writeFile(alias, 'sub/target.vhd', { flags: 'w' })
await expect(await resolveAlias(handler, alias)).toEqual(`sub/target.vhd`)
await expect(await resolveVhdAlias(handler, alias)).toEqual(`sub/target.vhd`)
})
})
@@ -51,7 +51,7 @@ test('resolve throws an error an alias to an alias', async () => {
const alias = `alias.alias.vhd`
const target = `target.alias.vhd`
await handler.writeFile(alias, target)
await expect(async () => await resolveAlias(handler, alias)).rejects.toThrow(Error)
await expect(async () => await resolveVhdAlias(handler, alias)).rejects.toThrow(Error)
})
})
@@ -59,6 +59,6 @@ test('resolve throws an error on a file too big ', async () => {
await Disposable.use(async function* () {
const handler = yield getSyncedHandler({ url: `file://${tempDir}` })
await handler.writeFile('toobig.alias.vhd', Buffer.alloc(ALIAS_MAX_PATH_LENGTH + 1, 0))
await expect(async () => await resolveAlias(handler, 'toobig.alias.vhd')).rejects.toThrow(Error)
await expect(async () => await resolveVhdAlias(handler, 'toobig.alias.vhd')).rejects.toThrow(Error)
})
})

View File

@@ -1,11 +1,12 @@
import { ALIAS_MAX_PATH_LENGTH } from './_constants'
import resolveRelativeFromFile from './_resolveRelativeFromFile'
const { ALIAS_MAX_PATH_LENGTH } = require('./_constants')
const resolveRelativeFromFile = require('./_resolveRelativeFromFile')
export function isVhdAlias(filename) {
function isVhdAlias(filename) {
return filename.endsWith('.alias.vhd')
}
exports.isVhdAlias = isVhdAlias
export async function resolveAlias(handler, filename) {
exports.resolveVhdAlias = async function resolveVhdAlias(handler, filename) {
if (!isVhdAlias(filename)) {
return filename
}

View File

@@ -1,10 +1,10 @@
import { dirname, relative } from 'path'
const { dirname, relative } = require('path')
import { openVhd } from './'
import { DISK_TYPES } from './_constants'
import { Disposable } from 'promise-toolbox'
const { openVhd } = require('./openVhd')
const { DISK_TYPES } = require('./_constants')
const { Disposable } = require('promise-toolbox')
export default async function chain(parentHandler, parentPath, childHandler, childPath, force = false) {
module.exports = async function chain(parentHandler, parentPath, childHandler, childPath, force = false) {
await Disposable.use(
[openVhd(parentHandler, parentPath), openVhd(childHandler, childPath)],
async ([parentVhd, childVhd]) => {

View File

@@ -0,0 +1,14 @@
const { openVhd } = require('./openVhd')
const resolveRelativeFromFile = require('./_resolveRelativeFromFile')
const { DISK_TYPES } = require('./_constants')
const { Disposable } = require('promise-toolbox')
module.exports = async function checkChain(handler, path) {
await Disposable.use(function* () {
let vhd
do {
vhd = yield openVhd(handler, path)
path = resolveRelativeFromFile(path, vhd.header.parentUnicodeName)
} while (vhd.footer.diskType !== DISK_TYPES.DYNAMIC)
})
}

View File

@@ -1,8 +1,8 @@
import assert from 'assert'
const assert = require('assert')
import { DISK_TYPES, FILE_FORMAT_VERSION, FOOTER_COOKIE, FOOTER_SIZE } from './_constants'
const { DISK_TYPES, FILE_FORMAT_VERSION, FOOTER_COOKIE, FOOTER_SIZE } = require('./_constants')
export default footer => {
module.exports = footer => {
assert.strictEqual(footer.cookie, FOOTER_COOKIE)
assert.strictEqual(footer.dataOffset, FOOTER_SIZE)
assert.strictEqual(footer.fileFormatVersion, FILE_FORMAT_VERSION)

View File

@@ -1,19 +1,19 @@
import assert from 'assert'
import asyncIteratorToStream from 'async-iterator-to-stream'
import { forEachRight } from 'lodash'
const assert = require('assert')
const asyncIteratorToStream = require('async-iterator-to-stream')
const { forEachRight } = require('lodash')
import computeGeometryForSize from './_computeGeometryForSize'
import { createFooter, createHeader } from './_createFooterHeader'
import {
const computeGeometryForSize = require('./_computeGeometryForSize')
const { createFooter, createHeader } = require('./_createFooterHeader')
const {
BLOCK_UNUSED,
DEFAULT_BLOCK_SIZE as VHD_BLOCK_SIZE_BYTES,
DEFAULT_BLOCK_SIZE: VHD_BLOCK_SIZE_BYTES,
DISK_TYPES,
FOOTER_SIZE,
HEADER_SIZE,
SECTOR_SIZE,
} from './_constants'
} = require('./_constants')
import { set as setBitmap } from './_bitmap'
const { set: setBitmap } = require('./_bitmap')
const VHD_BLOCK_SIZE_SECTORS = VHD_BLOCK_SIZE_BYTES / SECTOR_SIZE
@@ -55,7 +55,12 @@ function createBAT({ firstBlockPosition, fragmentLogicAddressList, fragmentSize,
* @returns {Promise<Function>}
*/
export default async function createReadableStream(diskSize, fragmentSize, fragmentLogicAddressList, fragmentIterator) {
module.exports = async function createReadableStream(
diskSize,
fragmentSize,
fragmentLogicAddressList,
fragmentIterator
) {
const ratio = VHD_BLOCK_SIZE_BYTES / fragmentSize
if (ratio % 1 !== 0) {
throw new Error(

View File

@@ -0,0 +1,54 @@
const { parseVhdStream } = require('./parseVhdStream.js')
const { VhdDirectory } = require('./Vhd/VhdDirectory.js')
const { Disposable } = require('promise-toolbox')
const { asyncEach } = require('@vates/async-each')
const buildVhd = Disposable.wrap(async function* (handler, path, inputStream, { concurrency, compression }) {
const vhd = yield VhdDirectory.create(handler, path, { compression })
await asyncEach(
parseVhdStream(inputStream),
async function (item) {
switch (item.type) {
case 'footer':
vhd.footer = item.footer
break
case 'header':
vhd.header = item.header
break
case 'parentLocator':
await vhd.writeParentLocator({ ...item, data: item.buffer })
break
case 'block':
await vhd.writeEntireBlock(item)
break
case 'bat':
// it exists but I don't care
break
default:
throw new Error(`unhandled type of block generated by parser : ${item.type} while generating ${path}`)
}
},
{
concurrency,
}
)
await Promise.all([vhd.writeFooter(), vhd.writeHeader(), vhd.writeBlockAllocationTable()])
})
exports.createVhdDirectoryFromStream = async function createVhdDirectoryFromStream(
handler,
path,
inputStream,
{ validator, concurrency = 16, compression } = {}
) {
try {
await buildVhd(handler, path, inputStream, { concurrency, compression })
if (validator !== undefined) {
await validator.call(this, path)
}
} catch (error) {
// cleanup on error
await handler.rmtree(path)
throw error
}
}

View File

@@ -1,17 +1,17 @@
/* eslint-env jest */
import execa from 'execa'
import fs from 'fs-extra'
import rimraf from 'rimraf'
import getStream from 'get-stream'
import tmp from 'tmp'
import { createReadStream, createWriteStream } from 'fs'
import { pFromCallback } from 'promise-toolbox'
import { pipeline } from 'readable-stream'
const execa = require('execa')
const fs = require('fs-extra')
const rimraf = require('rimraf')
const getStream = require('get-stream')
const tmp = require('tmp')
const { createReadStream, createWriteStream } = require('fs')
const { pFromCallback } = require('promise-toolbox')
const { pipeline } = require('readable-stream')
import { createVhdStreamWithLength } from '.'
import { FOOTER_SIZE } from './_constants'
import { createRandomFile, convertFromRawToVhd, convertFromVhdToRaw } from './tests/utils'
const { createVhdStreamWithLength } = require('./createVhdStreamWithLength.js')
const { FOOTER_SIZE } = require('./_constants')
const { createRandomFile, convertFromRawToVhd, convertFromVhdToRaw } = require('./tests/utils')
let tempDir = null

View File

@@ -1,13 +1,13 @@
import assert from 'assert'
import { pipeline, Transform } from 'readable-stream'
import { readChunk } from '@vates/read-chunk'
const assert = require('assert')
const { pipeline, Transform } = require('readable-stream')
const { readChunk } = require('@vates/read-chunk')
import checkFooter from './checkFooter'
import checkHeader from './_checkHeader'
import noop from './_noop'
import getFirstAndLastBlocks from './_getFirstAndLastBlocks'
import { FOOTER_SIZE, HEADER_SIZE, SECTOR_SIZE } from './_constants'
import { fuFooter, fuHeader } from './_structs'
const checkFooter = require('./checkFooter')
const checkHeader = require('./_checkHeader')
const noop = require('./_noop')
const getFirstAndLastBlocks = require('./_getFirstAndLastBlocks')
const { FOOTER_SIZE, HEADER_SIZE, SECTOR_SIZE } = require('./_constants')
const { fuFooter, fuHeader } = require('./_structs')
class EndCutterStream extends Transform {
constructor(footerOffset, footerBuffer) {
@@ -35,7 +35,7 @@ class EndCutterStream extends Transform {
}
}
export default async function createVhdStreamWithLength(stream) {
module.exports = async function createVhdStreamWithLength(stream) {
const readBuffers = []
let streamPosition = 0

14
packages/vhd-lib/index.js Normal file
View File

@@ -0,0 +1,14 @@
exports.chainVhd = require('./chain')
exports.checkFooter = require('./checkFooter')
exports.checkVhdChain = require('./checkChain')
exports.createReadableSparseStream = require('./createReadableSparseStream')
exports.createVhdStreamWithLength = require('./createVhdStreamWithLength')
exports.createVhdDirectoryFromStream = require('./createVhdDirectoryFromStream').createVhdDirectoryFromStream
exports.mergeVhd = require('./merge')
exports.peekFooterFromVhdStream = require('./peekFooterFromVhdStream')
exports.openVhd = require('./openVhd').openVhd
exports.VhdAbstract = require('./Vhd/VhdAbstract').VhdAbstract
exports.VhdDirectory = require('./Vhd/VhdDirectory').VhdDirectory
exports.VhdFile = require('./Vhd/VhdFile').VhdFile
exports.VhdSynthetic = require('./Vhd/VhdSynthetic').VhdSynthetic
exports.Constants = require('./_constants')

View File

@@ -1,14 +1,14 @@
/* eslint-env jest */
import fs from 'fs-extra'
import rimraf from 'rimraf'
import tmp from 'tmp'
import { getHandler } from '@xen-orchestra/fs'
import { pFromCallback } from 'promise-toolbox'
const fs = require('fs-extra')
const rimraf = require('rimraf')
const tmp = require('tmp')
const { getHandler } = require('@xen-orchestra/fs')
const { pFromCallback } = require('promise-toolbox')
import { VhdFile, chainVhd, mergeVhd as vhdMerge } from './index'
const { VhdFile, chainVhd, mergeVhd: vhdMerge } = require('./index')
import { checkFile, createRandomFile, convertFromRawToVhd } from './tests/utils'
const { checkFile, createRandomFile, convertFromRawToVhd } = require('./tests/utils')
let tempDir = null

View File

@@ -1,21 +1,34 @@
// TODO: remove once completely merged in vhd.js
import assert from 'assert'
import noop from './_noop'
import { createLogger } from '@xen-orchestra/log'
import { limitConcurrency } from 'limit-concurrency-decorator'
const assert = require('assert')
const noop = require('./_noop')
const { createLogger } = require('@xen-orchestra/log')
const { limitConcurrency } = require('limit-concurrency-decorator')
import { openVhd } from '.'
import { basename, dirname } from 'path'
import { DISK_TYPES } from './_constants'
import { Disposable } from 'promise-toolbox'
const { openVhd } = require('./openVhd')
const { basename, dirname } = require('path')
const { DISK_TYPES } = require('./_constants')
const { Disposable } = require('promise-toolbox')
const { asyncEach } = require('@vates/async-each')
const { VhdDirectory } = require('./Vhd/VhdDirectory')
const { warn } = createLogger('vhd-lib:merge')
function makeThrottledWriter(handler, path, delay) {
let lastWrite = Date.now()
return async json => {
const now = Date.now()
if (now - lastWrite > delay) {
lastWrite = now
await handler.writeFile(path, JSON.stringify(json), { flags: 'w' }).catch(warn)
}
}
}
// Merge vhd child into vhd parent.
//
// TODO: rename the VHD file during the merge
export default limitConcurrency(2)(async function merge(
module.exports = limitConcurrency(2)(async function merge(
parentHandler,
parentPath,
childHandler,
@@ -25,12 +38,16 @@ export default limitConcurrency(2)(async function merge(
const mergeStatePath = dirname(parentPath) + '/' + '.' + basename(parentPath) + '.merge.json'
return await Disposable.use(async function* () {
let mergeState = await parentHandler.readFile(mergeStatePath).catch(error => {
let mergeState
try {
const mergeStateContent = await parentHandler.readFile(mergeStatePath)
mergeState = JSON.parse(mergeStateContent)
} catch (error) {
if (error.code !== 'ENOENT') {
throw error
warn('problem while checking the merge state', { error })
}
// no merge state in case of missing file
})
}
// during merging, the end footer of the parent can be overwritten by new blocks
// we should use it as a way to check vhd health
const parentVhd = yield openVhd(parentHandler, parentPath, {
@@ -38,18 +55,17 @@ export default limitConcurrency(2)(async function merge(
checkSecondFooter: mergeState === undefined,
})
const childVhd = yield openVhd(childHandler, childPath)
if (mergeState !== undefined) {
mergeState = JSON.parse(mergeState)
// ensure the correct merge will be continued
assert.strictEqual(parentVhd.header.checksum, mergeState.parent.header)
assert.strictEqual(childVhd.header.checksum, mergeState.child.header)
} else {
const concurrency = childVhd instanceof VhdDirectory ? 16 : 1
if (mergeState === undefined) {
assert.strictEqual(childVhd.header.blockSize, parentVhd.header.blockSize)
const parentDiskType = parentVhd.footer.diskType
assert(parentDiskType === DISK_TYPES.DIFFERENCING || parentDiskType === DISK_TYPES.DYNAMIC)
assert.strictEqual(childVhd.footer.diskType, DISK_TYPES.DIFFERENCING)
} else {
assert.strictEqual(parentVhd.header.checksum, mergeState.parent.header)
assert.strictEqual(childVhd.header.checksum, mergeState.child.header)
}
// Read allocation table of child/parent.
@@ -74,30 +90,40 @@ export default limitConcurrency(2)(async function merge(
}
// counts number of allocated blocks
let nBlocks = 0
const toMerge = []
for (let block = mergeState.currentBlock; block < maxTableEntries; block++) {
if (childVhd.containsBlock(block)) {
nBlocks += 1
toMerge.push(block)
}
}
const nBlocks = toMerge.length
onProgress({ total: nBlocks, done: 0 })
// merges blocks
for (let i = 0; i < nBlocks; ++i, ++mergeState.currentBlock) {
while (!childVhd.containsBlock(mergeState.currentBlock)) {
++mergeState.currentBlock
const merging = new Set()
let counter = 0
const mergeStateWriter = makeThrottledWriter(parentHandler, mergeStatePath, 10e3)
await asyncEach(
toMerge,
async blockId => {
merging.add(blockId)
mergeState.mergedDataSize += await parentVhd.coalesceBlock(childVhd, blockId)
merging.delete(blockId)
onProgress({
total: nBlocks,
done: counter + 1,
})
counter++
mergeState.currentBlock = Math.min(...merging)
mergeStateWriter(mergeState)
},
{
concurrency,
}
await parentHandler.writeFile(mergeStatePath, JSON.stringify(mergeState), { flags: 'w' }).catch(warn)
mergeState.mergedDataSize += await parentVhd.coalesceBlock(childVhd, mergeState.currentBlock)
onProgress({
total: nBlocks,
done: i + 1,
})
}
)
onProgress({ total: nBlocks, done: nBlocks })
// some blocks could have been created or moved in parent : write bat
await parentVhd.writeBlockAllocationTable()

View File

@@ -1,14 +1,14 @@
/* eslint-env jest */
import rimraf from 'rimraf'
import tmp from 'tmp'
import { getSyncedHandler } from '@xen-orchestra/fs'
import { Disposable, pFromCallback } from 'promise-toolbox'
const rimraf = require('rimraf')
const tmp = require('tmp')
const { getSyncedHandler } = require('@xen-orchestra/fs')
const { Disposable, pFromCallback } = require('promise-toolbox')
import { openVhd } from './index'
import { createRandomFile, convertFromRawToVhd, createRandomVhdDirectory } from './tests/utils'
const { openVhd } = require('./index')
const { createRandomFile, convertFromRawToVhd, createRandomVhdDirectory } = require('./tests/utils')
import { VhdAbstract } from './Vhd/VhdAbstract'
const { VhdAbstract } = require('./Vhd/VhdAbstract')
let tempDir

View File

@@ -0,0 +1,15 @@
const { resolveVhdAlias } = require('./aliases')
const { VhdDirectory } = require('./Vhd/VhdDirectory.js')
const { VhdFile } = require('./Vhd/VhdFile.js')
exports.openVhd = async function openVhd(handler, path, opts) {
const resolved = await resolveVhdAlias(handler, path)
try {
return await VhdFile.open(handler, resolved, opts)
} catch (e) {
if (e.code !== 'EISDIR') {
throw e
}
return await VhdDirectory.open(handler, resolved, opts)
}
}

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "vhd-lib",
"version": "2.0.3",
"version": "3.0.0",
"license": "AGPL-3.0-or-later",
"description": "Primitives for VHD file handling",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/vhd-lib",
@@ -11,9 +11,8 @@
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"main": "dist/",
"engines": {
"node": ">=10"
"node": ">=12"
},
"dependencies": {
"@vates/async-each": "^0.1.0",
@@ -28,25 +27,12 @@
"uuid": "^8.3.1"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"@xen-orchestra/fs": "^0.19.2",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^7.0.2",
"execa": "^5.0.0",
"@xen-orchestra/fs": "^0.19.3",
"get-stream": "^6.0.0",
"readable-stream": "^3.0.6",
"rimraf": "^3.0.0",
"tmp": "^0.2.1"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
"clean": "rimraf dist/",
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run clean",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
},
"author": {

View File

@@ -1,10 +1,7 @@
import { VhdDirectory } from './'
import { BLOCK_UNUSED, FOOTER_SIZE, HEADER_SIZE, SECTOR_SIZE } from './_constants'
import { readChunk } from '@vates/read-chunk'
import assert from 'assert'
import { Disposable } from 'promise-toolbox'
import { unpackFooter, unpackHeader, computeBlockBitmapSize } from './Vhd/_utils'
import { asyncEach } from '@vates/async-each'
const { BLOCK_UNUSED, FOOTER_SIZE, HEADER_SIZE, SECTOR_SIZE } = require('./_constants')
const { readChunk } = require('@vates/read-chunk')
const assert = require('assert')
const { unpackFooter, unpackHeader, computeFullBlockSize } = require('./Vhd/_utils')
const cappedBufferConcat = (buffers, maxSize) => {
let buffer = Buffer.concat(buffers)
@@ -14,7 +11,7 @@ const cappedBufferConcat = (buffers, maxSize) => {
return buffer
}
async function* parse(stream) {
exports.parseVhdStream = async function* parseVhdStream(stream) {
let bytesRead = 0
// handle empty space between elements
@@ -43,8 +40,9 @@ async function* parse(stream) {
const blockSize = header.blockSize
assert.strictEqual(blockSize % SECTOR_SIZE, 0)
const blockBitmapSize = computeBlockBitmapSize(blockSize)
const blockAndBitmapSize = blockBitmapSize + blockSize
const fullBlockSize = computeFullBlockSize(blockSize)
const bitmapSize = fullBlockSize - blockSize
const index = []
@@ -77,8 +75,13 @@ async function* parse(stream) {
while (index.length > 0) {
const item = index.shift()
const buffer = await read(item.offset, item.size)
if (item.type === 'bat') {
// found the BAT : read it and ad block to index
item.buffer = buffer
const { type } = item
if (type === 'bat') {
// found the BAT : read it and add block to index
let blockCount = 0
for (let blockCounter = 0; blockCounter < header.maxTableEntries; blockCounter++) {
const batEntrySector = buffer.readUInt32BE(blockCounter * 4)
// unallocated block, no need to export it
@@ -90,15 +93,20 @@ async function* parse(stream) {
type: 'block',
id: blockCounter,
offset: batEntryBytes,
size: blockAndBitmapSize,
size: fullBlockSize,
})
blockCount++
}
}
// sort again index to ensure block and parent locator are in the right order
index.sort((a, b) => a.offset - b.offset)
} else {
yield { ...item, buffer }
item.blockCount = blockCount
} else if (type === 'block') {
item.bitmap = buffer.slice(0, bitmapSize)
item.data = buffer.slice(bitmapSize)
}
yield item
}
/**
@@ -124,45 +132,3 @@ function readLastSector(stream) {
stream.on('error', reject)
})
}
const buildVhd = Disposable.wrap(async function* (handler, path, inputStream, { concurrency }) {
const vhd = yield VhdDirectory.create(handler, path)
await asyncEach(
parse(inputStream),
async function (item) {
switch (item.type) {
case 'footer':
vhd.footer = item.footer
break
case 'header':
vhd.header = item.header
break
case 'parentLocator':
await vhd.writeParentLocator({ ...item, data: item.buffer })
break
case 'block':
await vhd.writeEntireBlock(item)
break
default:
throw new Error(`unhandled type of block generated by parser : ${item.type} while generating ${path}`)
}
},
{
concurrency,
}
)
await Promise.all([vhd.writeFooter(), vhd.writeHeader(), vhd.writeBlockAllocationTable()])
})
export async function createVhdDirectoryFromStream(handler, path, inputStream, { validator, concurrency = 16 } = {}) {
try {
await buildVhd(handler, path, inputStream, { concurrency })
if (validator !== undefined) {
await validator.call(this, path)
}
} catch (error) {
// cleanup on error
await handler.rmtree(path)
throw error
}
}

View File

@@ -0,0 +1,11 @@
const { readChunk } = require('@vates/read-chunk')
const { FOOTER_SIZE } = require('./_constants')
const { fuFooter } = require('./_structs')
module.exports = async function peekFooterFromStream(stream) {
const footerBuffer = await readChunk(stream, FOOTER_SIZE)
const footer = fuFooter.unpack(footerBuffer)
stream.unshift(footerBuffer)
return footer
}

View File

@@ -1,57 +0,0 @@
import assert from 'assert'
import { BLOCK_UNUSED, SECTOR_SIZE } from '../_constants'
import { fuFooter, fuHeader, checksumStruct, unpackField } from '../_structs'
import checkFooter from '../checkFooter'
import checkHeader from '../_checkHeader'
export const computeBatSize = entries => sectorsToBytes(sectorsRoundUpNoZero(entries * 4))
export const computeSectorsPerBlock = blockSize => blockSize / SECTOR_SIZE
// one bit per sector
export const computeBlockBitmapSize = blockSize => computeSectorsPerBlock(blockSize) >>> 3
export const computeSectorOfBitmap = blockSize => sectorsRoundUpNoZero(computeBlockBitmapSize(blockSize))
// Sectors conversions.
export const sectorsRoundUpNoZero = bytes => Math.ceil(bytes / SECTOR_SIZE) || 1
export const sectorsToBytes = sectors => sectors * SECTOR_SIZE
export const assertChecksum = (name, buf, struct) => {
const actual = unpackField(struct.fields.checksum, buf)
const expected = checksumStruct(buf, struct)
assert.strictEqual(actual, expected, `invalid ${name} checksum ${actual}, expected ${expected}`)
}
// unused block as buffer containing a uint32BE
export const BUF_BLOCK_UNUSED = Buffer.allocUnsafe(4)
BUF_BLOCK_UNUSED.writeUInt32BE(BLOCK_UNUSED, 0)
/**
* Check and parse the header buffer to build an header object
*
* @param {Buffer} bufHeader
* @param {Object} footer
* @returns {Object} the parsed header
*/
export const unpackHeader = (bufHeader, footer) => {
assertChecksum('header', bufHeader, fuHeader)
const header = fuHeader.unpack(bufHeader)
checkHeader(header, footer)
return header
}
/**
* Check and parse the footer buffer to build a footer object
*
* @param {Buffer} bufHeader
* @param {Object} footer
* @returns {Object} the parsed footer
*/
export const unpackFooter = bufFooter => {
assertChecksum('footer', bufFooter, fuFooter)
const footer = fuFooter.unpack(bufFooter)
checkFooter(footer)
return footer
}

View File

@@ -1,7 +0,0 @@
const MASK = 0x80
export const set = (map, bit) => {
map[bit >> 3] |= MASK >> (bit & 7)
}
export const test = (map, bit) => ((map[bit >> 3] << (bit & 7)) & MASK) !== 0

View File

@@ -1,40 +0,0 @@
export const BLOCK_UNUSED = 0xffffffff
// This lib has been extracted from the Xen Orchestra project.
export const CREATOR_APPLICATION = 'xo '
// Sizes in bytes.
export const FOOTER_SIZE = 512
export const HEADER_SIZE = 1024
export const SECTOR_SIZE = 512
export const DEFAULT_BLOCK_SIZE = 0x00200000 // from the spec
export const FOOTER_COOKIE = 'conectix'
export const HEADER_COOKIE = 'cxsparse'
export const DISK_TYPES = {
__proto__: null,
FIXED: 2,
DYNAMIC: 3,
DIFFERENCING: 4,
}
export const PARENT_LOCATOR_ENTRIES = 8
export const PLATFORMS = {
__proto__: null,
NONE: 0,
WI2R: 0x57693272,
WI2K: 0x5769326b,
W2RU: 0x57327275,
W2KU: 0x57326b75,
MAC: 0x4d616320,
MACX: 0x4d616358,
}
export const FILE_FORMAT_VERSION = 1 << 16
export const HEADER_VERSION = 1 << 16
export const ALIAS_MAX_PATH_LENGTH = 1024

View File

@@ -1 +0,0 @@
export default Function.prototype

View File

@@ -1,14 +0,0 @@
import { openVhd } from '.'
import resolveRelativeFromFile from './_resolveRelativeFromFile'
import { DISK_TYPES } from './_constants'
import { Disposable } from 'promise-toolbox'
export default async function checkChain(handler, path) {
await Disposable.use(function* () {
let vhd
do {
vhd = yield openVhd(handler, path)
path = resolveRelativeFromFile(path, vhd.header.parentUnicodeName)
} while (vhd.footer.diskType !== DISK_TYPES.DYNAMIC)
})
}

View File

@@ -1,116 +0,0 @@
/* eslint-env jest */
import execa from 'execa'
import rimraf from 'rimraf'
import tmp from 'tmp'
import { createWriteStream, readFile } from 'fs-extra'
import { fromEvent, pFromCallback } from 'promise-toolbox'
import { pipeline } from 'readable-stream'
import { createReadableRawStream, createReadableSparseStream } from './'
import { checkFile, convertFromVhdToRaw } from './tests/utils'
let tempDir = null
beforeEach(async () => {
tempDir = await pFromCallback(cb => tmp.dir(cb))
})
afterEach(async () => {
await pFromCallback(cb => rimraf(tempDir, cb))
})
test('ReadableRawVHDStream does not crash', async () => {
const data = [
{
logicalAddressBytes: 100,
data: Buffer.from('azerzaerazeraze', 'ascii'),
},
{
logicalAddressBytes: 700,
data: Buffer.from('gdfslkdfguer', 'ascii'),
},
]
let index = 0
const mockParser = {
next: () => {
if (index < data.length) {
const result = data[index]
index++
return result
} else {
return null
}
},
}
const fileSize = 1000
const stream = createReadableRawStream(fileSize, mockParser)
await pFromCallback(cb => pipeline(stream, createWriteStream(`${tempDir}/output.vhd`), cb))
await execa('vhd-util', ['check', '-t', '-i', '-n', `${tempDir}/output.vhd`])
})
test('ReadableRawVHDStream detects when blocks are out of order', async () => {
const data = [
{
logicalAddressBytes: 700,
data: Buffer.from('azerzaerazeraze', 'ascii'),
},
{
logicalAddressBytes: 100,
data: Buffer.from('gdfslkdfguer', 'ascii'),
},
]
let index = 0
const mockParser = {
next: () => {
if (index < data.length) {
const result = data[index]
index++
return result
} else {
return null
}
},
}
return expect(
new Promise((resolve, reject) => {
const stream = createReadableRawStream(100000, mockParser)
stream.on('error', reject)
pipeline(stream, createWriteStream(`${tempDir}/outputStream`), err => (err ? reject(err) : resolve()))
})
).rejects.toThrow('Received out of order blocks')
})
test('ReadableSparseVHDStream can handle a sparse file', async () => {
const blockSize = Math.pow(2, 16)
const blocks = [
{
logicalAddressBytes: blockSize * 3,
data: Buffer.alloc(blockSize, 'azerzaerazeraze', 'ascii'),
},
{
logicalAddressBytes: blockSize * 100,
data: Buffer.alloc(blockSize, 'gdfslkdfguer', 'ascii'),
},
]
const fileSize = blockSize * 110
const stream = await createReadableSparseStream(
fileSize,
blockSize,
blocks.map(b => b.logicalAddressBytes / blockSize),
blocks
)
expect(stream.length).toEqual(4197888)
const pipe = stream.pipe(createWriteStream(`${tempDir}/output.vhd`))
await fromEvent(pipe, 'finish')
await checkFile(`${tempDir}/output.vhd`)
await convertFromVhdToRaw(`${tempDir}/output.vhd`, `${tempDir}/out1.raw`)
const out1 = await readFile(`${tempDir}/out1.raw`)
const expected = Buffer.alloc(fileSize)
blocks.forEach(b => {
b.data.copy(expected, b.logicalAddressBytes)
})
await expect(out1.slice(0, expected.length)).toEqual(expected)
})

View File

@@ -1,34 +0,0 @@
import asyncIteratorToStream from 'async-iterator-to-stream'
import computeGeometryForSize from './_computeGeometryForSize'
import { createFooter } from './_createFooterHeader'
export default asyncIteratorToStream(async function* (size, blockParser) {
const geometry = computeGeometryForSize(size)
const actualSize = geometry.actualSize
const footer = createFooter(actualSize, Math.floor(Date.now() / 1000), geometry)
let position = 0
function* filePadding(paddingLength) {
if (paddingLength > 0) {
const chunkSize = 1024 * 1024 // 1Mo
for (let paddingPosition = 0; paddingPosition + chunkSize < paddingLength; paddingPosition += chunkSize) {
yield Buffer.alloc(chunkSize)
}
yield Buffer.alloc(paddingLength % chunkSize)
}
}
let next
while ((next = await blockParser.next()) !== null) {
const paddingLength = next.logicalAddressBytes - position
if (paddingLength < 0) {
throw new Error('Received out of order blocks')
}
yield* filePadding(paddingLength)
yield next.data
position = next.logicalAddressBytes + next.data.length
}
yield* filePadding(actualSize - position)
yield footer
})

View File

@@ -1,15 +0,0 @@
export { default as chainVhd } from './chain'
export { default as checkFooter } from './checkFooter'
export { default as checkVhdChain } from './checkChain'
export { default as createReadableRawStream } from './createReadableRawStream'
export { default as createReadableSparseStream } from './createReadableSparseStream'
export { default as createVhdStreamWithLength } from './createVhdStreamWithLength'
export { createVhdDirectoryFromStream } from './createVhdDirectoryFromStream'
export { default as mergeVhd } from './merge'
export { default as peekFooterFromVhdStream } from './peekFooterFromVhdStream'
export { openVhd } from './openVhd'
export { VhdAbstract } from './Vhd/VhdAbstract'
export { VhdDirectory } from './Vhd/VhdDirectory'
export { VhdFile } from './Vhd/VhdFile'
export { VhdSynthetic } from './Vhd/VhdSynthetic'
export * as Constants from './_constants'

View File

@@ -1,14 +0,0 @@
import { resolveAlias } from './_resolveAlias'
import { VhdFile, VhdDirectory } from './'
export async function openVhd(handler, path, opts) {
const resolved = await resolveAlias(handler, path)
try {
return await VhdFile.open(handler, resolved, opts)
} catch (e) {
if (e.code !== 'EISDIR') {
throw e
}
return await VhdDirectory.open(handler, resolved, opts)
}
}

View File

@@ -1,11 +0,0 @@
import { readChunk } from '@vates/read-chunk'
import { FOOTER_SIZE } from './_constants'
import { fuFooter } from './_structs'
export default async function peekFooterFromStream(stream) {
const footerBuffer = await readChunk(stream, FOOTER_SIZE)
const footer = fuFooter.unpack(footerBuffer)
stream.unshift(footerBuffer)
return footer
}

View File

@@ -1,9 +1,9 @@
import { pFromCallback } from 'promise-toolbox'
import { pipeline } from 'readable-stream'
import asyncIteratorToStream from 'async-iterator-to-stream'
import execa from 'execa'
import fs from 'fs-extra'
import { randomBytes } from 'crypto'
const { pFromCallback } = require('promise-toolbox')
const { pipeline } = require('readable-stream')
const asyncIteratorToStream = require('async-iterator-to-stream')
const execa = require('execa')
const fs = require('fs-extra')
const { randomBytes } = require('crypto')
const createRandomStream = asyncIteratorToStream(function* (size) {
while (size > 0) {
@@ -12,14 +12,16 @@ const createRandomStream = asyncIteratorToStream(function* (size) {
}
})
export async function createRandomFile(name, sizeMB) {
async function createRandomFile(name, sizeMB) {
const input = createRandomStream(sizeMB * 1024 * 1024)
await pFromCallback(cb => pipeline(input, fs.createWriteStream(name), cb))
}
exports.createRandomFile = createRandomFile
export async function checkFile(vhdName) {
async function checkFile(vhdName) {
await execa('vhd-util', ['check', '-p', '-b', '-t', '-n', vhdName])
}
exports.checkFile = checkFile
const RAW = 'raw'
const VHD = 'vpc'
@@ -29,19 +31,21 @@ async function convert(inputFormat, inputFile, outputFormat, outputFile) {
await execa('qemu-img', ['convert', `-f${inputFormat}`, '-O', outputFormat, inputFile, outputFile])
}
export async function convertFromRawToVhd(rawName, vhdName) {
async function convertFromRawToVhd(rawName, vhdName) {
await convert(RAW, rawName, VHD, vhdName)
}
exports.convertFromRawToVhd = convertFromRawToVhd
export async function convertFromVhdToRaw(vhdName, rawName) {
async function convertFromVhdToRaw(vhdName, rawName) {
await convert(VHD, vhdName, RAW, rawName)
}
exports.convertFromVhdToRaw = convertFromVhdToRaw
export async function convertFromVmdkToRaw(vmdkName, rawName) {
exports.convertFromVmdkToRaw = async function convertFromVmdkToRaw(vmdkName, rawName) {
await convert(VMDK, vmdkName, RAW, rawName)
}
export async function recoverRawContent(vhdName, rawName, originalSize) {
exports.recoverRawContent = async function recoverRawContent(vhdName, rawName, originalSize) {
// todo should use createContentStream
await checkFile(vhdName)
await convertFromVhdToRaw(vhdName, rawName)
@@ -51,7 +55,7 @@ export async function recoverRawContent(vhdName, rawName, originalSize) {
}
// @ todo how can I call vhd-cli copy from here
export async function convertToVhdDirectory(rawFileName, vhdFileName, path) {
async function convertToVhdDirectory(rawFileName, vhdFileName, path) {
fs.mkdirp(path)
const srcVhd = await fs.open(vhdFileName, 'r')
@@ -87,8 +91,9 @@ export async function convertToVhdDirectory(rawFileName, vhdFileName, path) {
}
await fs.close(srcRaw)
}
exports.convertToVhdDirectory = convertToVhdDirectory
export async function createRandomVhdDirectory(path, sizeMB) {
exports.createRandomVhdDirectory = async function createRandomVhdDirectory(path, sizeMB) {
fs.mkdirp(path)
const rawFileName = `${path}/temp.raw`
await createRandomFile(rawFileName, sizeMB)

View File

@@ -8,6 +8,6 @@
"promise-toolbox": "^0.19.2",
"readable-stream": "^3.1.1",
"throttle": "^1.0.3",
"vhd-lib": "^2.0.3"
"vhd-lib": "^3.0.0"
}
}

View File

@@ -31,7 +31,8 @@
"node": ">=7.6"
},
"dependencies": {
"bind-property-descriptor": "^1.0.0",
"@vates/coalesce-calls": "^0.1.0",
"bind-property-descriptor": "^2.0.0",
"blocked": "^1.2.1",
"debug": "^4.0.1",
"http-request-plus": "^0.13.0",

View File

@@ -1,15 +0,0 @@
// decorates fn so that more than one concurrent calls will be coalesced
export default function coalesceCalls(fn) {
let promise
const clean = () => {
promise = undefined
}
return function () {
if (promise !== undefined) {
return promise
}
promise = fn.apply(this, arguments)
promise.then(clean, clean)
return promise
}
}

View File

@@ -1,26 +0,0 @@
/* eslint-env jest */
import pDefer from 'promise-toolbox/defer'
import coalesceCalls from './_coalesceCalls'
describe('coalesceCalls', () => {
it('decorates an async function', async () => {
const fn = coalesceCalls(promise => promise)
const defer1 = pDefer()
const promise1 = fn(defer1.promise)
const defer2 = pDefer()
const promise2 = fn(defer2.promise)
defer1.resolve('foo')
expect(await promise1).toBe('foo')
expect(await promise2).toBe('foo')
const defer3 = pDefer()
const promise3 = fn(defer3.promise)
defer3.resolve('bar')
expect(await promise3).toBe('bar')
})
})

View File

@@ -3,6 +3,7 @@ import dns from 'dns'
import kindOf from 'kindof'
import ms from 'ms'
import httpRequest from 'http-request-plus'
import { coalesceCalls } from '@vates/coalesce-calls'
import { Collection } from 'xo-collection'
import { EventEmitter } from 'events'
import { map, noop, omit } from 'lodash'
@@ -10,7 +11,6 @@ import { cancelable, defer, fromCallback, fromEvents, ignoreErrors, pDelay, pRet
import { limitConcurrency } from 'limit-concurrency-decorator'
import autoTransport from './transports/auto'
import coalesceCalls from './_coalesceCalls'
import debug from './_debug'
import getTaskResult from './_getTaskResult'
import isGetAllRecordsMethod from './_isGetAllRecordsMethod'

View File

@@ -396,12 +396,12 @@ class AuditXoPlugin {
}
async _generateFingerprint(props) {
const { oldest = NULL_ID, newest = await this._storage.getLastId() } = props
const { oldest = NULL_ID, newest = (await this._storage.getLastId()) ?? NULL_ID } = props
try {
return {
fingerprint: `${oldest}|${newest}`,
newest,
nValid: await this._checkIntegrity({ oldest, newest }),
nValid: newest !== NULL_ID ? await this._checkIntegrity({ oldest, newest }) : 0,
oldest,
}
} catch (error) {

View File

@@ -26,10 +26,10 @@
"preferGlobal": false,
"main": "dist/",
"engines": {
"node": ">=6"
"node": ">=12"
},
"dependencies": {
"passport-saml": "^2.0.2"
"passport-saml": "^3.2.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",

View File

@@ -26,9 +26,9 @@
"@babel/plugin-proposal-decorators": "^7.4.0",
"@babel/preset-env": "^7.1.6",
"@iarna/toml": "^2.2.1",
"@vates/decorate-with": "^0.1.0",
"@vates/decorate-with": "^1.0.0",
"@vates/parse-duration": "^0.1.1",
"app-conf": "^0.9.0",
"app-conf": "^1.0.0",
"babel-plugin-lodash": "^3.2.11",
"golike-defer": "^0.5.1",
"jest": "^27.3.1",

View File

@@ -87,6 +87,8 @@ snapshotNameLabelTpl = '[XO Backup {job.name}] {vm.name_label}'
# Delay for which backups listing on a remote is cached
listingDebounce = '1 min'
vhdDirectoryCompression = 'brotli'
[backups.defaultSettings]
reportWhen = 'failure'

View File

@@ -1,7 +1,7 @@
{
"private": true,
"name": "xo-server",
"version": "5.84.3",
"version": "5.86.3",
"license": "AGPL-3.0-or-later",
"description": "Server part of Xen-Orchestra",
"keywords": [
@@ -29,28 +29,28 @@
"dependencies": {
"@iarna/toml": "^2.2.1",
"@vates/compose": "^2.1.0",
"@vates/decorate-with": "^0.1.0",
"@vates/decorate-with": "^1.0.0",
"@vates/disposable": "^0.1.1",
"@vates/multi-key-map": "^0.1.0",
"@vates/parse-duration": "^0.1.1",
"@vates/read-chunk": "^0.1.2",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/backups": "^0.16.2",
"@xen-orchestra/backups": "^0.18.3",
"@xen-orchestra/cron": "^1.0.6",
"@xen-orchestra/defined": "^0.0.1",
"@xen-orchestra/emit-async": "^0.1.0",
"@xen-orchestra/fs": "^0.19.2",
"@xen-orchestra/fs": "^0.19.3",
"@xen-orchestra/log": "^0.3.0",
"@xen-orchestra/mixin": "^0.1.0",
"@xen-orchestra/mixins": "^0.1.1",
"@xen-orchestra/mixins": "^0.1.2",
"@xen-orchestra/self-signed": "^0.1.0",
"@xen-orchestra/template": "^0.1.0",
"@xen-orchestra/xapi": "^0.8.4",
"@xen-orchestra/xapi": "^0.8.5",
"ajv": "^8.0.3",
"app-conf": "^0.9.0",
"app-conf": "^1.0.0",
"async-iterator-to-stream": "^1.0.1",
"base64url": "^3.0.0",
"bind-property-descriptor": "^1.0.0",
"bind-property-descriptor": "^2.0.0",
"blocked-at": "^1.2.0",
"bluebird": "^3.5.1",
"body-parser": "^1.18.2",
@@ -60,14 +60,14 @@
"content-type": "^1.0.4",
"cookie": "^0.4.0",
"cookie-parser": "^1.4.3",
"d3-time-format": "^3.0.0",
"d3-time-format": "^4.1.0",
"decorator-synchronized": "^0.6.0",
"deptree": "^1.0.0",
"exec-promise": "^0.7.0",
"execa": "^5.0.0",
"execa": "^6.0.0",
"express": "^4.16.2",
"express-session": "^1.15.6",
"fast-xml-parser": "^3.17.4",
"fast-xml-parser": "^4.0.0",
"fatfs": "^0.10.4",
"fs-extra": "^10.0.0",
"get-stream": "^6.0.0",
@@ -117,21 +117,21 @@
"source-map-support": "^0.5.16",
"split2": "^4.1.0",
"stoppable": "^1.0.5",
"subleveldown": "^5.0.1",
"subleveldown": "^6.0.1",
"tar-stream": "^2.0.1",
"tmp": "^0.2.1",
"unzipper": "^0.10.5",
"uuid": "^8.3.1",
"value-matcher": "^0.2.0",
"vhd-lib": "^2.0.3",
"vhd-lib": "^3.0.0",
"ws": "^8.2.3",
"xdg-basedir": "^4.0.0",
"xdg-basedir": "^5.1.0",
"xen-api": "^0.35.1",
"xo-acl-resolver": "^0.4.1",
"xo-collection": "^0.5.0",
"xo-common": "^0.7.0",
"xo-remote-parser": "^0.8.0",
"xo-vmdk-to-vhd": "^2.0.1"
"xo-vmdk-to-vhd": "^2.0.3"
},
"devDependencies": {
"@babel/cli": "^7.0.0",

Some files were not shown because too many files have changed in this diff Show More