Compare commits

...

67 Commits

Author SHA1 Message Date
Florent Beauchamp
d6d7e87fe5 fix: remove root need for openVhd.integ.spec.js and merge.integ.spec.js 2021-11-12 11:24:00 +01:00
Florent Beauchamp
00f02c795f feat(vhd-lib): tests shouldn't need root access to run 2021-11-10 14:06:24 +01:00
Julien Fontanet
fb720d9b05 fix(docs/xoa): use wget instead of curl
The version of curl installed on XCP-ng 8.2.0, (curl 7.29.0) does not support any encryption algos available on https://xoa.io
2021-11-09 19:55:49 +01:00
Florent Beauchamp
547d318e55 fix(vhd-lib): write parent locator before the blocks 2021-11-08 18:03:46 +01:00
Florent Beauchamp
cb5a2c18f2 fix(vhd-lib): ensure block allocation table is written after modifying it in tests 2021-11-08 18:03:46 +01:00
Florent Beauchamp
e01ca3ad07 refactor(vhd-lib): use method from test/utils when possible 2021-11-08 18:03:46 +01:00
Florent Beauchamp
314d193f35 fix(vhd-lib): set platform code when setting unique parent locator 2021-11-08 18:03:46 +01:00
Florent Beauchamp
e0200bb730 refactor(vhd-lib): split tests 2021-11-08 18:03:46 +01:00
Florent BEAUCHAMP
2a3f4a6f97 feat(vhd-lib): handle file alias (#5962) 2021-11-08 14:46:00 +01:00
Nicolas Raynaud
88628bbdc0 chore(xo-vmdk-to-vhd): fix tests (#5981)
Introduced by fdf52a3d59

Follow-up of b00750bfa3
2021-11-07 15:38:45 +01:00
Olivier Lambert
cb7b695a72 feat(docs/netbox): add how to add a custom field in Netbox 3 (#5984) 2021-11-07 13:44:02 +01:00
Julien Fontanet
ae549e2a88 fix(jest): dont use fake timers by default
Introduced by 844efb88d

The upgrade to Jest 27 (15630aee5) revealed this issue.
2021-11-05 13:24:51 +01:00
Julien Fontanet
7f9a970714 fix(log/USAGE): document filter array
Introduced by d3cb31f1a
2021-11-04 10:45:58 +01:00
Julien Fontanet
7661d3372d fix(xen-api/USAGE): add httpProxy option
Introduced by 2412f8b1e
2021-11-04 10:38:22 +01:00
Julien Fontanet
dbb4f34015 chore(xapi/VDI_destroy): decorate with retry.wrap()
- more efficient than creating a function at each call
- better logging
2021-11-03 23:10:58 +01:00
Julien Fontanet
8f15a4c29d feat(ISSUE_TEMPLATE/bug_report): add hypervisor version 2021-11-03 16:55:17 +01:00
Florent BEAUCHAMP
1b0a885ac3 feat(vhd-cli): use any remote for copy and compare (#5927) 2021-11-03 15:45:52 +01:00
Nicolas Raynaud
f7195bad88 fix(xo-server): fix ova multipart upload (#5976)
Introduced by 0451aaeb5c
2021-11-02 17:43:45 +01:00
Julien Fontanet
15630aee5e chore: update dev deps 2021-11-02 13:43:49 +01:00
Florent BEAUCHAMP
a950a1fe24 refactor(vhd-lib): centralize test methods (#5968) 2021-11-02 09:53:30 +01:00
Julien Fontanet
71b8e625fe chore: update issue templates (#5974) 2021-10-30 15:06:51 +02:00
Julien Fontanet
e7391675fb feat(@xen-orchestra/proxy): 0.15.2 2021-10-29 17:41:02 +02:00
Julien Fontanet
84fdd3fe4b fix(proxy/api/ndJsonStream): send header for empty iterables
Introduced by ed987e161
2021-10-29 17:05:05 +02:00
Julien Fontanet
4dc4b635f2 feat(@xen-orchestra/proxy): 0.15.1 2021-10-29 15:50:42 +02:00
Julien Fontanet
ee0c6d7f8b feat(xen-api): 0.35.1 2021-10-29 15:50:05 +02:00
Julien Fontanet
a637af395d fix(xen-api): add missing dep proxy-agent
Introduced by 2412f8b1e
2021-10-29 15:40:25 +02:00
Julien Fontanet
59fb612315 feat(@xen-orchestra/proxy): 0.15.0 2021-10-29 15:20:09 +02:00
Mathieu
59b21c7a3e feat: release 5.64 (#5971) 2021-10-29 11:40:16 +02:00
Mathieu
40f881c2ac feat: technical release (#5970) 2021-10-28 16:30:00 +02:00
Rajaa.BARHTAOUI
1d069683ca feat(xo-web/host): manage evacuation failure during host shutdown (#5966) 2021-10-28 14:23:43 +02:00
Julien Fontanet
de1d942b90 fix(xo-server/listPoolsMatchingCriteria): check{Sr,Pool}Name is not a function
Fixes xoa-support#4193

Introduced by cd8c618f0
2021-10-28 13:29:32 +02:00
Rajaa.BARHTAOUI
fc73971d63 feat(xo-server,xo-web/menu): proxy upgrade notification (#5930)
See xoa-support#4105
2021-10-28 10:52:23 +02:00
Rajaa.BARHTAOUI
eb238bf107 feat(xo-web/pool/advanced, xen-api/{get,put}Resource): introduce backup network (#5957) 2021-10-28 10:21:48 +02:00
Florent BEAUCHAMP
2412f8b1e2 feat(xen-api): add HTTP proxy support (#5958)
See #5436

Using an IP address as HTTPS proxy show this warning: `DeprecationWarning: Setting the TLS ServerName to an IP address is not permitted by RFC 6066`

The corresponding issue is here: TooTallNate/node-https-proxy-agent#127
2021-10-27 17:30:41 +02:00
Pierre Donias
0c87dee31c fix(xo-web/xoa): handle string expiration dates (#5967)
See xoa-support#4114
See xoa-support#4192

www-xo may return a string instead of a number in some rare cases
2021-10-27 16:59:59 +02:00
Mathieu
215146f663 feat(xo-web/vm/export): allow to copy the export URL (#5948) 2021-10-27 16:58:09 +02:00
Mathieu
9fe1069df0 feat(xo-web/host): format logs (#5943)
See xoa-support#4100
2021-10-27 15:41:29 +02:00
Julien Fontanet
d2c5b52bf1 feat(backups): enable merge worker by default
Related to 47f9da216

It can still be disabled in case of problems:

```toml
[backups]
disableMergeWorker = true
```
2021-10-27 09:29:50 +02:00
Pierre Donias
12153a414d fix(xo-server/{clone,copy}Vm): force is_a_template to false on the new VM (#5955)
See xoa-support#4137
2021-10-26 16:53:09 +02:00
Pierre Donias
5ec1092a83 fix(xo-server-netbox/test): perform test with a 50-character name (#5963)
See https://xcp-ng.org/forum/topic/5111
See https://netbox.readthedocs.io/en/stable/release-notes/version-2.10/#other-changes > #5011

Versions of Netbox <2.10 only allow cluster type names of length <= 50.
2021-10-26 15:55:11 +02:00
Julien Fontanet
284169a2f2 chore(vhd-lib/VhdAbstract): format with Prettier
Introduced by 7ef89d504
2021-10-25 16:12:49 +02:00
Julien Fontanet
838bfbb75f fix(backups/cleanVm): wait for merge to finish
Introduced by 9c83e70a2
2021-10-25 09:14:38 +02:00
Julien Fontanet
a448da77c9 fix(backups/cleanVm): mergeLimiter support
Introduced by 9c83e70a2
2021-10-25 09:13:58 +02:00
Rajaa.BARHTAOUI
268fb22d5f feat(xo-web/host/advanced): add button to disable/enable host (#5952) 2021-10-20 16:39:54 +02:00
Julien Fontanet
07cc4c853d fix(vhd-lib): fix block table properties & accessors
Fixes #5956

Introduced by 7ef89d504
2021-10-18 23:13:55 +02:00
Florent BEAUCHAMP
c62d727cbe feat(vhd-cli compare): compare metadata and content of two VHDs (#5920) 2021-10-18 16:21:40 +02:00
Florent BEAUCHAMP
7ef89d5043 feat(vhd-{cli,lib}): implement chunking and copy command (#5919) 2021-10-18 14:56:58 +02:00
Mathieu
9ceba1d6e8 feat(xo-web/jobs): add button to copy jobs IDs (#5951)
Useful to create a `job.runSequence` job. Follow-up of #5944.
2021-10-15 14:25:02 +02:00
Pierre Donias
e2e453985f fix(xo-web/job): properly handle array arguments (#5944)
See https://xcp-ng.org/forum/topic/5010

When creating/editing a job, properties of type `array` must not go through the
cross product builder, they must be saved as arrays.
2021-10-15 10:42:33 +02:00
Florent BEAUCHAMP
84dccd800f feat(backups): clean up other schedules snapshots (#5949)
Fixes xoa-support#4129
2021-10-14 14:44:40 +02:00
Julien Fontanet
f9734d202b chore(backups/_VmBackup): remove unused import 2021-10-14 13:51:29 +02:00
Julien Fontanet
d3cb0f4672 feat(xo-server): 5.82.4 2021-10-14 09:47:39 +02:00
Julien Fontanet
c198bbb6fa feat(@xen-orchestra/backups): 0.14.0 2021-10-14 09:45:20 +02:00
Julien Fontanet
c965a89509 feat(xo-server-netbox): 0.3.2 2021-10-14 09:43:38 +02:00
Julien Fontanet
47f9da2160 feat(backups/MixinBackupWriter): use merge worker if not disabled 2021-10-13 16:26:12 +02:00
Julien Fontanet
348a75adb4 feat(backups): merge worker implementation
This CLI must be run directly in the directory where the remote is mounted.

It's only compatible with local remote at the moment.

To start the worker:

```js
const MergeWorker = require('@xen-orchestra/backups/merge-worker/index.js')

await MergeWorker.run(remotePath)
```

To register a VM backup dir to be cleaned (thus merging its unused VHDs), create a file in the queue directory containing the VM UUID:

```
> echo cc700fe2-724e-44a5-8663-5f8f88e05e34 > .queue/clean-vm/20211013T142401Z
```

The queue directory is available as `MergeWorker.CLEAN_VM_QUEUE`.
2021-10-13 16:25:21 +02:00
Julien Fontanet
332218a7f7 feat(backups): move merge responsability to cleanVm 2021-10-13 16:10:19 +02:00
Julien Fontanet
6d7a26d2b9 chore(backups/MixinBackupWriter): use private fields 2021-10-13 10:02:57 +02:00
Pierre Donias
d19a748f0c fix(xo-server-netbox): support older versions of Netbox (#5946)
Fixes #5898
See https://netbox.readthedocs.io/en/stable/release-notes/version-2.7/#api-choice-fields-now-use-string-values-3569
2021-10-13 09:28:46 +02:00
Julien Fontanet
9c83e70a28 feat(backups/RemoteAdapter#cleanVm): configurable merge limiter 2021-10-12 09:17:42 +02:00
Rajaa.BARHTAOUI
abcabb736b feat(xo-web/tasks): filter out short tasks with a default filter (#5941)
See xoa-support#4096
2021-10-08 16:42:16 +02:00
Julien Fontanet
0451aaeb5c fix(xo-server/vm.import): restore non-multipart upload (#5936)
See xoa-support#4085

Introduced by fdf52a3d5

Required by `xo-cli`.
2021-10-08 15:24:21 +02:00
Julien Fontanet
880c45830c fix(xo-cli): http-request-plus@0.12 has no longer default export
Introduced by 62e5ab699
2021-10-07 17:11:54 +02:00
Julien Fontanet
5fa16d2344 chore: format with Prettier 2021-10-07 14:40:41 +02:00
Julien Fontanet
9e50b5dd83 feat(proxy): logging is now dynamically configurable
It was done for xo-server in f20d5cd8d
2021-10-06 16:54:57 +02:00
Julien Fontanet
29d8753574 chore(backups/VmBackup#_selectBaseVm): add debug logs 2021-10-06 16:48:42 +02:00
Pierre Donias
f93e1e1695 feat: release 5.63.0 (#5925) 2021-09-30 15:25:34 +02:00
121 changed files with 4328 additions and 2624 deletions

33
.github/ISSUE_TEMPLATE/bug_report.md vendored Normal file
View File

@@ -0,0 +1,33 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: 'status: triaging :triangular_flag_on_post:, type: bug :bug:'
assignees: ''
---
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Desktop (please complete the following information):**
- Node: [e.g. 16.12.1]
- xo-server: [e.g. 5.82.3]
- xo-web: [e.g. 5.87.0]
- hypervisor: [e.g. XCP-ng 8.2.0]
**Additional context**
Add any other context about the problem here.

View File

@@ -0,0 +1,20 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''
---
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
**Additional context**
Add any other context or screenshots about the feature request here.

View File

@@ -46,7 +46,7 @@ module.exports = function (pkg, configs = {}) {
return {
comments: !__PROD__,
ignore: __PROD__ ? [/\.spec\.js$/] : undefined,
ignore: __PROD__ ? [/\btests?\//, /\.spec\.js$/] : undefined,
plugins: Object.keys(plugins)
.map(plugin => [plugin, plugins[plugin]])
.sort(([a], [b]) => {

View File

@@ -7,12 +7,12 @@
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"dependencies": {
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/backups": "^0.13.0",
"@xen-orchestra/backups": "^0.15.1",
"@xen-orchestra/fs": "^0.18.0",
"filenamify": "^4.1.0",
"getopts": "^2.2.5",
"lodash": "^4.17.15",
"promise-toolbox": "^0.19.2"
"promise-toolbox": "^0.20.0"
},
"engines": {
"node": ">=7.10.1"

View File

@@ -6,7 +6,7 @@ const pDefer = require('promise-toolbox/defer.js')
const pump = require('pump')
const { basename, dirname, join, normalize, resolve } = require('path')
const { createLogger } = require('@xen-orchestra/log')
const { createSyntheticStream, mergeVhd, default: Vhd } = require('vhd-lib')
const { createSyntheticStream, mergeVhd, VhdFile } = require('vhd-lib')
const { deduped } = require('@vates/disposable/deduped.js')
const { execFile } = require('child_process')
const { readdir, stat } = require('fs-extra')
@@ -86,7 +86,7 @@ class RemoteAdapter {
}),
async path => {
try {
const vhd = new Vhd(handler, path)
const vhd = new VhdFile(handler, path)
await vhd.readHeaderAndFooter()
return {
footer: vhd.footer,
@@ -253,16 +253,9 @@ class RemoteAdapter {
async deleteDeltaVmBackups(backups) {
const handler = this._handler
let mergedDataSize = 0
await asyncMapSettled(backups, ({ _filename, vhds }) =>
Promise.all([
handler.unlink(_filename),
asyncMap(Object.values(vhds), async _ => {
mergedDataSize += await this._deleteVhd(resolveRelativeFromFile(_filename, _))
}),
])
)
return mergedDataSize
// unused VHDs will be detected by `cleanVm`
await asyncMapSettled(backups, ({ _filename }) => handler.unlink(_filename))
}
async deleteMetadataBackup(backupId) {

View File

@@ -1,9 +1,10 @@
const assert = require('assert')
const findLast = require('lodash/findLast.js')
const groupBy = require('lodash/groupBy.js')
const ignoreErrors = require('promise-toolbox/ignoreErrors.js')
const keyBy = require('lodash/keyBy.js')
const mapValues = require('lodash/mapValues.js')
const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
const { asyncMap } = require('@xen-orchestra/async-map')
const { createLogger } = require('@xen-orchestra/log')
const { defer } = require('golike-defer')
const { formatDateTime } = require('@xen-orchestra/xapi')
@@ -284,17 +285,28 @@ exports.VmBackup = class VmBackup {
}
async _removeUnusedSnapshots() {
// TODO: handle all schedules (no longer existing schedules default to 0 retention)
const { scheduleId } = this
const scheduleSnapshots = this._jobSnapshots.filter(_ => _.other_config['xo:backup:schedule'] === scheduleId)
const jobSettings = this.job.settings
const baseVmRef = this._baseVm?.$ref
const { config } = this
const baseSettings = {
...config.defaultSettings,
...config.metadata.defaultSettings,
...jobSettings[''],
}
const snapshotsPerSchedule = groupBy(this._jobSnapshots, _ => _.other_config['xo:backup:schedule'])
const xapi = this._xapi
await asyncMap(getOldEntries(this._settings.snapshotRetention, scheduleSnapshots), ({ $ref }) => {
if ($ref !== baseVmRef) {
return xapi.VM_destroy($ref)
await asyncMap(Object.entries(snapshotsPerSchedule), ([scheduleId, snapshots]) => {
const settings = {
...baseSettings,
...jobSettings[scheduleId],
...jobSettings[this.vm.uuid],
}
return asyncMap(getOldEntries(settings.snapshotRetention, snapshots), ({ $ref }) => {
if ($ref !== baseVmRef) {
return xapi.VM_destroy($ref)
}
})
})
}
@@ -303,12 +315,14 @@ exports.VmBackup = class VmBackup {
let baseVm = findLast(this._jobSnapshots, _ => 'xo:backup:exported' in _.other_config)
if (baseVm === undefined) {
debug('no base VM found')
return
}
const fullInterval = this._settings.fullInterval
const deltaChainLength = +(baseVm.other_config['xo:backup:deltaChainLength'] ?? 0) + 1
if (!(fullInterval === 0 || fullInterval > deltaChainLength)) {
debug('not using base VM becaust fullInterval reached')
return
}
@@ -323,6 +337,10 @@ exports.VmBackup = class VmBackup {
const srcVdi = srcVdis[snapshotOf]
if (srcVdi !== undefined) {
baseUuidToSrcVdi.set(await xapi.getField('VDI', baseRef, 'uuid'), srcVdi)
} else {
debug('no base VDI found', {
vdi: srcVdi.uuid,
})
}
})
@@ -335,7 +353,16 @@ exports.VmBackup = class VmBackup {
const fullVdisRequired = new Set()
baseUuidToSrcVdi.forEach((srcVdi, baseUuid) => {
if (!presentBaseVdis.has(baseUuid)) {
if (presentBaseVdis.has(baseUuid)) {
debug('found base VDI', {
base: baseUuid,
vdi: srcVdi.uuid,
})
} else {
debug('missing base VDI', {
base: baseUuid,
vdi: srcVdi.uuid,
})
fullVdisRequired.add(srcVdi.uuid)
}
})

View File

@@ -1,17 +1,19 @@
const assert = require('assert')
const sum = require('lodash/sum')
const { asyncMap } = require('@xen-orchestra/async-map')
const { default: Vhd, mergeVhd } = require('vhd-lib')
const { VhdFile, mergeVhd } = require('vhd-lib')
const { dirname, resolve } = require('path')
const { DISK_TYPE_DIFFERENCING } = require('vhd-lib/dist/_constants.js')
const { isMetadataFile, isVhdFile, isXvaFile, isXvaSumFile } = require('./_backupType.js')
const { limitConcurrency } = require('limit-concurrency-decorator')
const { Task } = require('./Task.js')
// chain is an array of VHDs from child to parent
//
// the whole chain will be merged into parent, parent will be renamed to child
// and all the others will deleted
const mergeVhdChain = limitConcurrency(1)(async function mergeVhdChain(chain, { handler, onLog, remove, merge }) {
async function mergeVhdChain(chain, { handler, onLog, remove, merge }) {
assert(chain.length >= 2)
let child = chain[0]
@@ -44,7 +46,7 @@ const mergeVhdChain = limitConcurrency(1)(async function mergeVhdChain(chain, {
}
}, 10e3)
await mergeVhd(
const mergedSize = await mergeVhd(
handler,
parent,
handler,
@@ -72,8 +74,10 @@ const mergeVhdChain = limitConcurrency(1)(async function mergeVhdChain(chain, {
}
}),
])
return mergedSize
}
})
}
const noop = Function.prototype
@@ -114,7 +118,14 @@ const listVhds = async (handler, vmDir) => {
return { vhds, interruptedVhds }
}
exports.cleanVm = async function cleanVm(vmDir, { fixMetadata, remove, merge, onLog = noop }) {
const defaultMergeLimiter = limitConcurrency(1)
exports.cleanVm = async function cleanVm(
vmDir,
{ fixMetadata, remove, merge, mergeLimiter = defaultMergeLimiter, onLog = noop }
) {
const limitedMergeVhdChain = mergeLimiter(mergeVhdChain)
const handler = this._handler
const vhds = new Set()
@@ -126,7 +137,7 @@ exports.cleanVm = async function cleanVm(vmDir, { fixMetadata, remove, merge, on
// remove broken VHDs
await asyncMap(vhdsList.vhds, async path => {
try {
const vhd = new Vhd(handler, path)
const vhd = new VhdFile(handler, path)
await vhd.readHeaderAndFooter(!vhdsList.interruptedVhds.has(path))
vhds.add(path)
if (vhd.footer.diskType === DISK_TYPE_DIFFERENCING) {
@@ -279,6 +290,7 @@ exports.cleanVm = async function cleanVm(vmDir, { fixMetadata, remove, merge, on
// TODO: parallelize by vm/job/vdi
const unusedVhdsDeletion = []
const toMerge = []
{
// VHD chains (as list from child to ancestor) to merge indexed by last
// ancestor
@@ -321,22 +333,25 @@ exports.cleanVm = async function cleanVm(vmDir, { fixMetadata, remove, merge, on
})
// merge interrupted VHDs
if (merge) {
vhdsList.interruptedVhds.forEach(parent => {
vhdChainsToMerge[parent] = [vhdChildren[parent], parent]
})
}
vhdsList.interruptedVhds.forEach(parent => {
vhdChainsToMerge[parent] = [vhdChildren[parent], parent]
})
Object.keys(vhdChainsToMerge).forEach(key => {
const chain = vhdChainsToMerge[key]
Object.values(vhdChainsToMerge).forEach(chain => {
if (chain !== undefined) {
unusedVhdsDeletion.push(mergeVhdChain(chain, { handler, onLog, remove, merge }))
toMerge.push(chain)
}
})
}
const doMerge = () => {
const promise = asyncMap(toMerge, async chain => limitedMergeVhdChain(chain, { handler, onLog, remove, merge }))
return merge ? promise.then(sizes => ({ size: sum(sizes) })) : promise
}
await Promise.all([
...unusedVhdsDeletion,
toMerge.length !== 0 && (merge ? Task.run({ name: 'merge' }, doMerge) : doMerge()),
asyncMap(unusedXvas, path => {
onLog(`the XVA ${path} is unused`)
if (remove) {
@@ -355,4 +370,9 @@ exports.cleanVm = async function cleanVm(vmDir, { fixMetadata, remove, merge, on
}
}),
])
return {
// boolean whether some VHDs were merged (or should be merged)
merge: toMerge.length !== 0,
}
}

View File

@@ -0,0 +1,69 @@
#!/usr/bin/env node
const { catchGlobalErrors } = require('@xen-orchestra/log/configure.js')
const { createLogger } = require('@xen-orchestra/log')
const { getSyncedHandler } = require('@xen-orchestra/fs')
const { join } = require('path')
const Disposable = require('promise-toolbox/Disposable')
const min = require('lodash/min')
const { getVmBackupDir } = require('../_getVmBackupDir.js')
const { RemoteAdapter } = require('../RemoteAdapter.js')
const { CLEAN_VM_QUEUE } = require('./index.js')
// -------------------------------------------------------------------
catchGlobalErrors(createLogger('xo:backups:mergeWorker'))
const { fatal, info, warn } = createLogger('xo:backups:mergeWorker')
// -------------------------------------------------------------------
const main = Disposable.wrap(async function* main(args) {
const handler = yield getSyncedHandler({ url: 'file://' + process.cwd() })
yield handler.lock(CLEAN_VM_QUEUE)
const adapter = new RemoteAdapter(handler)
const listRetry = async () => {
const timeoutResolver = resolve => setTimeout(resolve, 10e3)
for (let i = 0; i < 10; ++i) {
const entries = await handler.list(CLEAN_VM_QUEUE)
if (entries.length !== 0) {
return entries
}
await new Promise(timeoutResolver)
}
}
let taskFiles
while ((taskFiles = await listRetry()) !== undefined) {
const taskFileBasename = min(taskFiles)
const taskFile = join(CLEAN_VM_QUEUE, '_' + taskFileBasename)
// move this task to the end
await handler.rename(join(CLEAN_VM_QUEUE, taskFileBasename), taskFile)
try {
const vmDir = getVmBackupDir(String(await handler.readFile(taskFile)))
await adapter.cleanVm(vmDir, { merge: true, onLog: info, remove: true })
handler.unlink(taskFile).catch(error => warn('deleting task failure', { error }))
} catch (error) {
warn('failure handling task', { error })
}
}
})
info('starting')
main(process.argv.slice(2)).then(
() => {
info('bye :-)')
},
error => {
fatal(error)
process.exit(1)
}
)

View File

@@ -0,0 +1,25 @@
const { join, resolve } = require('path')
const { spawn } = require('child_process')
const { check } = require('proper-lockfile')
const CLEAN_VM_QUEUE = (exports.CLEAN_VM_QUEUE = '/xo-vm-backups/.queue/clean-vm/')
const CLI_PATH = resolve(__dirname, 'cli.js')
exports.run = async function runMergeWorker(remotePath) {
try {
// TODO: find a way to acquire the lock here and then pass it down to the worker
if (await check(join(remotePath, CLEAN_VM_QUEUE))) {
// already locked, don't start another worker
return
}
spawn(CLI_PATH, {
cwd: remotePath,
detached: true,
stdio: 'inherit',
}).unref()
} catch (error) {
// we usually don't want to throw if the merge worker failed to start
return error
}
}

View File

@@ -8,7 +8,7 @@
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"version": "0.13.0",
"version": "0.15.1",
"engines": {
"node": ">=14.6"
},
@@ -23,7 +23,7 @@
"@xen-orchestra/fs": "^0.18.0",
"@xen-orchestra/log": "^0.3.0",
"@xen-orchestra/template": "^0.1.0",
"compare-versions": "^3.6.0",
"compare-versions": "^4.0.1",
"d3-time-format": "^3.0.0",
"end-of-stream": "^1.4.4",
"fs-extra": "^10.0.0",
@@ -32,13 +32,14 @@
"lodash": "^4.17.20",
"node-zone": "^0.4.0",
"parse-pairs": "^1.1.0",
"promise-toolbox": "^0.19.2",
"promise-toolbox": "^0.20.0",
"proper-lockfile": "^4.1.2",
"pump": "^3.0.0",
"vhd-lib": "^1.2.0",
"vhd-lib": "^1.3.0",
"yazl": "^2.5.1"
},
"peerDependencies": {
"@xen-orchestra/xapi": "^0.7.0"
"@xen-orchestra/xapi": "^0.8.0"
},
"license": "AGPL-3.0-or-later",
"author": {

View File

@@ -3,7 +3,7 @@ const map = require('lodash/map.js')
const mapValues = require('lodash/mapValues.js')
const ignoreErrors = require('promise-toolbox/ignoreErrors.js')
const { asyncMap } = require('@xen-orchestra/async-map')
const { chainVhd, checkVhdChain, default: Vhd } = require('vhd-lib')
const { chainVhd, checkVhdChain, VhdFile } = require('vhd-lib')
const { createLogger } = require('@xen-orchestra/log')
const { dirname } = require('path')
@@ -38,7 +38,7 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
try {
await checkVhdChain(handler, path)
const vhd = new Vhd(handler, path)
const vhd = new VhdFile(handler, path)
await vhd.readHeaderAndFooter()
found = found || vhd.footer.uuid.equals(packUuid(baseUuid))
} catch (error) {
@@ -113,19 +113,13 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
}
async _deleteOldEntries() {
return Task.run({ name: 'merge' }, async () => {
const adapter = this._adapter
const oldEntries = this._oldEntries
const adapter = this._adapter
const oldEntries = this._oldEntries
let size = 0
// delete sequentially from newest to oldest to avoid unnecessary merges
for (let i = oldEntries.length; i-- > 0; ) {
size += await adapter.deleteDeltaVmBackups([oldEntries[i]])
}
return {
size,
}
})
// delete sequentially from newest to oldest to avoid unnecessary merges
for (let i = oldEntries.length; i-- > 0; ) {
await adapter.deleteDeltaVmBackups([oldEntries[i]])
}
}
async _transfer({ timestamp, deltaExport, sizeContainers }) {
@@ -206,7 +200,7 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
}
// set the correct UUID in the VHD
const vhd = new Vhd(handler, path)
const vhd = new VhdFile(handler, path)
await vhd.readHeaderAndFooter()
vhd.footer.uuid = packUuid(vdi.uuid)
await vhd.readBlockAllocationTable() // required by writeFooter()

View File

@@ -1,34 +1,51 @@
const { createLogger } = require('@xen-orchestra/log')
const { join } = require('path')
const { getVmBackupDir } = require('../_getVmBackupDir.js')
const { BACKUP_DIR, getVmBackupDir } = require('../_getVmBackupDir.js')
const MergeWorker = require('../merge-worker/index.js')
const { formatFilenameDate } = require('../_filenameDate.js')
const { warn } = createLogger('xo:backups:MixinBackupWriter')
exports.MixinBackupWriter = (BaseClass = Object) =>
class MixinBackupWriter extends BaseClass {
#lock
#vmBackupDir
constructor({ remoteId, ...rest }) {
super(rest)
this._adapter = rest.backup.remoteAdapters[remoteId]
this._remoteId = remoteId
this._lock = undefined
this.#vmBackupDir = getVmBackupDir(this._backup.vm.uuid)
}
_cleanVm(options) {
return this._adapter
.cleanVm(getVmBackupDir(this._backup.vm.uuid), { ...options, fixMetadata: true, onLog: warn, lock: false })
.cleanVm(this.#vmBackupDir, { ...options, fixMetadata: true, onLog: warn, lock: false })
.catch(warn)
}
async beforeBackup() {
const { handler } = this._adapter
const vmBackupDir = getVmBackupDir(this._backup.vm.uuid)
const vmBackupDir = this.#vmBackupDir
await handler.mktree(vmBackupDir)
this._lock = await handler.lock(vmBackupDir)
this.#lock = await handler.lock(vmBackupDir)
}
async afterBackup() {
await this._cleanVm({ remove: true, merge: true })
await this._lock.dispose()
const { disableMergeWorker } = this._backup.config
const { merge } = await this._cleanVm({ remove: true, merge: disableMergeWorker })
await this.#lock.dispose()
// merge worker only compatible with local remotes
const { handler } = this._adapter
if (merge && !disableMergeWorker && typeof handler._getRealPath === 'function') {
await handler.outputFile(join(MergeWorker.CLEAN_VM_QUEUE, formatFilenameDate(new Date())), this._backup.vm.uuid)
const remotePath = handler._getRealPath()
await MergeWorker.run(remotePath)
}
}
}

View File

@@ -1,4 +1,4 @@
const Vhd = require('vhd-lib').default
const Vhd = require('vhd-lib').VhdFile
exports.checkVhd = async function checkVhd(handler, path) {
await new Vhd(handler, path).readHeaderAndFooter()

View File

@@ -18,7 +18,7 @@
"preferGlobal": true,
"dependencies": {
"golike-defer": "^0.5.1",
"xen-api": "^0.34.3"
"xen-api": "^0.35.1"
},
"scripts": {
"postversion": "npm publish"

View File

@@ -2,6 +2,8 @@
import { createSchedule } from './'
jest.useFakeTimers()
const wrap = value => () => value
describe('issues', () => {

View File

@@ -17,7 +17,7 @@
"node": ">=14"
},
"dependencies": {
"@marsaud/smb2": "^0.17.2",
"@marsaud/smb2": "^0.18.0",
"@sindresorhus/df": "^3.1.1",
"@sullux/aws-sdk": "^1.0.5",
"@vates/coalesce-calls": "^0.1.0",
@@ -29,7 +29,7 @@
"get-stream": "^6.0.0",
"limit-concurrency-decorator": "^0.5.0",
"lodash": "^4.17.4",
"promise-toolbox": "^0.19.2",
"promise-toolbox": "^0.20.0",
"proper-lockfile": "^4.1.2",
"readable-stream": "^3.0.6",
"through2": "^4.0.2",

View File

@@ -183,9 +183,21 @@ export default class S3Handler extends RemoteHandlerAbstract {
}
const params = this._createParams(file)
params.Range = `bytes=${position}-${position + buffer.length - 1}`
const result = await this._s3.getObject(params)
result.Body.copy(buffer)
return { bytesRead: result.Body.length, buffer }
try {
const result = await this._s3.getObject(params)
result.Body.copy(buffer)
return { bytesRead: result.Body.length, buffer }
} catch (e) {
if (e.code === 'NoSuchKey') {
if (await this._isNotEmptyDir(file)) {
const error = new Error(`${file} is a directory`)
error.code = 'EISDIR'
error.path = file
throw error
}
}
throw e
}
}
async _rmdir(path) {
@@ -199,6 +211,23 @@ export default class S3Handler extends RemoteHandlerAbstract {
// nothing to do, directories do not exist, they are part of the files' path
}
// reimplement _rmTree to efficiently handle paths with more than 1000 entries in trees
// @todo : use parallel processing for unlink
async _rmTree(path) {
let NextContinuationToken
do {
const result = await this._s3.listObjectsV2({
Bucket: this._bucket,
Prefix: this._dir + path + '/',
ContinuationToken: NextContinuationToken,
})
NextContinuationToken = result.isTruncated ? null : result.NextContinuationToken
for (const path of result.Contents) {
await this._unlink(path)
}
} while (NextContinuationToken !== null)
}
async _write(file, buffer, position) {
if (typeof file !== 'string') {
file = file.fd

View File

@@ -48,6 +48,10 @@ configure([
// if filter is a string, then it is pattern
// (https://github.com/visionmedia/debug#wildcards) which is
// matched against the namespace of the logs
//
// If it's an array, it will be handled as an array of filters
// and the transport will be used if any one of them match the
// current log
filter: process.env.DEBUG,
transport: transportConsole(),

View File

@@ -24,7 +24,7 @@
},
"dependencies": {
"lodash": "^4.17.4",
"promise-toolbox": "^0.19.2"
"promise-toolbox": "^0.20.0"
},
"scripts": {
"postversion": "npm publish"

View File

@@ -23,7 +23,7 @@
"xo-proxy-cli": "dist/index.js"
},
"engines": {
"node": ">=8.10"
"node": ">=12"
},
"dependencies": {
"@iarna/toml": "^2.2.0",
@@ -33,12 +33,12 @@
"content-type": "^1.0.4",
"cson-parser": "^4.0.7",
"getopts": "^2.2.3",
"http-request-plus": "^0.12",
"http-request-plus": "^0.13.0",
"json-rpc-protocol": "^0.13.1",
"promise-toolbox": "^0.19.2",
"promise-toolbox": "^0.20.0",
"pump": "^3.0.0",
"pumpify": "^2.0.1",
"split2": "^3.1.1"
"split2": "^4.1.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",

View File

@@ -18,6 +18,7 @@ keepAliveInterval = 10e3
#
# https://en.wikipedia.org/wiki/File-system_permissions#Numeric_notation
dirMode = 0o700
disableMergeWorker = false
snapshotNameLabelTpl = '[XO Backup {job.name}] {vm.name_label}'
[backups.defaultSettings]
@@ -59,6 +60,13 @@ cert = '/var/lib/xo-proxy/certificate.pem'
key = '/var/lib/xo-proxy/key.pem'
port = 443
[logs]
# Display all logs matching this filter, regardless of their level
#filter = 'xo:backups:*'
# Display all logs with level >=, regardless of their namespace
level = 'info'
[remoteOptions]
mountsDir = '/run/xo-proxy/mounts'

View File

@@ -1,7 +1,7 @@
{
"private": true,
"name": "@xen-orchestra/proxy",
"version": "0.14.7",
"version": "0.15.2",
"license": "AGPL-3.0-or-later",
"description": "XO Proxy used to remotely execute backup jobs",
"keywords": [
@@ -31,13 +31,13 @@
"@vates/decorate-with": "^0.1.0",
"@vates/disposable": "^0.1.1",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/backups": "^0.13.0",
"@xen-orchestra/backups": "^0.15.1",
"@xen-orchestra/fs": "^0.18.0",
"@xen-orchestra/log": "^0.3.0",
"@xen-orchestra/mixin": "^0.1.0",
"@xen-orchestra/mixins": "^0.1.1",
"@xen-orchestra/self-signed": "^0.1.0",
"@xen-orchestra/xapi": "^0.7.0",
"@xen-orchestra/xapi": "^0.8.0",
"ajv": "^8.0.3",
"app-conf": "^0.9.0",
"async-iterator-to-stream": "^1.1.0",
@@ -54,11 +54,11 @@
"lodash": "^4.17.10",
"node-zone": "^0.4.0",
"parse-pairs": "^1.0.0",
"promise-toolbox": "^0.19.2",
"promise-toolbox": "^0.20.0",
"source-map-support": "^0.5.16",
"stoppable": "^1.0.6",
"xdg-basedir": "^4.0.0",
"xen-api": "^0.34.3",
"xen-api": "^0.35.1",
"xo-common": "^0.7.0"
},
"devDependencies": {

View File

@@ -14,25 +14,30 @@ import { createLogger } from '@xen-orchestra/log'
const { debug, warn } = createLogger('xo:proxy:api')
const ndJsonStream = asyncIteratorToStream(async function* (responseId, iterable) {
let headerSent = false
const ndJsonStream = asyncIteratorToStream(async function*(responseId, iterable) {
try {
for await (const data of iterable) {
if (!headerSent) {
yield format.response(responseId, { $responseType: 'ndjson' }) + '\n'
headerSent = true
}
let cursor, iterator
try {
const getIterator = iterable[Symbol.iterator] ?? iterable[Symbol.asyncIterator]
iterator = getIterator.call(iterable)
cursor = await iterator.next()
yield format.response(responseId, { $responseType: 'ndjson' }) + '\n'
} catch (error) {
yield format.error(responseId, error)
throw error
}
while (!cursor.done) {
try {
yield JSON.stringify(data) + '\n'
yield JSON.stringify(cursor.value) + '\n'
} catch (error) {
warn('ndJsonStream, item error', { error })
}
cursor = await iterator.next()
}
} catch (error) {
warn('ndJsonStream, fatal error', { error })
if (!headerSent) {
yield format.error(responseId, error)
}
}
})
@@ -47,7 +52,7 @@ export default class Api {
ctx.req.setTimeout(0)
const profile = await app.authentication.findProfile({
authenticationToken: ctx.cookies.get('authenticationToken'),
authenticationToken: ctx.cookies.get('authenticationToken')
})
if (profile === undefined) {
ctx.status = 401
@@ -118,7 +123,7 @@ export default class Api {
this.addMethods({
system: {
getMethodsInfo: [
function* () {
function*() {
const methods = this._methods
for (const name in methods) {
const { description, params = {} } = methods[name]
@@ -126,25 +131,25 @@ export default class Api {
}
}.bind(this),
{
description: 'returns the signatures of all available API methods',
},
description: 'returns the signatures of all available API methods'
}
],
getServerVersion: [
() => appVersion,
{
description: 'returns the version of xo-server',
},
description: 'returns the version of xo-server'
}
],
listMethods: [
function* () {
function*() {
const methods = this._methods
for (const name in methods) {
yield name
}
}.bind(this),
{
description: 'returns the name of all available API methods',
},
description: 'returns the name of all available API methods'
}
],
methodSignature: [
({ method: name }) => {
@@ -159,14 +164,14 @@ export default class Api {
{
description: 'returns the signature of an API method',
params: {
method: { type: 'string' },
},
},
],
method: { type: 'string' }
}
}
]
},
test: {
range: [
function* ({ start = 0, stop, step }) {
function*({ start = 0, stop, step }) {
if (step === undefined) {
step = start > stop ? -1 : 1
}
@@ -184,11 +189,11 @@ export default class Api {
params: {
start: { optional: true, type: 'number' },
step: { optional: true, type: 'number' },
stop: { type: 'number' },
},
},
],
},
stop: { type: 'number' }
}
}
]
}
})
}
@@ -215,7 +220,7 @@ export default class Api {
return required
}),
type: 'object',
type: 'object'
})
const m = params => {

View File

@@ -0,0 +1,17 @@
import transportConsole from '@xen-orchestra/log/transports/console.js'
import { configure } from '@xen-orchestra/log/configure.js'
export default class Logs {
constructor(app) {
const transport = transportConsole()
app.config.watch('logs', ({ filter, level }) => {
configure([
{
filter: [process.env.DEBUG, filter],
level,
transport,
},
])
})
}
}

View File

@@ -35,7 +35,7 @@
"form-data": "^4.0.0",
"fs-extra": "^10.0.0",
"get-stream": "^6.0.0",
"http-request-plus": "^0.12",
"http-request-plus": "^0.13.0",
"human-format": "^0.11.0",
"l33teral": "^3.0.3",
"lodash": "^4.17.4",

View File

@@ -1,6 +1,6 @@
{
"name": "@xen-orchestra/xapi",
"version": "0.7.0",
"version": "0.8.0",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/xapi",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
@@ -25,7 +25,7 @@
"xo-common": "^0.7.0"
},
"peerDependencies": {
"xen-api": "^0.34.3"
"xen-api": "^0.35.1"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
@@ -44,7 +44,7 @@
"d3-time-format": "^3.0.0",
"golike-defer": "^0.5.1",
"lodash": "^4.17.15",
"promise-toolbox": "^0.19.2"
"promise-toolbox": "^0.20.0"
},
"private": false,
"license": "AGPL-3.0-or-later",

View File

@@ -1,6 +1,7 @@
const CancelToken = require('promise-toolbox/CancelToken.js')
const pCatch = require('promise-toolbox/catch.js')
const pRetry = require('promise-toolbox/retry.js')
const { decorateWith } = require('@vates/decorate-with')
const extractOpaqueRef = require('./_extractOpaqueRef.js')
@@ -11,10 +12,13 @@ module.exports = class Vdi {
return extractOpaqueRef(await this.callAsync('VDI.clone', vdiRef))
}
// work around a race condition in XCP-ng/XenServer where the disk is not fully unmounted yet
@decorateWith(pRetry.wrap, function () {
return this._vdiDestroyRetryWhenInUse
})
async destroy(vdiRef) {
await pCatch.call(
// work around a race condition in XCP-ng/XenServer where the disk is not fully unmounted yet
pRetry(() => this.callAsync('VDI.destroy', vdiRef), this._vdiDestroyRetryWhenInUse),
this.callAsync('VDI.destroy', vdiRef),
// if this VDI is not found, consider it destroyed
{ code: 'HANDLE_INVALID' },
noop

View File

@@ -1,18 +1,63 @@
## **5.64.0** (2021-10-29)
# ChangeLog
## **next**
<img id="latest" src="https://badgen.net/badge/channel/latest/yellow" alt="Channel: latest" />
## Highlights
- [Netbox] Support older versions of Netbox and prevent "active is not a valid choice" error [#5898](https://github.com/vatesfr/xen-orchestra/issues/5898) (PR [#5946](https://github.com/vatesfr/xen-orchestra/pull/5946))
- [Tasks] Filter out short tasks using a default filter (PR [#5921](https://github.com/vatesfr/xen-orchestra/pull/5921))
- [Host] Handle evacuation failure during host shutdown (PR [#5966](https://github.com/vatesfr/xen-orchestra/pull/#5966))
- [Menu] Notify user when proxies need to be upgraded (PR [#5930](https://github.com/vatesfr/xen-orchestra/pull/5930))
- [Servers] Ability to use an HTTP proxy between XO and a server (PR [#5958](https://github.com/vatesfr/xen-orchestra/pull/5958))
- [VM/export] Ability to copy the export URL (PR [#5948](https://github.com/vatesfr/xen-orchestra/pull/5948))
- [Pool/advanced] Ability to define network for importing/exporting VMs/VDIs (PR [#5957](https://github.com/vatesfr/xen-orchestra/pull/5957))
- [Host/advanced] Add button to enable/disable the host (PR [#5952](https://github.com/vatesfr/xen-orchestra/pull/5952))
- [Backups] Enable merge worker by default
### Enhancements
- [Jobs] Ability to copy a job ID (PR [#5951](https://github.com/vatesfr/xen-orchestra/pull/5951))
### Bug fixes
- [Backups] Delete unused snapshots related to other schedules (even no longer existing) (PR [#5949](https://github.com/vatesfr/xen-orchestra/pull/5949))
- [Jobs] Fix `job.runSequence` method (PR [#5944](https://github.com/vatesfr/xen-orchestra/pull/5944))
- [Netbox] Fix error when testing plugin on versions older than 2.10 (PR [#5963](https://github.com/vatesfr/xen-orchestra/pull/5963))
- [Snapshot] Fix "Create VM from snapshot" creating a template instead of a VM (PR [#5955](https://github.com/vatesfr/xen-orchestra/pull/5955))
- [Host/Logs] Improve the display of log content (PR [#5943](https://github.com/vatesfr/xen-orchestra/pull/5943))
- [XOA licenses] Fix expiration date displaying "Invalid date" in some rare cases (PR [#5967](https://github.com/vatesfr/xen-orchestra/pull/5967))
- [API/pool.listPoolsMatchingCriteria] Fix `checkSrName`/`checkPoolName` `is not a function` error
### Released packages
- xo-server-netbox 0.3.3
- vhd-lib 1.3.0
- xen-api 0.35.1
- @xen-orchestra/xapi 0.8.0
- @xen-orchestra/backups 0.15.1
- @xen-orchestra/proxy 0.15.2
- vhd-cli 0.5.0
- xapi-explore-sr 0.4.0
- xo-server 5.83.0
- xo-web 5.89.0
## **5.63.0** (2021-09-30)
<img id="stable" src="https://badgen.net/badge/channel/stable/green" alt="Channel: stable" />
### Highlights
- [Backup] Go back to previous page instead of going to the overview after editing a job: keeps current filters and page (PR [#5913](https://github.com/vatesfr/xen-orchestra/pull/5913))
- [Health] Do not take into consideration duplicated MAC addresses from CR VMs (PR [#5916](https://github.com/vatesfr/xen-orchestra/pull/5916))
- [Health] Ability to filter duplicated MAC addresses by running VMs (PR [#5917](https://github.com/vatesfr/xen-orchestra/pull/5917))
- [Tables] Move the search bar and pagination to the top of the table (PR [#5914](https://github.com/vatesfr/xen-orchestra/pull/5914))
- [Netbox] Handle nested prefixes by always assigning an IP to the smallest prefix it matches (PR [#5908](https://github.com/vatesfr/xen-orchestra/pull/5908))
### Bug fixes
- [SSH keys] Allow SSH key to be broken anywhere to avoid breaking page formatting (Thanks [@tstivers1990](https://github.com/tstivers1990)!) [#5891](https://github.com/vatesfr/xen-orchestra/issues/5891) (PR [#5892](https://github.com/vatesfr/xen-orchestra/pull/5892))
- [Netbox] Handle nested prefixes by always assigning an IP to the smallest prefix it matches (PR [#5908](https://github.com/vatesfr/xen-orchestra/pull/5908))
- [Netbox] Better handling and error messages when encountering issues due to UUID custom field not being configured correctly [#5905](https://github.com/vatesfr/xen-orchestra/issues/5905) [#5806](https://github.com/vatesfr/xen-orchestra/issues/5806) [#5834](https://github.com/vatesfr/xen-orchestra/issues/5834) (PR [#5909](https://github.com/vatesfr/xen-orchestra/pull/5909))
- [New VM] Don't send network config if untouched as all commented config can make Cloud-init fail [#5918](https://github.com/vatesfr/xen-orchestra/issues/5918) (PR [#5923](https://github.com/vatesfr/xen-orchestra/pull/5923))
@@ -42,8 +87,6 @@
## **5.62.0** (2021-08-31)
<img id="latest" src="https://badgen.net/badge/channel/latest/yellow" alt="Channel: latest" />
### Highlights
- [Host] Add warning in case of unmaintained host version [#5840](https://github.com/vatesfr/xen-orchestra/issues/5840) (PR [#5847](https://github.com/vatesfr/xen-orchestra/pull/5847))
@@ -76,8 +119,6 @@
## **5.61.0** (2021-07-30)
<img id="stable" src="https://badgen.net/badge/channel/stable/green" alt="Channel: stable" />
### Highlights
- [SR/disks] Display base copies' active VDIs (PR [#5826](https://github.com/vatesfr/xen-orchestra/pull/5826))
@@ -150,7 +191,7 @@
- [Smart backup] Report missing pools [#2844](https://github.com/vatesfr/xen-orchestra/issues/2844) (PR [#5768](https://github.com/vatesfr/xen-orchestra/pull/5768))
- [Metadata Backup] Add a warning on restoring a metadata backup (PR [#5769](https://github.com/vatesfr/xen-orchestra/pull/5769))
- [Netbox] [Plugin](https://xen-orchestra.com/docs/advanced.html#netbox) to synchronize pools, VMs and IPs with [Netbox](https://netbox.readthedocs.io/en/stable/) (PR [#5783](https://github.com/vatesfr/xen-orchestra/pull/5783))
- [Netbox][plugin](https://xen-orchestra.com/docs/advanced.html#netbox) to synchronize pools, VMs and IPs with [Netbox](https://netbox.readthedocs.io/en/stable/) (PR [#5783](https://github.com/vatesfr/xen-orchestra/pull/5783))
### Enhancements

View File

@@ -11,6 +11,8 @@
> Users must be able to say: “I had this issue, happy to know it's fixed”
[Import/VM] Fix the import of OVA files (PR [#5976](https://github.com/vatesfr/xen-orchestra/pull/5976))
### Packages to release
> Packages will be released in the order they are here, therefore, they should
@@ -27,3 +29,8 @@
> - major: if the change breaks compatibility
>
> In case of conflict, the highest (lowest in previous list) `$version` wins.
- @xen-orchestra/fs minor
- vhd-lib minor
- xo-server patch
- vhd-cli minor

View File

@@ -1,27 +0,0 @@
<!--
Welcome to the issue section of Xen Orchestra!
Here you can:
- report an issue
- propose an enhancement
- ask a question
Please, respect this template as much as possible, it helps us sort
the issues :)
-->
### Context
- **XO origin**: the sources / XO Appliance
- **Versions**:
- Node: **FILL HERE**
- xo-web: **FILL HERE**
- xo-server: **FILL HERE**
### Expected behavior
<!-- What you expect to happen -->
### Current behavior
<!-- What is actually happening -->

View File

@@ -327,6 +327,8 @@ Synchronize your pools, VMs, network interfaces and IP addresses with your [Netb
![](./assets/netbox.png)
### Netbox side
- Go to your Netbox interface
- Configure prefixes:
- Go to IPAM > Prefixes > Add
@@ -339,13 +341,19 @@ XO will try to find the right prefix for each IP address. If it can't find a pre
- Generate a token:
- Go to Admin > Tokens > Add token
- Create a token with "Write enabled"
- Add a UUID custom field:
- Add a UUID custom field (for **Netbox 2.x**):
- Got to Admin > Custom fields > Add custom field
- Create a custom field called "uuid" (lower case!)
- Assign it to object types `virtualization > cluster` and `virtualization > virtual machine`
![](./assets/customfield.png)
:::tip
In Netbox 3.x, custom fields can be found directly in the site (no need to go in the admin section). It's available in "Other/Customization/Custom Fields". After creation of the `uuid` field, assign it to the object types `virtualization > cluster` and `virtualization > virtual machine`.
:::
### In Xen Orchestra
- Go to Xen Orchestra > Settings > Plugins > Netbox and fill out the configuration:
- Endpoint: the URL of your Netbox instance (e.g.: `https://netbox.company.net`)
- Unauthorized certificate: only for HTTPS, enable this option if your Netbox instance uses a self-signed SSL certificate

View File

@@ -286,8 +286,9 @@ When it's done exporting, we'll remove the snapshot. Note: this operation will t
Concurrency is a parameter that let you define how many VMs your backup job will manage simultaneously.
:::tip
- Default concurrency value is 2 if left empty.
:::
:::
Let's say you want to backup 50 VMs (each with 1x disk) at 3:00 AM. There are **2 different strategies**:
@@ -297,7 +298,7 @@ Let's say you want to backup 50 VMs (each with 1x disk) at 3:00 AM. There are **
The first purely sequential strategy will lead to the fact that: **you can't predict when a snapshot of your data will occur**. Because you can't predict the first VM export time (let's say 3 hours), then your second VM will have its snapshot taken 3 hours later, at 6 AM.
:::tip
If you need your backup to be done at a specific time you should consider creating a specific backup task for this VM.
If you need your backup to be done at a specific time you should consider creating a specific backup task for this VM.
:::
Strategy number 2 is to parallelise: all the snapshots will be taken at 3 AM. However **it's risky without limits**: it means potentially doing 50 snapshots or more at once on the same storage. **Since XenServer doesn't have a queue**, it will try to do all of them at once. This is also prone to race conditions and could cause crashes on your storage.
@@ -312,6 +313,7 @@ You should be aware of your hardware limitation when defining the best concurren
The best way to define the best concurrency for you is by increasing it slowly and watching the result on backup time.
So to summarize, if you set your concurrency at 6 and you have 20 Vms to backup the process will be the following:
- We start the backup of the first 6 VMs.
- When one VM backup as ended we will launch the next VM backup.
- We're keep launching new VM backup until the 20 VMs are finished, keeping 6 backups running.

View File

@@ -61,7 +61,7 @@ Please only use this if you have issues with [the default way to deploy XOA](ins
Alternatively, you can deploy it by connecting to your XenServer host and executing the following:
```
bash -c "$(curl -sS https://xoa.io/deploy)"
bash -c "$(wget -qO- https://xoa.io/deploy)"
```
:::tip
@@ -78,7 +78,7 @@ curl: (35) error:1407742E:SSL routines:SSL23_GET_SERVER_HELLO:tlsv1 alert protoc
It means that the secure HTTPS protocol is not supported, you can bypass this using the unsecure command instead:
```
bash -c "$(curl -sS http://xoa.io/deploy)"
bash -c "$(wget -qO- http://xoa.io/deploy)"
```
:::
@@ -106,6 +106,7 @@ In that case, you already set the password for `xoa` user. If you forgot it, see
If you connect via SSH or console for the first time without using our [web deploy form](https://xen-orchestra.com/#!/xoa), be aware **there's NO default password set for security reasons**. To set it, you need to connect to your host to find the XOA VM UUID (eg via `xe vm-list`).
Then replace `<UUID>` with the previously find UUID, and `<password>` with your password:
```
xe vm-param-set uuid=<UUID> xenstore-data:vm-data/system-account-xoa-password=<password>
```

View File

@@ -318,7 +318,7 @@ XOSAN is a 100% software defined solution for XenServer hyperconvergence. You ca
You will need to be registered on our website in order to use Xen Orchestra. If you are not yet registered, [here is the way](https://xen-orchestra.com/#!/signup)
SSH in your XenServer and use the command line `bash -c "$(curl -sS https://xoa.io/deploy)"` - it will deploy Xen Orchestra Appliance on your XenServer infrastructure which is required to use XOSAN.
SSH in your XenServer and use the command line `bash -c "$(wget -qO- https://xoa.io/deploy)"` - it will deploy Xen Orchestra Appliance on your XenServer infrastructure which is required to use XOSAN.
> Note: You can also download the XVA file and follow [these instructions](https://xen-orchestra.com/docs/xoa.html#the-alternative).

View File

@@ -3,7 +3,7 @@
"@babel/core": "^7.0.0",
"@babel/eslint-parser": "^7.13.8",
"@babel/register": "^7.0.0",
"babel-jest": "^26.0.1",
"babel-jest": "^27.3.1",
"benchmark": "^2.1.4",
"eslint": "^7.6.0",
"eslint-config-prettier": "^8.1.0",
@@ -18,11 +18,11 @@
"globby": "^11.0.1",
"handlebars": "^4.7.6",
"husky": "^4.2.5",
"jest": "^26.0.1",
"jest": "^27.3.1",
"lint-staged": "^11.1.2",
"lodash": "^4.17.4",
"prettier": "^2.0.5",
"promise-toolbox": "^0.19.2",
"promise-toolbox": "^0.20.0",
"sorted-object": "^2.0.1",
"vuepress": "^1.4.1"
},
@@ -61,8 +61,7 @@
"/xo-server-test/",
"/xo-web/"
],
"testRegex": "\\.spec\\.js$",
"timers": "fake"
"testRegex": "\\.spec\\.js$"
},
"lint-staged": {
"*.{md,ts,ts}": "prettier --write"

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "vhd-cli",
"version": "0.4.0",
"version": "0.5.0",
"license": "ISC",
"description": "Tools to read/create and merge VHD files",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/vhd-cli",
@@ -28,7 +28,7 @@
"cli-progress": "^3.1.0",
"exec-promise": "^0.7.0",
"getopts": "^2.2.3",
"vhd-lib": "^1.2.0"
"vhd-lib": "^1.3.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
@@ -37,7 +37,7 @@
"cross-env": "^7.0.2",
"execa": "^5.0.0",
"index-modules": "^0.4.3",
"promise-toolbox": "^0.19.2",
"promise-toolbox": "^0.20.0",
"rimraf": "^3.0.0",
"tmp": "^0.2.1"
},

View File

@@ -1,9 +1,9 @@
import Vhd, { checkVhdChain } from 'vhd-lib'
import { VhdFile, checkVhdChain } from 'vhd-lib'
import getopts from 'getopts'
import { getHandler } from '@xen-orchestra/fs'
import { resolve } from 'path'
const checkVhd = (handler, path) => new Vhd(handler, path).readHeaderAndFooter()
const checkVhd = (handler, path) => new VhdFile(handler, path).readHeaderAndFooter()
export default async rawArgs => {
const { chain, _: args } = getopts(rawArgs, {

View File

@@ -0,0 +1,81 @@
import { getSyncedHandler } from '@xen-orchestra/fs'
import { openVhd, Constants } from 'vhd-lib'
import Disposable from 'promise-toolbox/Disposable'
import omit from 'lodash/omit'
const deepCompareObjects = function (src, dest, path) {
for (const key of Object.keys(src)) {
const srcValue = src[key]
const destValue = dest[key]
if (srcValue !== destValue) {
const srcType = typeof srcValue
const destType = typeof destValue
if (srcType !== destType) {
throw new Error(`key ${path + '/' + key} is of type *${srcType}* in source and *${destType}* in dest`)
}
if (srcType !== 'object') {
throw new Error(`key ${path + '/' + key} is *${srcValue}* in source and *${destValue}* in dest`)
}
if (Buffer.isBuffer(srcValue)) {
if (!(Buffer.isBuffer(destValue) && srcValue.equals(destValue))) {
throw new Error(`key ${path + '/' + key} is buffer in source that does not equal dest`)
}
} else {
deepCompareObjects(src[key], dest[key], path + '/' + key)
}
}
}
}
export default async args => {
if (args.length < 4 || args.some(_ => _ === '-h' || _ === '--help')) {
return `Usage: compare <sourceRemoteUrl> <source VHD> <destionationRemoteUrl> <destination> `
}
const [sourceRemoteUrl, sourcePath, destRemoteUrl, destPath] = args
await Disposable.use(async function* () {
const sourceHandler = yield getSyncedHandler({ url: sourceRemoteUrl })
const src = yield openVhd(sourceHandler, sourcePath)
const destHandler = yield getSyncedHandler({ url: destRemoteUrl })
const dest = yield openVhd(destHandler, destPath)
// parent locator entries contains offset that can be different without impacting the vhd
// we'll compare them later
// table offset and checksum are also implementation specific
const ignoredEntries = ['checksum', 'parentLocatorEntry', 'tableOffset']
deepCompareObjects(omit(src.header, ignoredEntries), omit(dest.header, ignoredEntries), 'header')
deepCompareObjects(src.footer, dest.footer, 'footer')
await src.readBlockAllocationTable()
await dest.readBlockAllocationTable()
for (let i = 0; i < src.header.maxTableEntries; i++) {
if (src.containsBlock(i)) {
if (dest.containsBlock(i)) {
const srcBlock = await src.readBlock(i)
const destBlock = await dest.readBlock(i)
if (!srcBlock.buffer.equals(destBlock.buffer)) {
throw new Error(`Block ${i} has different data in src and dest`)
}
} else {
throw new Error(`Block ${i} is present in source but not in dest `)
}
} else if (dest.containsBlock(i)) {
throw new Error(`Block ${i} is present in dest but not in source `)
}
}
for (let parentLocatorId = 0; parentLocatorId < Constants.PARENT_LOCATOR_ENTRIES; parentLocatorId++) {
const srcParentLocator = await src.readParentLocator(parentLocatorId)
const destParentLocator = await dest.readParentLocator(parentLocatorId)
if (!srcParentLocator.data || !srcParentLocator.data.equals(destParentLocator.data)) {
console.log(srcParentLocator, destParentLocator)
throw new Error(`Parent Locator ${parentLocatorId} has different data in src and dest`)
}
}
console.log('there is no difference between theses vhd')
})
}

View File

@@ -0,0 +1,50 @@
import { getSyncedHandler } from '@xen-orchestra/fs'
import { openVhd, VhdFile, VhdDirectory } from 'vhd-lib'
import Disposable from 'promise-toolbox/Disposable'
import getopts from 'getopts'
export default async rawArgs => {
const {
directory,
help,
_: args,
} = getopts(rawArgs, {
alias: {
directory: 'd',
help: 'h',
},
boolean: ['directory', 'force'],
default: {
directory: false,
help: false,
},
})
if (args.length < 4 || help) {
return `Usage: index.js copy <sourceRemoteUrl> <source VHD> <destionationRemoteUrl> <destination> --directory`
}
const [sourceRemoteUrl, sourcePath, destRemoteUrl, destPath] = args
await Disposable.use(async function* () {
const sourceHandler = yield getSyncedHandler({ url: sourceRemoteUrl })
const src = yield openVhd(sourceHandler, sourcePath)
await src.readBlockAllocationTable()
const destHandler = yield getSyncedHandler({ url: destRemoteUrl })
const dest = yield directory ? VhdDirectory.create(destHandler, destPath) : VhdFile.create(destHandler, destPath)
// copy data
dest.header = src.header
dest.footer = src.footer
for await (const block of src.blocks()) {
await dest.writeEntireBlock(block)
}
// copy parent locators
for (let parentLocatorId = 0; parentLocatorId < 8; parentLocatorId++) {
const parentLocator = await src.readParentLocator(parentLocatorId)
await dest.writeParentLocator(parentLocator)
}
await dest.writeFooter()
await dest.writeHeader()
await dest.writeBlockAllocationTable()
})
}

View File

@@ -1,9 +1,9 @@
import Vhd from 'vhd-lib'
import { VhdFile } from 'vhd-lib'
import { getHandler } from '@xen-orchestra/fs'
import { resolve } from 'path'
export default async args => {
const vhd = new Vhd(getHandler({ url: 'file:///' }), resolve(args[0]))
const vhd = new VhdFile(getHandler({ url: 'file:///' }), resolve(args[0]))
try {
await vhd.readHeaderAndFooter()

View File

@@ -2,7 +2,7 @@ import { asCallback, fromCallback, fromEvent } from 'promise-toolbox'
import { getHandler } from '@xen-orchestra/fs'
import { relative } from 'path'
import { start as createRepl } from 'repl'
import Vhd, * as vhdLib from 'vhd-lib'
import * as vhdLib from 'vhd-lib'
export default async args => {
const cwd = process.cwd()
@@ -14,7 +14,7 @@ export default async args => {
})
Object.assign(repl.context, vhdLib)
repl.context.handler = handler
repl.context.open = path => new Vhd(handler, relative(cwd, path))
repl.context.open = path => new vhdLib.VhdFile(handler, relative(cwd, path))
// Make the REPL waits for promise completion.
repl.eval = (evaluate => (cmd, context, filename, cb) => {

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "vhd-lib",
"version": "1.2.0",
"version": "1.3.0",
"license": "AGPL-3.0-or-later",
"description": "Primitives for VHD file handling",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/vhd-lib",
@@ -22,7 +22,7 @@
"fs-extra": "^10.0.0",
"limit-concurrency-decorator": "^0.5.0",
"lodash": "^4.17.4",
"promise-toolbox": "^0.19.2",
"promise-toolbox": "^0.20.0",
"struct-fu": "^1.2.0",
"uuid": "^8.3.1"
},

View File

@@ -0,0 +1,138 @@
/* eslint-env jest */
import rimraf from 'rimraf'
import tmp from 'tmp'
import fs from 'fs-extra'
import { getSyncedHandler } from '@xen-orchestra/fs'
import { Disposable, pFromCallback } from 'promise-toolbox'
import { openVhd } from '../index'
import { createRandomFile, convertFromRawToVhd, createRandomVhdDirectory } from '../tests/utils'
import { VhdAbstract } from './VhdAbstract'
let tempDir
jest.setTimeout(60000)
beforeEach(async () => {
tempDir = await pFromCallback(cb => tmp.dir(cb))
})
afterEach(async () => {
await pFromCallback(cb => rimraf(tempDir, cb))
})
test('It creates an alias', async () => {
await Disposable.use(async function* () {
const handler = yield getSyncedHandler({ url: 'file://' + tempDir })
const aliasPath = `alias/alias.alias.vhd`
const aliasFsPath = `${tempDir}/${aliasPath}`
await fs.mkdirp(`${tempDir}/alias`)
const testOneCombination = async ({ targetPath, targetContent }) => {
await VhdAbstract.createAlias(handler, aliasPath, targetPath)
// alias file is created
expect(await fs.exists(aliasFsPath)).toEqual(true)
// content is the target path relative to the alias location
const content = await fs.readFile(aliasFsPath, 'utf-8')
expect(content).toEqual(targetContent)
// create alias fails if alias already exists, remove it before next loop step
await fs.unlink(aliasFsPath)
}
const combinations = [
{ targetPath: `targets.vhd`, targetContent: `../targets.vhd` },
{ targetPath: `alias/targets.vhd`, targetContent: `targets.vhd` },
{ targetPath: `alias/sub/targets.vhd`, targetContent: `sub/targets.vhd` },
{ targetPath: `sibling/targets.vhd`, targetContent: `../sibling/targets.vhd` },
]
for (const { targetPath, targetContent } of combinations) {
await testOneCombination({ targetPath, targetContent })
}
})
})
test('alias must have *.alias.vhd extension', async () => {
await Disposable.use(async function* () {
const handler = yield getSyncedHandler({ url: `file://${tempDir}` })
const aliasPath = 'invalidalias.vhd'
const targetPath = 'targets.vhd'
expect(async () => await VhdAbstract.createAlias(handler, aliasPath, targetPath)).rejects.toThrow()
expect(await fs.exists(aliasPath)).toEqual(false)
})
})
test('alias must not be chained', async () => {
await Disposable.use(async function* () {
const handler = yield getSyncedHandler({ url: `file://${tempDir}` })
const aliasPath = 'valid.alias.vhd'
const targetPath = 'an.other.valid.alias.vhd'
expect(async () => await VhdAbstract.createAlias(handler, aliasPath, targetPath)).rejects.toThrow()
expect(await fs.exists(aliasPath)).toEqual(false)
})
})
test('It rename and unlink a VHDFile', async () => {
const initalSize = 4
const rawFileName = `${tempDir}/randomfile`
await createRandomFile(rawFileName, initalSize)
await convertFromRawToVhd(rawFileName, `${tempDir}/randomfile.vhd`)
await Disposable.use(async function* () {
const handler = yield getSyncedHandler({ url: `file://${tempDir}` })
const { size } = await fs.stat(`${tempDir}/randomfile.vhd`)
await VhdAbstract.rename(handler, 'randomfile.vhd', 'renamed.vhd')
expect(await fs.exists(`${tempDir}/randomfile.vhd`)).toEqual(false)
const { size: renamedSize } = await fs.stat(`${tempDir}/renamed.vhd`)
expect(size).toEqual(renamedSize)
await VhdAbstract.unlink(handler, 'renamed.vhd')
expect(await fs.exists(`${tempDir}/renamed.vhd`)).toEqual(false)
})
})
test('It rename and unlink a VhdDirectory', async () => {
const initalSize = 4
const vhdDirectory = `${tempDir}/randomfile.dir`
await createRandomVhdDirectory(vhdDirectory, initalSize)
await Disposable.use(async function* () {
const handler = yield getSyncedHandler({ url: `file://${tempDir}` })
const vhd = yield openVhd(handler, 'randomfile.dir')
expect(vhd.header.cookie).toEqual('cxsparse')
expect(vhd.footer.cookie).toEqual('conectix')
await VhdAbstract.rename(handler, 'randomfile.dir', 'renamed.vhd')
expect(await fs.exists(`${tempDir}/randomfile.dir`)).toEqual(false)
await VhdAbstract.unlink(handler, `renamed.vhd`)
expect(await fs.exists(`${tempDir}/renamed.vhd`)).toEqual(false)
})
})
test('It create , rename and unlink alias', async () => {
const initalSize = 4
const rawFileName = `${tempDir}/randomfile`
await createRandomFile(rawFileName, initalSize)
const vhdFileName = `${tempDir}/randomfile.vhd`
await convertFromRawToVhd(rawFileName, vhdFileName)
const aliasFileName = `${tempDir}/aliasFileName.alias.vhd`
const aliasFileNameRenamed = `${tempDir}/aliasFileNameRenamed.alias.vhd`
await Disposable.use(async function* () {
const handler = yield getSyncedHandler({ url: `file://${tempDir}` })
await VhdAbstract.createAlias(handler, 'aliasFileName.alias.vhd', 'randomfile.vhd')
expect(await fs.exists(aliasFileName)).toEqual(true)
expect(await fs.exists(vhdFileName)).toEqual(true)
await VhdAbstract.rename(handler, 'aliasFileName.alias.vhd', 'aliasFileNameRenamed.alias.vhd')
expect(await fs.exists(aliasFileName)).toEqual(false)
expect(await fs.exists(vhdFileName)).toEqual(true)
expect(await fs.exists(aliasFileNameRenamed)).toEqual(true)
await VhdAbstract.unlink(handler, 'aliasFileNameRenamed.alias.vhd')
expect(await fs.exists(aliasFileName)).toEqual(false)
expect(await fs.exists(vhdFileName)).toEqual(false)
expect(await fs.exists(aliasFileNameRenamed)).toEqual(false)
})
})

View File

@@ -0,0 +1,207 @@
import { computeBatSize, sectorsRoundUpNoZero, sectorsToBytes } from './_utils'
import { PLATFORM_NONE, SECTOR_SIZE, PLATFORM_W2KU, PARENT_LOCATOR_ENTRIES } from '../_constants'
import { resolveAlias, isVhdAlias } from '../_resolveAlias'
import assert from 'assert'
import path from 'path'
export class VhdAbstract {
  #header

  // geometry values derived from the header (computed by the `header` setter)
  bitmapSize
  footer
  fullBlockSize
  sectorsOfBitmap
  sectorsPerBlock

  get header() {
    assert.notStrictEqual(this.#header, undefined, `header must be read before it's used`)
    return this.#header
  }

  /**
   * Store the header and precompute the block geometry values that depend on it.
   */
  set header(header) {
    this.#header = header
    this.sectorsPerBlock = header.blockSize / SECTOR_SIZE
    this.sectorsOfBitmap = sectorsRoundUpNoZero(this.sectorsPerBlock >> 3)
    this.fullBlockSize = sectorsToBytes(this.sectorsOfBitmap + this.sectorsPerBlock)
    this.bitmapSize = sectorsToBytes(this.sectorsOfBitmap)
  }

  /**
   * instantiate a Vhd
   *
   * @returns {AbstractVhd}
   */
  static async open() {
    throw new Error('open not implemented')
  }

  /**
   * Check if this vhd contains a block with id blockId
   * Must be called after readBlockAllocationTable
   *
   * @param {number} blockId
   * @returns {boolean}
   *
   */
  containsBlock(blockId) {
    throw new Error(`checking if this vhd contains the block ${blockId} is not implemented`)
  }

  /**
   * Read the header and the footer
   * check their integrity
   * if checkSecondFooter also checks that the footer at the end is equal to the one at the beginning
   *
   * @param {boolean} checkSecondFooter
   */
  readHeaderAndFooter(checkSecondFooter = true) {
    throw new Error(
      `reading and checking footer, ${checkSecondFooter ? 'second footer,' : ''} and header is not implemented`
    )
  }

  readBlockAllocationTable() {
    throw new Error(`reading block allocation table is not implemented`)
  }

  /**
   *
   * @param {number} blockId
   * @param {boolean} onlyBitmap
   * @returns {Buffer}
   */
  readBlock(blockId, onlyBitmap = false) {
    throw new Error(`reading ${onlyBitmap ? 'bitmap of block' : 'block'} ${blockId} is not implemented`)
  }

  /**
   * coalesce the block with id blockId from the child vhd into
   * this vhd
   *
   * @param {AbstractVhd} child
   * @param {number} blockId
   *
   * @returns {number} the merged data size
   */
  coalesceBlock(child, blockId) {
    throw new Error(`coalescing the block ${blockId} from ${child} is not implemented`)
  }

  /**
   * ensure the bat size can store at least entries block
   * move blocks if needed
   * @param {number} entries
   */
  ensureBatSize(entries) {
    throw new Error(`ensuring batSize can store at least ${entries} is not implemented`)
  }

  // Write a context footer. (At the end and beginning of a vhd file.)
  writeFooter(onlyEndFooter = false) {
    throw new Error(`writing footer ${onlyEndFooter ? 'only at end' : 'on both side'} is not implemented`)
  }

  writeHeader() {
    throw new Error(`writing header is not implemented`)
  }

  // these stubs take `(parentLocatorId, data)` / `(parentLocatorId)` to match both
  // the callers below and the concrete implementations (VhdFile, VhdDirectory);
  // the previous extra `platformDataOffset`/`platformDataSpace` parameters were stale
  _writeParentLocatorData(parentLocatorId, data) {
    throw new Error(`write Parent locator ${parentLocatorId} is not implemented`)
  }

  _readParentLocatorData(parentLocatorId) {
    throw new Error(`read Parent locator ${parentLocatorId} is not implemented`)
  }

  // common

  get batSize() {
    return computeBatSize(this.header.maxTableEntries)
  }

  /**
   * Persist a parent locator entry: writes the data, then updates the
   * header entry (platform code, data space, data length).
   *
   * @param {Object} _ - { id, platformCode, data }
   */
  async writeParentLocator({ id, platformCode = PLATFORM_NONE, data = Buffer.alloc(0) }) {
    assert(id >= 0, 'parent Locator id must be a positive number')
    assert(id < PARENT_LOCATOR_ENTRIES, `parent Locator id must be less than ${PARENT_LOCATOR_ENTRIES}`)

    // write first: implementations may consult the previous entry
    // (e.g. platformDataSpace) to decide where to put the data
    await this._writeParentLocatorData(id, data)
    const entry = this.header.parentLocatorEntry[id]
    const dataSpaceSectors = Math.ceil(data.length / SECTOR_SIZE)
    entry.platformCode = platformCode
    entry.platformDataSpace = dataSpaceSectors * SECTOR_SIZE
    entry.platformDataLength = data.length
  }

  /**
   * Read a parent locator entry.
   *
   * @param {number} id
   * @returns {Object} { platformCode, id, data }
   */
  async readParentLocator(id) {
    assert(id >= 0, 'parent Locator id must be a positive number')
    assert(id < PARENT_LOCATOR_ENTRIES, `parent Locator id must be less than ${PARENT_LOCATOR_ENTRIES}`)
    const data = await this._readParentLocatorData(id)
    // offset is storage specific, don't expose it
    const { platformCode } = this.header.parentLocatorEntry[id]
    return {
      platformCode,
      id,
      data,
    }
  }

  /**
   * Set entry 0 to fileNameString (UTF-16LE, W2KU platform code) and
   * reset every other parent locator entry.
   */
  async setUniqueParentLocator(fileNameString) {
    await this.writeParentLocator({
      id: 0,
      platformCode: PLATFORM_W2KU,
      data: Buffer.from(fileNameString, 'utf16le'),
    })

    for (let i = 1; i < PARENT_LOCATOR_ENTRIES; i++) {
      await this.writeParentLocator({
        id: i,
        platformCode: PLATFORM_NONE,
        data: Buffer.alloc(0),
      })
    }
  }

  // yield every allocated block, in BAT order
  async *blocks() {
    const nBlocks = this.header.maxTableEntries
    for (let blockId = 0; blockId < nBlocks; ++blockId) {
      if (await this.containsBlock(blockId)) {
        yield await this.readBlock(blockId)
      }
    }
  }

  static async rename(handler, sourcePath, targetPath) {
    await handler.rename(sourcePath, targetPath)
  }

  /**
   * Delete a VHD: resolves an eventual alias, removes the target
   * (plain file or VhdDirectory tree) then the alias file itself.
   */
  static async unlink(handler, path) {
    const resolved = await resolveAlias(handler, path)
    try {
      await handler.unlink(resolved)
    } catch (err) {
      // a VhdDirectory target is a directory: remove it recursively
      if (err.code === 'EISDIR') {
        await handler.rmtree(resolved)
      } else {
        throw err
      }
    }

    // also delete the alias file
    if (path !== resolved) {
      await handler.unlink(path)
    }
  }

  /**
   * Create an alias file pointing to targetPath.
   * Aliases must be named `*.alias.vhd`; chaining aliases is forbidden.
   */
  static async createAlias(handler, aliasPath, targetPath) {
    if (!isVhdAlias(aliasPath)) {
      throw new Error(`Alias must be named *.alias.vhd,  ${aliasPath} given`)
    }
    if (isVhdAlias(targetPath)) {
      throw new Error(`Chaining alias is forbidden ${aliasPath} to ${targetPath}`)
    }
    // aliasPath and targetPath are absolute path from the root of the handler
    // normalize them so they can't escape this dir
    const aliasDir = path.dirname(path.resolve('/', aliasPath))
    // only store the relative path from alias to target
    const relativePathToTarget = path.relative(aliasDir, path.resolve('/', targetPath))
    await handler.writeFile(aliasPath, relativePathToTarget)
  }
}

View File

@@ -0,0 +1,190 @@
import { buildHeader, buildFooter } from './_utils'
import { createLogger } from '@xen-orchestra/log'
import { fuFooter, fuHeader, checksumStruct } from '../_structs'
import { test, set as setBitmap } from '../_bitmap'
import { VhdAbstract } from './VhdAbstract'
import assert from 'assert'
const { debug } = createLogger('vhd-lib:VhdDirectory')
// ===================================================================
// Directory format
// <path>
// ├─ header // raw content of the header
// ├─ footer // raw content of the footer
// ├─ bat // bit array. A zero bit indicates at a position that this block is not present
// ├─ parentLocatorEntry{0-7} // data of a parent locator
// ├─ blocks // blockId is the position in the BAT
// │     └─ <all digits of blockId except the last three> // bucket sub-directory
// │        └─ <the last three digits of blockId> // block content.
export class VhdDirectory extends VhdAbstract {
  #uncheckedBlockTable

  set header(header) {
    super.header = header
    // NOTE(review): one byte per BAT entry is allocated although the table is
    // read as a bit array (see containsBlock) — confirm the intended size
    this.#blockTable = Buffer.alloc(header.maxTableEntries)
  }

  get header() {
    return super.header
  }

  get #blockTable() {
    assert.notStrictEqual(this.#uncheckedBlockTable, undefined, 'Block table must be initialized before access')
    return this.#uncheckedBlockTable
  }

  set #blockTable(blockTable) {
    this.#uncheckedBlockTable = blockTable
  }

  /**
   * Open an existing VHD directory; returns a disposable { dispose, value }.
   */
  static async open(handler, path) {
    const vhd = new VhdDirectory(handler, path)

    // opening a file for reading does not trigger EISDIR as long as we don't really read from it :
    // https://man7.org/linux/man-pages/man2/open.2.html
    // EISDIR pathname refers to a directory and the access requested
    // involved writing (that is, O_WRONLY or O_RDWR is set).

    // reading the header ensures we have a well formed directory immediately
    await vhd.readHeaderAndFooter()
    return {
      dispose: () => {},
      value: vhd,
    }
  }

  /**
   * Create a new, empty VHD directory; returns a disposable { dispose, value }.
   */
  static async create(handler, path) {
    await handler.mkdir(path)
    const vhd = new VhdDirectory(handler, path)
    return {
      dispose: () => {},
      value: vhd,
    }
  }

  constructor(handler, path) {
    super()
    this._handler = handler
    this._path = path
  }

  async readBlockAllocationTable() {
    const { buffer } = await this._readChunk('bat')
    this.#blockTable = buffer
  }

  containsBlock(blockId) {
    return test(this.#blockTable, blockId)
  }

  // absolute path (within the handler) of a chunk of this VHD
  getChunkPath(partName) {
    return this._path + '/' + partName
  }

  async _readChunk(partName) {
    // here we can implement compression and / or crypto
    const buffer = await this._handler.readFile(this.getChunkPath(partName))

    return {
      buffer: Buffer.from(buffer),
    }
  }

  async _writeChunk(partName, buffer) {
    assert(Buffer.isBuffer(buffer))
    // here we can implement compression and / or crypto

    // chunks can be in sub directories : create directories if necessary
    const pathParts = partName.split('/')
    let currentPath = this._path

    // the last one is the file name
    for (let i = 0; i < pathParts.length - 1; i++) {
      currentPath += '/' + pathParts[i]
      await this._handler.mkdir(currentPath)
    }

    return this._handler.writeFile(this.getChunkPath(partName), buffer)
  }

  // put block in subdirectories to limit impact when doing directory listing
  _getBlockPath(blockId) {
    const blockPrefix = Math.floor(blockId / 1e3)
    const blockSuffix = blockId - blockPrefix * 1e3
    return `blocks/${blockPrefix}/${blockSuffix}`
  }

  async readHeaderAndFooter() {
    const { buffer: bufHeader } = await this._readChunk('header')
    const { buffer: bufFooter } = await this._readChunk('footer')
    const footer = buildFooter(bufFooter)
    const header = buildHeader(bufHeader, footer)

    this.footer = footer
    this.header = header
  }

  async readBlock(blockId, onlyBitmap = false) {
    if (onlyBitmap) {
      throw new Error(`reading 'bitmap of block' ${blockId} in a VhdDirectory is not implemented`)
    }
    const { buffer } = await this._readChunk(this._getBlockPath(blockId))
    return {
      id: blockId,
      bitmap: buffer.slice(0, this.bitmapSize),
      data: buffer.slice(this.bitmapSize),
      buffer,
    }
  }

  ensureBatSize() {
    // nothing to do in directory mode
  }

  async writeFooter() {
    const { footer } = this

    const rawFooter = fuFooter.pack(footer)

    footer.checksum = checksumStruct(rawFooter, fuFooter)
    debug(`Write footer (checksum=${footer.checksum}). (data=${rawFooter.toString('hex')})`)

    await this._writeChunk('footer', rawFooter)
  }

  writeHeader() {
    const { header } = this
    const rawHeader = fuHeader.pack(header)
    header.checksum = checksumStruct(rawHeader, fuHeader)
    debug(`Write header (checksum=${header.checksum}). (data=${rawHeader.toString('hex')})`)
    return this._writeChunk('header', rawHeader)
  }

  writeBlockAllocationTable() {
    assert.notStrictEqual(this.#blockTable, undefined, 'Block allocation table has not been read')
    assert.notStrictEqual(this.#blockTable.length, 0, 'Block allocation table is empty')
    return this._writeChunk('bat', this.#blockTable)
  }

  // only works if data are in the same bucket
  // and if the full block is modified in child ( which is the case with xcp)
  async coalesceBlock(child, blockId) {
    // blocks live under `blocks/<prefix>/<suffix>` (see _getBlockPath), so the
    // chunk path must be built from _getBlockPath, and the copy must be awaited
    // (it was previously a floating promise built from the bare blockId)
    await this._handler.copy(child.getChunkPath(child._getBlockPath(blockId)), this.getChunkPath(this._getBlockPath(blockId)))
    // mark the block as present in this VHD, mirroring writeEntireBlock
    setBitmap(this.#blockTable, blockId)
  }

  async writeEntireBlock(block) {
    await this._writeChunk(this._getBlockPath(block.id), block.buffer)
    setBitmap(this.#blockTable, block.id)
  }

  async _readParentLocatorData(id) {
    return (await this._readChunk('parentLocatorEntry' + id)).buffer
  }

  async _writeParentLocatorData(id, data) {
    await this._writeChunk('parentLocatorEntry' + id, data)
    // data is stored in its own chunk: no offset within a monolithic file
    this.header.parentLocatorEntry[id].platformDataOffset = 0
  }
}

View File

@@ -0,0 +1,164 @@
/* eslint-env jest */
import execa from 'execa'
import fs from 'fs-extra'
import getStream from 'get-stream'
import rimraf from 'rimraf'
import tmp from 'tmp'
import { getHandler } from '@xen-orchestra/fs'
import { pFromCallback } from 'promise-toolbox'
import { randomBytes } from 'crypto'
import { VhdFile } from './VhdFile'
import { SECTOR_SIZE } from '../_constants'
import { checkFile, createRandomFile, convertFromRawToVhd, recoverRawContent } from '../tests/utils'
let tempDir = null

jest.setTimeout(60000)

// every test runs against its own throw-away temporary directory
beforeEach(async () => {
  tempDir = await pFromCallback(done => tmp.dir(done))
})

// remove the temporary directory whatever the test outcome
afterEach(async () => {
  await pFromCallback(done => rimraf(tempDir, done))
})
test('blocks can be moved', async () => {
  // build a VHD from a random raw file (size unit defined by createRandomFile)
  const rawFileName = `${tempDir}/randomfile`
  const vhdFileName = `${tempDir}/randomfile.vhd`
  const recoveredFileName = `${tempDir}/recovered`
  await createRandomFile(rawFileName, 4)
  await convertFromRawToVhd(rawFileName, vhdFileName)

  const handler = getHandler({ url: 'file://' })
  const originalSize = await handler.getSize(rawFileName)
  const vhd = new VhdFile(handler, vhdFileName)
  await vhd.readHeaderAndFooter()
  await vhd.readBlockAllocationTable()

  // force the first block(s) to be relocated further in the file
  await vhd._freeFirstBlockSpace(8000000)

  // the VHD content must still match the original raw data
  await recoverRawContent(vhdFileName, recoveredFileName, originalSize)
  expect(await fs.readFile(recoveredFileName)).toEqual(await fs.readFile(rawFileName))
})
test('the BAT MSB is not used for sign', async () => {
  const randomBuffer = await pFromCallback(cb => randomBytes(SECTOR_SIZE, cb))
  const emptyFileName = `${tempDir}/empty.vhd`
  await execa('qemu-img', ['create', '-fvpc', emptyFileName, '1.8T'])
  const handler = getHandler({ url: 'file://' })
  const vhd = new VhdFile(handler, emptyFileName)
  await vhd.readHeaderAndFooter()
  await vhd.readBlockAllocationTable()
  // we want the bit 31 to be on, to prove it's not been used for sign
  const hugeWritePositionSectors = Math.pow(2, 31) + 200
  await vhd.writeData(hugeWritePositionSectors, randomBuffer)
  await checkFile(emptyFileName)
  // here we are moving the first sector very far in the VHD to prove the BAT doesn't use signed int32
  const hugePositionBytes = hugeWritePositionSectors * SECTOR_SIZE
  await vhd._freeFirstBlockSpace(hugePositionBytes)

  // we recover the data manually for speed reasons.
  // fs.write() with offset is way faster than qemu-img when there is a 1.5To
  // hole before the block of data
  const recoveredFileName = `${tempDir}/recovered`
  const recoveredFile = await fs.open(recoveredFileName, 'w')
  try {
    const vhd2 = new VhdFile(handler, emptyFileName)
    await vhd2.readHeaderAndFooter()
    await vhd2.readBlockAllocationTable()
    for (let i = 0; i < vhd.header.maxTableEntries; i++) {
      if (vhd.containsBlock(i)) {
        const block = (await vhd2.readBlock(i)).data
        await fs.write(recoveredFile, block, 0, block.length, vhd2.header.blockSize * i)
      }
    }
  } finally {
    // the close must be awaited so the file is fully flushed before it is
    // read back below (it was a floating promise)
    await fs.close(recoveredFile)
  }
  const recovered = await getStream.buffer(
    // createReadStream is synchronous, the spurious `await` on it was removed
    fs.createReadStream(recoveredFileName, {
      start: hugePositionBytes,
      end: hugePositionBytes + randomBuffer.length - 1,
    })
  )
  expect(recovered).toEqual(randomBuffer)
})
test('writeData on empty file', async () => {
  const mbOfRandom = 3
  const rawFileName = `${tempDir}/randomfile`
  const emptyFileName = `${tempDir}/empty.vhd`
  const recoveredFileName = `${tempDir}/recovered`

  // a random raw file and an empty VHD of the same virtual size
  await createRandomFile(rawFileName, mbOfRandom)
  await execa('qemu-img', ['create', '-fvpc', emptyFileName, mbOfRandom + 'M'])

  const handler = getHandler({ url: 'file://' })
  const randomData = await fs.readFile(rawFileName)
  const originalSize = await handler.getSize(rawFileName)

  const vhd = new VhdFile(handler, emptyFileName)
  await vhd.readHeaderAndFooter()
  await vhd.readBlockAllocationTable()

  // write the whole random content in one call, then check it round-trips
  await vhd.writeData(0, randomData)
  await recoverRawContent(emptyFileName, recoveredFileName, originalSize)
  expect(await fs.readFile(recoveredFileName)).toEqual(randomData)
})
test('writeData in 2 non-overlaping operations', async () => {
  const mbOfRandom = 3
  const rawFileName = `${tempDir}/randomfile`
  const emptyFileName = `${tempDir}/empty.vhd`
  const recoveredFileName = `${tempDir}/recovered`
  await createRandomFile(rawFileName, mbOfRandom)
  await execa('qemu-img', ['create', '-fvpc', emptyFileName, mbOfRandom + 'M'])

  const handler = getHandler({ url: 'file://' })
  const randomData = await fs.readFile(rawFileName)
  const originalSize = await handler.getSize(rawFileName)

  const vhd = new VhdFile(handler, emptyFileName)
  await vhd.readHeaderAndFooter()
  await vhd.readBlockAllocationTable()

  // split the data on a sector boundary and write the two halves separately
  const splitPointSectors = 2
  const splitPointBytes = splitPointSectors * 512
  await vhd.writeData(0, randomData.slice(0, splitPointBytes))
  await vhd.writeData(splitPointSectors, randomData.slice(splitPointBytes))

  await recoverRawContent(emptyFileName, recoveredFileName, originalSize)
  expect(await fs.readFile(recoveredFileName)).toEqual(randomData)
})
test('writeData in 2 overlaping operations', async () => {
  const mbOfRandom = 3
  const rawFileName = `${tempDir}/randomfile`
  const emptyFileName = `${tempDir}/empty.vhd`
  const recoveredFileName = `${tempDir}/recovered`
  await createRandomFile(rawFileName, mbOfRandom)
  await execa('qemu-img', ['create', '-fvpc', emptyFileName, mbOfRandom + 'M'])

  const handler = getHandler({ url: 'file://' })
  const randomData = await fs.readFile(rawFileName)
  const originalSize = await handler.getSize(rawFileName)

  const vhd = new VhdFile(handler, emptyFileName)
  await vhd.readHeaderAndFooter()
  await vhd.readBlockAllocationTable()

  // the second write starts (in sectors) before the first one ends:
  // the overlapping sectors are written twice and the last write must win
  const endFirstWrite = 3
  const startSecondWrite = 2
  await vhd.writeData(0, randomData.slice(0, endFirstWrite * 512))
  await vhd.writeData(startSecondWrite, randomData.slice(startSecondWrite * 512))

  await recoverRawContent(emptyFileName, recoveredFileName, originalSize)
  expect(await fs.readFile(recoveredFileName)).toEqual(randomData)
})
test('BAT can be extended and blocks moved', async () => {
  const rawFileName = `${tempDir}/randomfile`
  const recoveredFileName = `${tempDir}/recovered`
  const vhdFileName = `${tempDir}/randomfile.vhd`
  await createRandomFile(rawFileName, 4)
  await convertFromRawToVhd(rawFileName, vhdFileName)

  const handler = getHandler({ url: 'file://' })
  const originalSize = await handler.getSize(rawFileName)
  const vhd = new VhdFile(handler, vhdFileName)
  await vhd.readHeaderAndFooter()
  await vhd.readBlockAllocationTable()

  // grow the BAT far beyond its current size and persist it;
  // this may relocate data blocks to make room
  await vhd.ensureBatSize(2000)
  await vhd.writeBlockAllocationTable()

  // the raw content must be unchanged after the move
  await recoverRawContent(vhdFileName, recoveredFileName, originalSize)
  expect(await fs.readFile(recoveredFileName)).toEqual(await fs.readFile(rawFileName))
})

View File

@@ -1,22 +1,20 @@
import assert from 'assert'
import { createLogger } from '@xen-orchestra/log'
import checkFooter from './checkFooter'
import checkHeader from './_checkHeader'
import getFirstAndLastBlocks from './_getFirstAndLastBlocks'
import { fuFooter, fuHeader, checksumStruct, unpackField } from './_structs'
import { set as mapSetBit, test as mapTestBit } from './_bitmap'
import {
BLOCK_UNUSED,
FOOTER_SIZE,
HEADER_SIZE,
PARENT_LOCATOR_ENTRIES,
PLATFORM_NONE,
PLATFORM_W2KU,
SECTOR_SIZE,
} from './_constants'
PARENT_LOCATOR_ENTRIES,
} from '../_constants'
import { computeBatSize, sectorsToBytes, buildHeader, buildFooter, BUF_BLOCK_UNUSED } from './_utils'
import { createLogger } from '@xen-orchestra/log'
import { fuFooter, fuHeader, checksumStruct } from '../_structs'
import { set as mapSetBit, test as mapTestBit } from '../_bitmap'
import { VhdAbstract } from './VhdAbstract'
import assert from 'assert'
import getFirstAndLastBlocks from '../_getFirstAndLastBlocks'
const { debug } = createLogger('vhd-lib:Vhd')
const { debug } = createLogger('vhd-lib:VhdFile')
// ===================================================================
//
@@ -28,22 +26,6 @@ const { debug } = createLogger('vhd-lib:Vhd')
//
// ===================================================================
const computeBatSize = entries => sectorsToBytes(sectorsRoundUpNoZero(entries * 4))
// Sectors conversions.
const sectorsRoundUpNoZero = bytes => Math.ceil(bytes / SECTOR_SIZE) || 1
const sectorsToBytes = sectors => sectors * SECTOR_SIZE
const assertChecksum = (name, buf, struct) => {
const actual = unpackField(struct.fields.checksum, buf)
const expected = checksumStruct(buf, struct)
assert.strictEqual(actual, expected, `invalid ${name} checksum ${actual}, expected ${expected}`)
}
// unused block as buffer containing a uint32BE
const BUF_BLOCK_UNUSED = Buffer.allocUnsafe(4)
BUF_BLOCK_UNUSED.writeUInt32BE(BLOCK_UNUSED, 0)
// ===================================================================
// Format:
@@ -68,12 +50,60 @@ BUF_BLOCK_UNUSED.writeUInt32BE(BLOCK_UNUSED, 0)
// - parentLocatorSize(i) = header.parentLocatorEntry[i].platformDataSpace * sectorSize
// - sectorSize = 512
export default class Vhd {
export class VhdFile extends VhdAbstract {
#uncheckedBlockTable
get #blockTable() {
assert.notStrictEqual(this.#uncheckedBlockTable, undefined, 'Block table must be initialized before access')
return this.#uncheckedBlockTable
}
set #blockTable(blockTable) {
this.#uncheckedBlockTable = blockTable
}
get batSize() {
return computeBatSize(this.header.maxTableEntries)
}
set header(header) {
super.header = header
const size = this.batSize
this.#blockTable = Buffer.alloc(size)
for (let i = 0; i < this.header.maxTableEntries; i++) {
this.#blockTable.writeUInt32BE(BLOCK_UNUSED, i * 4)
}
}
get header() {
return super.header
}
static async open(handler, path) {
const fd = await handler.openFile(path, 'r+')
const vhd = new VhdFile(handler, fd)
// openning a file for reading does not trigger EISDIR as long as we don't really read from it :
// https://man7.org/linux/man-pages/man2/open.2.html
// EISDIR pathname refers to a directory and the access requested
// involved writing (that is, O_WRONLY or O_RDWR is set).
// reading the header ensure we have a well formed file immediatly
await vhd.readHeaderAndFooter()
return {
dispose: () => handler.closeFile(fd),
value: vhd,
}
}
static async create(handler, path) {
const fd = await handler.openFile(path, 'wx')
const vhd = new VhdFile(handler, fd)
return {
dispose: () => handler.closeFile(fd),
value: vhd,
}
}
constructor(handler, path) {
super()
this._handler = handler
this._path = path
}
@@ -87,11 +117,6 @@ export default class Vhd {
assert.strictEqual(bytesRead, n)
return buffer
}
containsBlock(id) {
return this._getBatEntry(id) !== BLOCK_UNUSED
}
// Returns the first address after metadata. (In bytes)
_getEndOfHeaders() {
const { header } = this
@@ -114,17 +139,24 @@ export default class Vhd {
return end
}
// return the first sector (bitmap) of a block
_getBatEntry(blockId) {
const i = blockId * 4
const blockTable = this.#blockTable
return i < blockTable.length ? blockTable.readUInt32BE(i) : BLOCK_UNUSED
}
// Returns the first sector after data.
_getEndOfData() {
let end = Math.ceil(this._getEndOfHeaders() / SECTOR_SIZE)
const fullBlockSize = this.sectorsOfBitmap + this.sectorsPerBlock
const sectorsOfFullBlock = this.sectorsOfBitmap + this.sectorsPerBlock
const { maxTableEntries } = this.header
for (let i = 0; i < maxTableEntries; i++) {
const blockAddr = this._getBatEntry(i)
if (blockAddr !== BLOCK_UNUSED) {
end = Math.max(end, blockAddr + fullBlockSize)
end = Math.max(end, blockAddr + sectorsOfFullBlock)
}
}
@@ -133,7 +165,11 @@ export default class Vhd {
return sectorsToBytes(end)
}
// TODO: extract the checks into reusable functions:
containsBlock(id) {
return this._getBatEntry(id) !== BLOCK_UNUSED
}
// TODO:
// - better human reporting
// - auto repair if possible
async readHeaderAndFooter(checkSecondFooter = true) {
@@ -141,50 +177,25 @@ export default class Vhd {
const bufFooter = buf.slice(0, FOOTER_SIZE)
const bufHeader = buf.slice(FOOTER_SIZE)
assertChecksum('footer', bufFooter, fuFooter)
assertChecksum('header', bufHeader, fuHeader)
const footer = buildFooter(bufFooter)
const header = buildHeader(bufHeader, footer)
if (checkSecondFooter) {
const size = await this._handler.getSize(this._path)
assert(bufFooter.equals(await this._read(size - FOOTER_SIZE, FOOTER_SIZE)), 'footer1 !== footer2')
}
const footer = (this.footer = fuFooter.unpack(bufFooter))
checkFooter(footer)
const header = (this.header = fuHeader.unpack(bufHeader))
checkHeader(header, footer)
// Compute the number of sectors in one block.
// Default: One block contains 4096 sectors of 512 bytes.
const sectorsPerBlock = (this.sectorsPerBlock = header.blockSize / SECTOR_SIZE)
// Compute bitmap size in sectors.
// Default: 1.
const sectorsOfBitmap = (this.sectorsOfBitmap = sectorsRoundUpNoZero(sectorsPerBlock >> 3))
// Full block size => data block size + bitmap size.
this.fullBlockSize = sectorsToBytes(sectorsPerBlock + sectorsOfBitmap)
// In bytes.
// Default: 512.
this.bitmapSize = sectorsToBytes(sectorsOfBitmap)
this.footer = footer
this.header = header
}
// Returns a buffer that contains the block allocation table of a vhd file.
async readBlockAllocationTable() {
const { header } = this
this.blockTable = await this._read(header.tableOffset, header.maxTableEntries * 4)
this.#blockTable = await this._read(header.tableOffset, header.maxTableEntries * 4)
}
// return the first sector (bitmap) of a block
_getBatEntry(blockId) {
const i = blockId * 4
const { blockTable } = this
return i < blockTable.length ? blockTable.readUInt32BE(i) : BLOCK_UNUSED
}
_readBlock(blockId, onlyBitmap = false) {
readBlock(blockId, onlyBitmap = false) {
const blockAddr = this._getBatEntry(blockId)
if (blockAddr === BLOCK_UNUSED) {
throw new Error(`no such block ${blockId}`)
@@ -214,7 +225,7 @@ export default class Vhd {
}
async _freeFirstBlockSpace(spaceNeededBytes) {
const firstAndLastBlocks = getFirstAndLastBlocks(this.blockTable)
const firstAndLastBlocks = getFirstAndLastBlocks(this.#blockTable)
if (firstAndLastBlocks === undefined) {
return
}
@@ -249,8 +260,8 @@ export default class Vhd {
const newBatSize = computeBatSize(entries)
await this._freeFirstBlockSpace(newBatSize - this.batSize)
const maxTableEntries = (header.maxTableEntries = entries)
const prevBat = this.blockTable
const bat = (this.blockTable = Buffer.allocUnsafe(newBatSize))
const prevBat = this.#blockTable
const bat = (this.#blockTable = Buffer.allocUnsafe(newBatSize))
prevBat.copy(bat)
bat.fill(BUF_BLOCK_UNUSED, prevMaxTableEntries * 4)
debug(`ensureBatSize: extend BAT ${prevMaxTableEntries} -> ${maxTableEntries}`)
@@ -264,7 +275,7 @@ export default class Vhd {
// set the first sector (bitmap) of a block
_setBatEntry(block, blockSector) {
const i = block * 4
const { blockTable } = this
const blockTable = this.#blockTable
blockTable.writeUInt32BE(blockSector, i)
@@ -298,7 +309,7 @@ export default class Vhd {
await this._write(bitmap, sectorsToBytes(blockAddr))
}
async _writeEntireBlock(block) {
async writeEntireBlock(block) {
let blockAddr = this._getBatEntry(block.id)
if (blockAddr === BLOCK_UNUSED) {
@@ -314,7 +325,7 @@ export default class Vhd {
blockAddr = await this._createBlock(block.id)
parentBitmap = Buffer.alloc(this.bitmapSize, 0)
} else if (parentBitmap === undefined) {
parentBitmap = (await this._readBlock(block.id, true)).bitmap
parentBitmap = (await this.readBlock(block.id, true)).bitmap
}
const offset = blockAddr + this.sectorsOfBitmap + beginSectorId
@@ -333,7 +344,7 @@ export default class Vhd {
}
async coalesceBlock(child, blockId) {
const block = await child._readBlock(blockId)
const block = await child.readBlock(blockId)
const { bitmap, data } = block
debug(`coalesceBlock block=${blockId}`)
@@ -358,10 +369,10 @@ export default class Vhd {
const isFullBlock = i === 0 && endSector === sectorsPerBlock
if (isFullBlock) {
await this._writeEntireBlock(block)
await this.writeEntireBlock(block)
} else {
if (parentBitmap === null) {
parentBitmap = (await this._readBlock(blockId, true)).bitmap
parentBitmap = (await this.readBlock(blockId, true)).bitmap
}
await this._writeBlockSectors(block, i, endSector, parentBitmap)
}
@@ -399,6 +410,13 @@ export default class Vhd {
return this._write(rawHeader, offset)
}
writeBlockAllocationTable() {
const header = this.header
const blockTable = this.#blockTable
debug(`Write BlockAllocationTable at: ${header.tableOffset} ). (data=${blockTable.toString('hex')})`)
return this._write(blockTable, header.tableOffset)
}
async writeData(offsetSectors, buffer) {
const bufferSizeSectors = Math.ceil(buffer.length / SECTOR_SIZE)
const startBlock = Math.floor(offsetSectors / this.sectorsPerBlock)
@@ -436,26 +454,47 @@ export default class Vhd {
const deltaSectors = neededSectors - currentSpace
await this._freeFirstBlockSpace(sectorsToBytes(deltaSectors))
this.header.tableOffset += sectorsToBytes(deltaSectors)
await this._write(this.blockTable, this.header.tableOffset)
await this._write(this.#blockTable, this.header.tableOffset)
}
return firstLocatorOffset
}
async setUniqueParentLocator(fileNameString) {
async _readParentLocatorData(parentLocatorId) {
const { platformDataOffset, platformDataLength } = this.header.parentLocatorEntry[parentLocatorId]
if (platformDataLength > 0) {
return (await this._read(platformDataOffset, platformDataLength)).buffer
}
return Buffer.alloc(0)
}
async _writeParentLocatorData(parentLocatorId, data) {
let position
const { header } = this
header.parentLocatorEntry[0].platformCode = PLATFORM_W2KU
const encodedFilename = Buffer.from(fileNameString, 'utf16le')
const dataSpaceSectors = Math.ceil(encodedFilename.length / SECTOR_SIZE)
const position = await this._ensureSpaceForParentLocators(dataSpaceSectors)
await this._write(encodedFilename, position)
header.parentLocatorEntry[0].platformDataSpace = dataSpaceSectors * SECTOR_SIZE
header.parentLocatorEntry[0].platformDataLength = encodedFilename.length
header.parentLocatorEntry[0].platformDataOffset = position
for (let i = 1; i < 8; i++) {
header.parentLocatorEntry[i].platformCode = PLATFORM_NONE
header.parentLocatorEntry[i].platformDataSpace = 0
header.parentLocatorEntry[i].platformDataLength = 0
header.parentLocatorEntry[i].platformDataOffset = 0
if (data.length === 0) {
// reset offset if data is empty
header.parentLocatorEntry[parentLocatorId].platformDataOffset = 0
} else {
if (data.length <= header.parentLocatorEntry[parentLocatorId].platformDataSpace) {
// new parent locator length is smaller than available space : keep it in place
position = header.parentLocatorEntry[parentLocatorId].platformDataOffset
} else {
const firstAndLastBlocks = getFirstAndLastBlocks(this.#blockTable)
if (firstAndLastBlocks === undefined) {
// no block in data : put the parent locatorn entry at the end
position = this._getEndOfData()
} else {
// need more size
// since there can be multiple parent locator entry, we can't extend the entry in place
// move the first(s) block(s) at the end of the data
// move the parent locator to the precedent position of the first block
const { firstSector } = firstAndLastBlocks
await this._freeFirstBlockSpace(header.parentLocatorEntry[parentLocatorId].platformDataSpace)
position = sectorsToBytes(firstSector)
}
}
await this._write(data, position)
header.parentLocatorEntry[parentLocatorId].platformDataOffset = position
}
}
}

View File

@@ -0,0 +1,52 @@
import assert from 'assert'
import { BLOCK_UNUSED, SECTOR_SIZE } from '../_constants'
import { fuFooter, fuHeader, checksumStruct, unpackField } from '../_structs'
import checkFooter from '../checkFooter'
import checkHeader from '../_checkHeader'
/** Size in bytes of a BAT holding `entries` 4-byte entries, rounded up to whole sectors. */
export function computeBatSize(entries) {
  return sectorsToBytes(sectorsRoundUpNoZero(entries * 4))
}

// Sectors conversions.

/** Number of sectors needed to hold `bytes`, never less than 1. */
export function sectorsRoundUpNoZero(bytes) {
  return Math.ceil(bytes / SECTOR_SIZE) || 1
}

/** Convert a count of sectors into bytes. */
export function sectorsToBytes(sectors) {
  return sectors * SECTOR_SIZE
}
/**
 * Assert that the checksum stored in `buf` matches the one computed from it.
 *
 * @param {string} name - label used in the error message ('header', 'footer')
 * @param {Buffer} buf
 * @param {Object} struct - fu struct describing the buffer layout
 */
export function assertChecksum(name, buf, struct) {
  const actual = unpackField(struct.fields.checksum, buf)
  const expected = checksumStruct(buf, struct)
  assert.strictEqual(actual, expected, `invalid ${name} checksum ${actual}, expected ${expected}`)
}
// unused block as buffer containing a uint32BE
// (built once so BAT entries can be compared/filled without re-encoding)
export const BUF_BLOCK_UNUSED = Buffer.allocUnsafe(4)
BUF_BLOCK_UNUSED.writeUInt32BE(BLOCK_UNUSED, 0)
/**
 * Check and parse a raw header buffer into a header object.
 *
 * Verifies the embedded checksum, then validates the unpacked header
 * against the footer.
 *
 * @param {Buffer} bufHeader - raw header content
 * @param {Object} footer - already-parsed footer, used for validation
 * @returns {Object} the parsed header
 */
export function buildHeader(bufHeader, footer) {
  assertChecksum('header', bufHeader, fuHeader)

  const unpacked = fuHeader.unpack(bufHeader)
  checkHeader(unpacked, footer)
  return unpacked
}
/**
 * Check and parse the footer buffer to build a footer object
 *
 * Verifies the embedded checksum, then validates the unpacked footer.
 *
 * @param {Buffer} bufFooter - raw footer content
 * @returns {Object} the parsed footer
 */
export const buildFooter = bufFooter => {
  assertChecksum('footer', bufFooter, fuFooter)

  const footer = fuFooter.unpack(bufFooter)
  checkFooter(footer)
  return footer
}

View File

@@ -0,0 +1,10 @@
/* eslint-env jest */
import { createFooter } from './_createFooterHeader'
// smoke test: building a footer with a plausible disk geometry must not throw
test('createFooter() does not crash', () => {
  const geometry = {
    cylinders: 3,
    heads: 4,
    sectorsPerTrack: 17,
  }
  createFooter(104448, Math.floor(Date.now() / 1000), geometry)
})

View File

@@ -6,7 +6,9 @@ import { BLOCK_UNUSED } from './_constants'
// in the file
export default bat => {
const n = bat.length
assert.notStrictEqual(n, 0)
if (n === 0) {
return
}
assert.strictEqual(n % 4, 0)
let i = 0

View File

@@ -0,0 +1,66 @@
/* eslint-env jest */
import rimraf from 'rimraf'
import tmp from 'tmp'
import { getSyncedHandler } from '@xen-orchestra/fs'
import { Disposable, pFromCallback } from 'promise-toolbox'
import { isVhdAlias, resolveAlias } from './_resolveAlias'
let tempDir

jest.setTimeout(60000)

// each test gets a dedicated temporary directory, removed afterwards
beforeEach(async () => {
  tempDir = await pFromCallback(done => tmp.dir(done))
})

afterEach(async () => {
  await pFromCallback(done => rimraf(tempDir, done))
})
test('is vhd alias recognize only *.alias.vhd files', () => {
  // only names ending exactly in `.alias.vhd` are aliases
  const cases = [
    ['filename.alias.vhd', true],
    ['alias.vhd', false],
    ['filename.vhd', false],
    ['filename.alias.vhd.other', false],
  ]
  for (const [name, expected] of cases) {
    expect(isVhdAlias(name)).toEqual(expected)
  }
})
test('resolve return the path in argument for a non alias file ', async () => {
  // a non-alias path resolves to itself; the handler is never used
  const resolved = await resolveAlias(null, 'filename.vhd')
  expect(resolved).toEqual('filename.vhd')
})
test('resolve get the path of the target file for an alias', async () => {
  await Disposable.use(async function* () {
    // same directory
    const handler = yield getSyncedHandler({ url: `file://${tempDir}` })
    await handler.mkdir(`alias`)
    const aliasPath = 'alias/alias.alias.vhd'

    // the alias contain the relative path to the file. The resolved values is the full path from the root of the remote
    const combinations = [
      { targetPath: `../targets.vhd`, targetContent: `targets.vhd` },
      { targetPath: `targets.vhd`, targetContent: `alias/targets.vhd` },
      { targetPath: `sub/targets.vhd`, targetContent: `alias/sub/targets.vhd` },
      { targetPath: `../sibling/targets.vhd`, targetContent: `sibling/targets.vhd` },
    ]

    // rewrite the alias for each combination and check its resolution
    for (const { targetPath, targetContent } of combinations) {
      await handler.writeFile(aliasPath, targetPath, { flags: 'w' })
      const resolved = await resolveAlias(handler, aliasPath)
      expect(resolved).toEqual(targetContent)
      await handler.unlink(aliasPath)
    }
  })
})
// an alias may not point to another alias: resolveAlias must reject
test('resolve throws an error an alias to an alias', async () => {
  await Disposable.use(async function* () {
    const handler = yield getSyncedHandler({ url: `file://${tempDir}` })
    const alias = 'alias.alias.vhd'
    const target = 'target.alias.vhd'
    await handler.writeFile(alias, target)
    // `.rejects` returns a promise: it must be awaited, otherwise a failing
    // assertion is swallowed and the test passes even if nothing throws
    await expect(resolveAlias(handler, alias)).rejects.toThrow(Error)
  })
})

View File

@@ -0,0 +1,18 @@
import resolveRelativeFromFile from './_resolveRelativeFromFile'
/**
 * An alias is a small text file, named `*.alias.vhd`, whose content is the
 * path of the real VHD it points to.
 *
 * @param {string} filename
 * @returns {boolean} true if the name ends with `.alias.vhd`
 */
export function isVhdAlias(filename) {
  return filename.endsWith('.alias.vhd')
}

/**
 * Resolve a path that may be an alias to the path of the actual VHD.
 *
 * @param {Object} handler - remote handler used to read the alias content
 * @param {string} filename - path of the (possibly alias) file
 * @returns {Promise<string>} the target path, or `filename` unchanged when it is not an alias
 * @throws {Error} if the alias points to another alias
 */
export async function resolveAlias(handler, filename) {
  if (!isVhdAlias(filename)) {
    return filename
  }
  const aliasContent = (await handler.readFile(filename)).toString().trim()
  // refusing alias-to-alias chains also rules out circular references and
  // unreasonably long chains
  if (isVhdAlias(aliasContent)) {
    // fixed: the message used `$(unknown)` instead of `${filename}`, so the
    // offending alias path never appeared in the error
    throw new Error(`Chaining alias is forbidden ${filename} to ${aliasContent}`)
  }
  // the target path is relative to the alias location
  return resolveRelativeFromFile(filename, aliasContent)
}

View File

@@ -1,11 +1,11 @@
import { dirname, relative } from 'path'
import Vhd from './vhd'
import { VhdFile } from './'
import { DISK_TYPE_DIFFERENCING } from './_constants'
export default async function chain(parentHandler, parentPath, childHandler, childPath, force = false) {
const parentVhd = new Vhd(parentHandler, parentPath)
const childVhd = new Vhd(childHandler, childPath)
const parentVhd = new VhdFile(parentHandler, parentPath)
const childVhd = new VhdFile(childHandler, childPath)
await childVhd.readHeaderAndFooter()
const { header, footer } = childVhd

View File

@@ -1,10 +1,10 @@
import Vhd from './vhd'
import { VhdFile } from '.'
import resolveRelativeFromFile from './_resolveRelativeFromFile'
import { DISK_TYPE_DYNAMIC } from './_constants'
export default async function checkChain(handler, path) {
while (true) {
const vhd = new Vhd(handler, path)
const vhd = new VhdFile(handler, path)
await vhd.readHeaderAndFooter()
if (vhd.footer.diskType === DISK_TYPE_DYNAMIC) {

View File

@@ -1,11 +1,11 @@
import asyncIteratorToStream from 'async-iterator-to-stream'
import Vhd from './vhd'
import { VhdFile } from '.'
export default asyncIteratorToStream(async function* (handler, path) {
const fd = await handler.openFile(path, 'r')
try {
const vhd = new Vhd(handler, fd)
const vhd = new VhdFile(handler, fd)
await vhd.readHeaderAndFooter()
await vhd.readBlockAllocationTable()
const {
@@ -17,10 +17,10 @@ export default asyncIteratorToStream(async function* (handler, path) {
const emptyBlock = Buffer.alloc(blockSize)
for (let i = 0; i < nFullBlocks; ++i) {
yield vhd.containsBlock(i) ? (await vhd._readBlock(i)).data : emptyBlock
yield vhd.containsBlock(i) ? (await vhd.readBlock(i)).data : emptyBlock
}
if (nLeftoverBytes !== 0) {
yield (vhd.containsBlock(nFullBlocks) ? (await vhd._readBlock(nFullBlocks)).data : emptyBlock).slice(
yield (vhd.containsBlock(nFullBlocks) ? (await vhd.readBlock(nFullBlocks)).data : emptyBlock).slice(
0,
nLeftoverBytes
)

View File

@@ -8,7 +8,7 @@ import { pipeline } from 'readable-stream'
import { createReadableRawStream, createReadableSparseStream } from './'
import { createFooter } from './_createFooterHeader'
import { checkFile, convertFromVhdToRaw } from './tests/utils'
let tempDir = null
@@ -20,14 +20,6 @@ afterEach(async () => {
await pFromCallback(cb => rimraf(tempDir, cb))
})
test('createFooter() does not crash', () => {
createFooter(104448, Math.floor(Date.now() / 1000), {
cylinders: 3,
heads: 4,
sectorsPerTrack: 17,
})
})
test('ReadableRawVHDStream does not crash', async () => {
const data = [
{
@@ -102,6 +94,7 @@ test('ReadableSparseVHDStream can handle a sparse file', async () => {
},
]
const fileSize = blockSize * 110
const stream = await createReadableSparseStream(
fileSize,
blockSize,
@@ -110,9 +103,10 @@ test('ReadableSparseVHDStream can handle a sparse file', async () => {
)
expect(stream.length).toEqual(4197888)
const pipe = stream.pipe(createWriteStream(`${tempDir}/output.vhd`))
await fromEvent(pipe, 'finish')
await execa('vhd-util', ['check', '-t', '-i', '-n', `${tempDir}/output.vhd`])
await execa('qemu-img', ['convert', '-f', 'vpc', '-O', 'raw', `${tempDir}/output.vhd`, `${tempDir}/out1.raw`])
await checkFile(`${tempDir}/output.vhd`)
await convertFromVhdToRaw(`${tempDir}/output.vhd`, `${tempDir}/out1.raw`)
const out1 = await readFile(`${tempDir}/out1.raw`)
const expected = Buffer.alloc(fileSize)
blocks.forEach(b => {

View File

@@ -0,0 +1,42 @@
/* eslint-env jest */
import execa from 'execa'
import fs from 'fs-extra'
import rimraf from 'rimraf'
import tmp from 'tmp'
import { getHandler } from '@xen-orchestra/fs'
import { pFromCallback } from 'promise-toolbox'
import { pipeline } from 'readable-stream'
import { checkFile, createRandomFile, convertFromRawToVhd } from './tests/utils'
import { createSyntheticStream } from '.'
// temporary directory shared by the tests of this file
let tempDir = null

// integration test spawns qemu-img/vhd-util: allow up to one minute
jest.setTimeout(60000)

// each test runs against a fresh temporary directory, removed afterwards
beforeEach(async () => {
  tempDir = await pFromCallback(cb => tmp.dir(cb))
})
afterEach(async () => {
  await pFromCallback(cb => rimraf(tempDir, cb))
})
// fixed: a leftover `test.only` focused this test and silently skipped every
// other test in the file
test('createSyntheticStream passes vhd-util check', async () => {
  const initialSizeMB = 4
  const rawFileName = `${tempDir}/randomfile`
  const vhdFileName = `${tempDir}/randomfile.vhd`
  const recoveredVhdFileName = `${tempDir}/recovered.vhd`
  await createRandomFile(rawFileName, initialSizeMB)
  await convertFromRawToVhd(rawFileName, vhdFileName)
  await checkFile(vhdFileName)

  const handler = getHandler({ url: 'file://' })
  const stream = await createSyntheticStream(handler, vhdFileName)
  // the advertised stream length must match the size of the source VHD
  const expectedVhdSize = (await fs.stat(vhdFileName)).size
  expect(stream.length).toEqual(expectedVhdSize)

  await pFromCallback(cb => pipeline(stream, fs.createWriteStream(recoveredVhdFileName), cb))
  await checkFile(recoveredVhdFileName)
  const stats = await fs.stat(recoveredVhdFileName)
  expect(stats.size).toEqual(expectedVhdSize)
  // the recovered VHD must carry the same data as the original raw file
  await execa('qemu-img', ['compare', recoveredVhdFileName, rawFileName])
})

View File

@@ -3,7 +3,7 @@ import { createLogger } from '@xen-orchestra/log'
import resolveRelativeFromFile from './_resolveRelativeFromFile'
import Vhd from './vhd'
import { VhdFile } from '.'
import { BLOCK_UNUSED, DISK_TYPE_DYNAMIC, FOOTER_SIZE, HEADER_SIZE, SECTOR_SIZE } from './_constants'
import { fuFooter, fuHeader, checksumStruct } from './_structs'
import { test as mapTestBit } from './_bitmap'
@@ -27,7 +27,7 @@ export default async function createSyntheticStream(handler, paths) {
const open = async path => {
const fd = await handler.openFile(path, 'r')
fds.push(fd)
const vhd = new Vhd(handler, fd)
const vhd = new VhdFile(handler, fd)
vhds.push(vhd)
await vhd.readHeaderAndFooter()
await vhd.readBlockAllocationTable()
@@ -126,7 +126,7 @@ export default async function createSyntheticStream(handler, paths) {
}
let block = blocksByVhd.get(vhd)
if (block === undefined) {
block = yield vhd._readBlock(iBlock)
block = yield vhd.readBlock(iBlock)
blocksByVhd.set(vhd, block)
}
const { bitmap, data } = block

View File

@@ -1,6 +1,5 @@
/* eslint-env jest */
import asyncIteratorToStream from 'async-iterator-to-stream'
import execa from 'execa'
import fs from 'fs-extra'
import rimraf from 'rimraf'
@@ -12,6 +11,7 @@ import { pipeline } from 'readable-stream'
import { createVhdStreamWithLength } from '.'
import { FOOTER_SIZE } from './_constants'
import { createRandomFile, convertFromRawToVhd, convertFromVhdToRaw } from './tests/utils'
let tempDir = null
@@ -23,27 +23,6 @@ afterEach(async () => {
await pFromCallback(cb => rimraf(tempDir, cb))
})
const RAW = 'raw'
const VHD = 'vpc'
const convert = (inputFormat, inputFile, outputFormat, outputFile) =>
execa('qemu-img', ['convert', '-f', inputFormat, '-O', outputFormat, inputFile, outputFile])
const createRandomStream = asyncIteratorToStream(function* (size) {
let requested = Math.min(size, yield)
while (size > 0) {
const buf = Buffer.allocUnsafe(requested)
for (let i = 0; i < requested; ++i) {
buf[i] = Math.floor(Math.random() * 256)
}
requested = Math.min((size -= requested), yield buf)
}
})
async function createRandomFile(name, size) {
const input = await createRandomStream(size)
await pFromCallback(cb => pipeline(input, fs.createWriteStream(name), cb))
}
const forOwn = (object, cb) => Object.keys(object).forEach(key => cb(object[key], key, object))
describe('createVhdStreamWithLength', () => {
@@ -58,10 +37,10 @@ describe('createVhdStreamWithLength', () => {
(size, title) =>
it(title, async () => {
const inputRaw = `${tempDir}/input.raw`
await createRandomFile(inputRaw, size)
await createRandomFile(inputRaw, size / 1024 / 1024)
const inputVhd = `${tempDir}/input.vhd`
await convert(RAW, inputRaw, VHD, inputVhd)
await convertFromRawToVhd(inputRaw, inputVhd)
const result = await createVhdStreamWithLength(await createReadStream(inputVhd))
const { length } = result
@@ -75,18 +54,18 @@ describe('createVhdStreamWithLength', () => {
// ensure the generated VHD is correct and contains the same data
const outputRaw = `${tempDir}/output.raw`
await convert(VHD, outputVhd, RAW, outputRaw)
await convertFromVhdToRaw(outputVhd, outputRaw)
await execa('cmp', [inputRaw, outputRaw])
})
)
it('can skip blank after the last block and before the footer', async () => {
const initialSize = 4 * 1024
const initialSize = 4
const rawFileName = `${tempDir}/randomfile`
const vhdName = `${tempDir}/randomfile.vhd`
const outputVhdName = `${tempDir}/output.vhd`
await createRandomFile(rawFileName, initialSize)
await convert(RAW, rawFileName, VHD, vhdName)
await convertFromRawToVhd(rawFileName, vhdName)
const { size: vhdSize } = await fs.stat(vhdName)
// read file footer
const footer = await getStream.buffer(createReadStream(vhdName, { start: vhdSize - FOOTER_SIZE }))

View File

@@ -1,11 +1,14 @@
export { default } from './vhd'
export { default as chainVhd } from './chain'
export { default as checkFooter } from './checkFooter'
export { default as checkVhdChain } from './checkChain'
export { default as createContentStream } from './createContentStream'
export { default as createReadableRawStream } from './createReadableRawStream'
export { default as createReadableSparseStream } from './createReadableSparseStream'
export { default as createSyntheticStream } from './createSyntheticStream'
export { default as mergeVhd } from './merge'
export { default as createVhdStreamWithLength } from './createVhdStreamWithLength'
export { default as mergeVhd } from './merge'
export { default as peekFooterFromVhdStream } from './peekFooterFromVhdStream'
export { default as checkFooter } from './checkFooter'
export { openVhd } from './openVhd'
export { VhdDirectory } from './Vhd/VhdDirectory'
export { VhdFile } from './Vhd/VhdFile'
export * as Constants from './_constants'

View File

@@ -1,19 +1,16 @@
/* eslint-env jest */
import asyncIteratorToStream from 'async-iterator-to-stream'
import execa from 'execa'
import fs from 'fs-extra'
import getStream from 'get-stream'
import rimraf from 'rimraf'
import tmp from 'tmp'
import { getHandler } from '@xen-orchestra/fs'
import { pFromCallback } from 'promise-toolbox'
import { pipeline } from 'readable-stream'
import { randomBytes } from 'crypto'
import Vhd, { chainVhd, createSyntheticStream, mergeVhd as vhdMerge } from './index'
import { VhdFile, chainVhd, mergeVhd as vhdMerge } from './index'
import { SECTOR_SIZE } from './_constants'
import { checkFile, createRandomFile, convertFromRawToVhd, recoverRawContent } from './tests/utils'
let tempDir = null
@@ -27,256 +24,55 @@ afterEach(async () => {
await pFromCallback(cb => rimraf(tempDir, cb))
})
async function createRandomFile(name, sizeMB) {
const createRandomStream = asyncIteratorToStream(function* (size) {
while (size-- > 0) {
yield Buffer.from([Math.floor(Math.random() * 256)])
}
})
const input = createRandomStream(sizeMB * 1024 * 1024)
await pFromCallback(cb => pipeline(input, fs.createWriteStream(name), cb))
}
async function checkFile(vhdName) {
await execa('vhd-util', ['check', '-p', '-b', '-t', '-n', vhdName])
}
async function recoverRawContent(vhdName, rawName, originalSize) {
await checkFile(vhdName)
await execa('qemu-img', ['convert', '-fvpc', '-Oraw', vhdName, rawName])
if (originalSize !== undefined) {
await execa('truncate', ['-s', originalSize, rawName])
}
}
async function convertFromRawToVhd(rawName, vhdName) {
await execa('qemu-img', ['convert', '-f', 'raw', '-Ovpc', rawName, vhdName])
}
test('blocks can be moved', async () => {
const initalSize = 4
const rawFileName = `${tempDir}/randomfile`
await createRandomFile(rawFileName, initalSize)
const vhdFileName = `${tempDir}/randomfile.vhd`
await convertFromRawToVhd(rawFileName, vhdFileName)
const handler = getHandler({ url: 'file://' })
const originalSize = await handler.getSize(rawFileName)
const newVhd = new Vhd(handler, vhdFileName)
await newVhd.readHeaderAndFooter()
await newVhd.readBlockAllocationTable()
await newVhd._freeFirstBlockSpace(8000000)
const recoveredFileName = `${tempDir}/recovered`
await recoverRawContent(vhdFileName, recoveredFileName, originalSize)
expect(await fs.readFile(recoveredFileName)).toEqual(await fs.readFile(rawFileName))
})
test('the BAT MSB is not used for sign', async () => {
const randomBuffer = await pFromCallback(cb => randomBytes(SECTOR_SIZE, cb))
const emptyFileName = `${tempDir}/empty.vhd`
await execa('qemu-img', ['create', '-fvpc', emptyFileName, '1.8T'])
const handler = getHandler({ url: 'file://' })
const vhd = new Vhd(handler, emptyFileName)
await vhd.readHeaderAndFooter()
await vhd.readBlockAllocationTable()
// we want the bit 31 to be on, to prove it's not been used for sign
const hugeWritePositionSectors = Math.pow(2, 31) + 200
await vhd.writeData(hugeWritePositionSectors, randomBuffer)
await checkFile(emptyFileName)
// here we are moving the first sector very far in the VHD to prove the BAT doesn't use signed int32
const hugePositionBytes = hugeWritePositionSectors * SECTOR_SIZE
await vhd._freeFirstBlockSpace(hugePositionBytes)
// we recover the data manually for speed reasons.
// fs.write() with offset is way faster than qemu-img when there is a 1.5To
// hole before the block of data
const recoveredFileName = `${tempDir}/recovered`
const recoveredFile = await fs.open(recoveredFileName, 'w')
try {
const vhd2 = new Vhd(handler, emptyFileName)
await vhd2.readHeaderAndFooter()
await vhd2.readBlockAllocationTable()
for (let i = 0; i < vhd.header.maxTableEntries; i++) {
const entry = vhd._getBatEntry(i)
if (entry !== 0xffffffff) {
const block = (await vhd2._readBlock(i)).data
await fs.write(recoveredFile, block, 0, block.length, vhd2.header.blockSize * i)
}
}
} finally {
fs.close(recoveredFile)
}
const recovered = await getStream.buffer(
await fs.createReadStream(recoveredFileName, {
start: hugePositionBytes,
end: hugePositionBytes + randomBuffer.length - 1,
})
)
expect(recovered).toEqual(randomBuffer)
})
test('writeData on empty file', async () => {
const mbOfRandom = 3
const rawFileName = `${tempDir}/randomfile`
const emptyFileName = `${tempDir}/empty.vhd`
await createRandomFile(rawFileName, mbOfRandom)
await execa('qemu-img', ['create', '-fvpc', emptyFileName, mbOfRandom + 'M'])
const randomData = await fs.readFile(rawFileName)
const handler = getHandler({ url: 'file://' })
const originalSize = await handler.getSize(rawFileName)
const newVhd = new Vhd(handler, emptyFileName)
await newVhd.readHeaderAndFooter()
await newVhd.readBlockAllocationTable()
await newVhd.writeData(0, randomData)
const recoveredFileName = `${tempDir}/recovered`
await recoverRawContent(emptyFileName, recoveredFileName, originalSize)
expect(await fs.readFile(recoveredFileName)).toEqual(randomData)
})
test('writeData in 2 non-overlaping operations', async () => {
const mbOfRandom = 3
const rawFileName = `${tempDir}/randomfile`
const emptyFileName = `${tempDir}/empty.vhd`
const recoveredFileName = `${tempDir}/recovered`
await createRandomFile(rawFileName, mbOfRandom)
await execa('qemu-img', ['create', '-fvpc', emptyFileName, mbOfRandom + 'M'])
const randomData = await fs.readFile(rawFileName)
const handler = getHandler({ url: 'file://' })
const originalSize = await handler.getSize(rawFileName)
const newVhd = new Vhd(handler, emptyFileName)
await newVhd.readHeaderAndFooter()
await newVhd.readBlockAllocationTable()
const splitPointSectors = 2
await newVhd.writeData(0, randomData.slice(0, splitPointSectors * 512))
await newVhd.writeData(splitPointSectors, randomData.slice(splitPointSectors * 512))
await recoverRawContent(emptyFileName, recoveredFileName, originalSize)
expect(await fs.readFile(recoveredFileName)).toEqual(randomData)
})
test('writeData in 2 overlaping operations', async () => {
const mbOfRandom = 3
const rawFileName = `${tempDir}/randomfile`
const emptyFileName = `${tempDir}/empty.vhd`
const recoveredFileName = `${tempDir}/recovered`
await createRandomFile(rawFileName, mbOfRandom)
await execa('qemu-img', ['create', '-fvpc', emptyFileName, mbOfRandom + 'M'])
const randomData = await fs.readFile(rawFileName)
const handler = getHandler({ url: 'file://' })
const originalSize = await handler.getSize(rawFileName)
const newVhd = new Vhd(handler, emptyFileName)
await newVhd.readHeaderAndFooter()
await newVhd.readBlockAllocationTable()
const endFirstWrite = 3
const startSecondWrite = 2
await newVhd.writeData(0, randomData.slice(0, endFirstWrite * 512))
await newVhd.writeData(startSecondWrite, randomData.slice(startSecondWrite * 512))
await recoverRawContent(emptyFileName, recoveredFileName, originalSize)
expect(await fs.readFile(recoveredFileName)).toEqual(randomData)
})
test('BAT can be extended and blocks moved', async () => {
const initalSize = 4
const rawFileName = `${tempDir}/randomfile`
const recoveredFileName = `${tempDir}/recovered`
const vhdFileName = `${tempDir}/randomfile.vhd`
await createRandomFile(rawFileName, initalSize)
await convertFromRawToVhd(rawFileName, vhdFileName)
const handler = getHandler({ url: 'file://' })
const originalSize = await handler.getSize(rawFileName)
const newVhd = new Vhd(handler, vhdFileName)
await newVhd.readHeaderAndFooter()
await newVhd.readBlockAllocationTable()
await newVhd.ensureBatSize(2000)
await recoverRawContent(vhdFileName, recoveredFileName, originalSize)
expect(await fs.readFile(recoveredFileName)).toEqual(await fs.readFile(rawFileName))
})
test('coalesce works with empty parent files', async () => {
const mbOfRandom = 2
const rawFileName = `${tempDir}/randomfile`
const emptyFileName = `${tempDir}/empty.vhd`
const vhdFileName = `${tempDir}/randomfile.vhd`
const recoveredFileName = `${tempDir}/recovered`
await createRandomFile(rawFileName, mbOfRandom)
await convertFromRawToVhd(rawFileName, vhdFileName)
await execa('qemu-img', ['create', '-fvpc', emptyFileName, mbOfRandom + 1 + 'M'])
await checkFile(vhdFileName)
await checkFile(emptyFileName)
const handler = getHandler({ url: 'file://' })
const originalSize = await handler._getSize(rawFileName)
await chainVhd(handler, emptyFileName, handler, vhdFileName, true)
await checkFile(vhdFileName)
await checkFile(emptyFileName)
await vhdMerge(handler, emptyFileName, handler, vhdFileName)
await recoverRawContent(emptyFileName, recoveredFileName, originalSize)
expect(await fs.readFile(recoveredFileName)).toEqual(await fs.readFile(rawFileName))
})
test('coalesce works in normal cases', async () => {
const mbOfRandom = 5
const randomFileName = `${tempDir}/randomfile`
const random2FileName = `${tempDir}/randomfile2`
const smallRandomFileName = `${tempDir}/small_randomfile`
const parentFileName = `${tempDir}/parent.vhd`
const child1FileName = `${tempDir}/child1.vhd`
const child2FileName = `${tempDir}/child2.vhd`
const recoveredFileName = `${tempDir}/recovered`
await createRandomFile(randomFileName, mbOfRandom)
await createRandomFile(smallRandomFileName, Math.ceil(mbOfRandom / 2))
await execa('qemu-img', ['create', '-fvpc', parentFileName, mbOfRandom + 1 + 'M'])
await convertFromRawToVhd(randomFileName, child1FileName)
const handler = getHandler({ url: 'file://' })
await execa('vhd-util', ['snapshot', '-n', child2FileName, '-p', child1FileName])
const vhd = new Vhd(handler, child2FileName)
const randomFilePath = `${tempDir}/randomfile`
const random2FilePath = `${tempDir}/randomfile2`
const smallRandomFilePath = `${tempDir}/small_randomfile`
const parentFilePath = `${tempDir}/parent.vhd`
const child1FilePath = `${tempDir}/child1.vhd`
const child2FilePath = `${tempDir}/child2.vhd`
const recoveredFilePath = `${tempDir}/recovered`
await createRandomFile(randomFilePath, mbOfRandom)
await createRandomFile(smallRandomFilePath, Math.ceil(mbOfRandom / 2))
await execa('qemu-img', ['create', '-fvpc', parentFilePath, mbOfRandom + 1 + 'M'])
await checkFile(parentFilePath)
await convertFromRawToVhd(randomFilePath, child1FilePath)
const handler = getHandler({ url: `file://${tempDir}/` })
await execa('vhd-util', ['snapshot', '-n', child2FilePath, '-p', child1FilePath])
const vhd = new VhdFile(handler, 'child2.vhd')
await vhd.readHeaderAndFooter()
await vhd.readBlockAllocationTable()
vhd.footer.creatorApplication = 'xoa'
await vhd.writeFooter()
const originalSize = await handler._getSize(randomFileName)
await chainVhd(handler, parentFileName, handler, child1FileName, true)
await execa('vhd-util', ['check', '-t', '-n', child1FileName])
await chainVhd(handler, child1FileName, handler, child2FileName, true)
await execa('vhd-util', ['check', '-t', '-n', child2FileName])
const smallRandom = await fs.readFile(smallRandomFileName)
const newVhd = new Vhd(handler, child2FileName)
const originalSize = await handler._getSize('randomfile')
await checkFile(child1FilePath)
await chainVhd(handler, 'parent.vhd', handler, 'child1.vhd', true)
await checkFile(child1FilePath)
await chainVhd(handler, 'child1.vhd', handler, 'child2.vhd', true)
await checkFile(child2FilePath)
const smallRandom = await fs.readFile(smallRandomFilePath)
const newVhd = new VhdFile(handler, 'child2.vhd')
await newVhd.readHeaderAndFooter()
await newVhd.readBlockAllocationTable()
await newVhd.writeData(5, smallRandom)
await checkFile(child2FileName)
await checkFile(child1FileName)
await checkFile(parentFileName)
await vhdMerge(handler, parentFileName, handler, child1FileName)
await checkFile(parentFileName)
await chainVhd(handler, parentFileName, handler, child2FileName, true)
await checkFile(child2FileName)
await vhdMerge(handler, parentFileName, handler, child2FileName)
await checkFile(parentFileName)
await recoverRawContent(parentFileName, recoveredFileName, originalSize)
await execa('cp', [randomFileName, random2FileName])
const fd = await fs.open(random2FileName, 'r+')
await checkFile(child2FilePath)
await checkFile(child1FilePath)
await checkFile(parentFilePath)
await vhdMerge(handler, 'parent.vhd', handler, 'child1.vhd')
await checkFile(parentFilePath)
await chainVhd(handler, 'parent.vhd', handler, 'child2.vhd', true)
await checkFile(child2FilePath)
await vhdMerge(handler, 'parent.vhd', handler, 'child2.vhd')
await checkFile(parentFilePath)
await recoverRawContent(parentFilePath, recoveredFilePath, originalSize)
await execa('cp', [randomFilePath, random2FilePath])
const fd = await fs.open(random2FilePath, 'r+')
try {
await fs.write(fd, smallRandom, 0, smallRandom.length, 5 * SECTOR_SIZE)
} finally {
await fs.close(fd)
}
expect(await fs.readFile(recoveredFileName)).toEqual(await fs.readFile(random2FileName))
})
test.only('createSyntheticStream passes vhd-util check', async () => {
const initalSize = 4
const rawFileName = `${tempDir}/randomfile`
const vhdFileName = `${tempDir}/randomfile.vhd`
const recoveredVhdFileName = `${tempDir}/recovered.vhd`
await createRandomFile(rawFileName, initalSize)
await convertFromRawToVhd(rawFileName, vhdFileName)
await checkFile(vhdFileName)
const handler = getHandler({ url: 'file://' })
const stream = await createSyntheticStream(handler, vhdFileName)
const expectedVhdSize = (await fs.stat(vhdFileName)).size
expect(stream.length).toEqual((await fs.stat(vhdFileName)).size)
await pFromCallback(cb => pipeline(stream, fs.createWriteStream(recoveredVhdFileName), cb))
await checkFile(recoveredVhdFileName)
const stats = await fs.stat(recoveredVhdFileName)
expect(stats.size).toEqual(expectedVhdSize)
await execa('qemu-img', ['compare', recoveredVhdFileName, rawFileName])
expect(await fs.readFile(recoveredFilePath)).toEqual(await fs.readFile(random2FilePath))
})

View File

@@ -5,7 +5,7 @@ import noop from './_noop'
import { createLogger } from '@xen-orchestra/log'
import { limitConcurrency } from 'limit-concurrency-decorator'
import Vhd from './vhd'
import { VhdFile } from '.'
import { basename, dirname } from 'path'
import { DISK_TYPE_DIFFERENCING, DISK_TYPE_DYNAMIC } from './_constants'
@@ -25,10 +25,10 @@ export default limitConcurrency(2)(async function merge(
const parentFd = await parentHandler.openFile(parentPath, 'r+')
try {
const parentVhd = new Vhd(parentHandler, parentFd)
const parentVhd = new VhdFile(parentHandler, parentFd)
const childFd = await childHandler.openFile(childPath, 'r')
try {
const childVhd = new Vhd(childHandler, childFd)
const childVhd = new VhdFile(childHandler, childFd)
let mergeState = await parentHandler.readFile(mergeStatePath).catch(error => {
if (error.code !== 'ENOENT') {

View File

@@ -0,0 +1,62 @@
/* eslint-env jest */
import rimraf from 'rimraf'
import tmp from 'tmp'
import { getSyncedHandler } from '@xen-orchestra/fs'
import { Disposable, pFromCallback } from 'promise-toolbox'
import { openVhd } from './index'
import { createRandomFile, convertFromRawToVhd, createRandomVhdDirectory } from './tests/utils'
import { VhdAbstract } from './Vhd/VhdAbstract'
let tempDir
jest.setTimeout(60000)
beforeEach(async () => {
tempDir = await pFromCallback(cb => tmp.dir(cb))
})
afterEach(async () => {
await pFromCallback(cb => rimraf(tempDir, cb))
})
test('It opens a vhd file ( alias or not)', async () => {
  const initialSizeMB = 4
  const rawFileName = `${tempDir}/randomfile`
  await createRandomFile(rawFileName, initialSizeMB)
  const vhdFileName = `${tempDir}/randomfile.vhd`
  await convertFromRawToVhd(rawFileName, vhdFileName)

  await Disposable.use(async function* () {
    const handler = yield getSyncedHandler({ url: `file://${tempDir}/` })

    // opening the plain VHD file exposes the parsed header and footer
    const vhd = yield openVhd(handler, 'randomfile.vhd')
    expect(vhd.header.cookie).toEqual('cxsparse')
    expect(vhd.footer.cookie).toEqual('conectix')

    // opening through an alias yields the same parsed structures,
    // backed by the target file
    await VhdAbstract.createAlias(handler, 'out.alias.vhd', 'randomfile.vhd')
    const alias = yield openVhd(handler, 'out.alias.vhd')
    expect(alias.header.cookie).toEqual('cxsparse')
    expect(alias.footer.cookie).toEqual('conectix')
    expect(alias._path?.path).toEqual('/randomfile.vhd')
  })
})
test('It opens a vhd directory', async () => {
  const initialSizeMB = 4
  const vhdDirectory = `${tempDir}/randomfile.dir`
  await createRandomVhdDirectory(vhdDirectory, initialSizeMB)

  await Disposable.use(async function* () {
    const handler = yield getSyncedHandler({ url: `file://${tempDir}/` })

    // a directory-layout VHD parses like a regular one
    const vhd = yield openVhd(handler, 'randomfile.dir')
    expect(vhd.header.cookie).toEqual('cxsparse')
    expect(vhd.footer.cookie).toEqual('conectix')

    // an alias may also point to a VHD directory
    await VhdAbstract.createAlias(handler, 'out.alias.vhd', 'randomfile.dir')
    const alias = yield openVhd(handler, 'out.alias.vhd')
    expect(alias.header.cookie).toEqual('cxsparse')
    expect(alias.footer.cookie).toEqual('conectix')
    expect(alias._path).toEqual('randomfile.dir')
  })
})

View File

@@ -0,0 +1,14 @@
import { resolveAlias } from './_resolveAlias'
import { VhdFile, VhdDirectory } from './'
/**
 * Open a VHD whatever its on-disk form: plain file, VHD directory, or an
 * alias pointing to either.
 *
 * @param {Object} handler - remote handler
 * @param {string} path - path of the VHD (possibly an alias)
 * @returns {Promise<Object>} an opened VhdFile or VhdDirectory
 */
export async function openVhd(handler, path) {
  const resolved = await resolveAlias(handler, path)
  try {
    return await VhdFile.open(handler, resolved)
  } catch (error) {
    // a VHD directory shows up as EISDIR when opened as a file
    if (error.code === 'EISDIR') {
      return await VhdDirectory.open(handler, resolved)
    }
    throw error
  }
}

View File

@@ -0,0 +1,87 @@
import { pFromCallback } from 'promise-toolbox'
import { pipeline } from 'readable-stream'
import asyncIteratorToStream from 'async-iterator-to-stream'
import execa from 'execa'
import fs from 'fs-extra'
import { randomBytes } from 'crypto'
// readable stream of `size` pseudo-random bytes, produced in 1 KiB chunks
const createRandomStream = asyncIteratorToStream(function* (size) {
  let remaining = size
  while (remaining > 0) {
    const chunkSize = Math.min(remaining, 1024)
    yield randomBytes(chunkSize)
    remaining -= chunkSize
  }
})

// write `sizeMB` MiB of random data into the file `name`
export async function createRandomFile(name, sizeMB) {
  const input = createRandomStream(sizeMB * 1024 * 1024)
  await pFromCallback(cb => pipeline(input, fs.createWriteStream(name), cb))
}
// run `vhd-util check` on the file; rejects (execa throws) if the VHD is invalid
export async function checkFile(vhdName) {
  await execa('vhd-util', ['check', '-p', '-b', '-t', '-n', vhdName])
}
// qemu-img format identifiers
const RAW = 'raw'
const VHD = 'vpc'
const VMDK = 'vmdk'

// convert a disk image from one format to another with qemu-img
async function convert(inputFormat, inputFile, outputFormat, outputFile) {
  const args = ['convert', `-f${inputFormat}`, '-O', outputFormat, inputFile, outputFile]
  await execa('qemu-img', args)
}

export async function convertFromRawToVhd(rawName, vhdName) {
  await convert(RAW, rawName, VHD, vhdName)
}

export async function convertFromVhdToRaw(vhdName, rawName) {
  await convert(VHD, vhdName, RAW, rawName)
}

export async function convertFromVmdkToRaw(vmdkName, rawName) {
  await convert(VMDK, vmdkName, RAW, rawName)
}
// validate a VHD then convert it back to raw; when `originalSize` is given,
// the raw output is truncated to it (qemu-img may round up to block size)
export async function recoverRawContent(vhdName, rawName, originalSize) {
  await checkFile(vhdName)
  await convertFromVhdToRaw(vhdName, rawName)
  if (originalSize !== undefined) {
    await execa('truncate', ['-s', originalSize, rawName])
  }
}
/**
 * Build an on-disk "VHD directory" (footer/header/bat/blocks layout) filled
 * with random data, by converting a random raw file to a VHD and splitting it.
 *
 * @param {string} path - directory to create
 * @param {number} sizeMB - size of the random payload, in MiB
 */
export async function createRandomVhdDirectory(path, sizeMB) {
  // fixed: the promise returned by fs.mkdir() was not awaited, racing with
  // the file writes below
  await fs.mkdir(path)

  const rawFileName = `${path}/temp.raw`
  await createRandomFile(rawFileName, sizeMB)
  const vhdFileName = `${path}/vhd`
  await convertFromRawToVhd(rawFileName, vhdFileName)

  // extract footer (first 512 bytes) and header (next 1024 bytes) of the VHD
  const srcVhd = await fs.open(vhdFileName, 'r')
  const footer = Buffer.alloc(512)
  await fs.read(srcVhd, footer, 0, footer.length, 0)
  await fs.writeFile(path + '/footer', footer)
  const header = Buffer.alloc(1024)
  await fs.read(srcVhd, header, 0, header.length, 512)
  await fs.writeFile(path + '/header', header)
  await fs.close(srcVhd)

  // a BAT , with at most 512 blocks of 2MB
  const bat = Buffer.alloc(512, 1)
  await fs.writeFile(path + '/bat', bat)

  // copy the raw content into 2 MiB block files, each prefixed by its bitmap
  const srcRaw = await fs.open(rawFileName, 'r')
  const blockDataSize = 512 * 4096
  const bitmap = Buffer.alloc(4096)
  await fs.mkdir(path + '/blocks/')
  await fs.mkdir(path + '/blocks/1/')
  // NOTE(review): iterates `sizeMB` blocks of 2 MiB; reads past EOF leave the
  // tail of the buffer zero-filled — confirm intended block count
  for (let i = 0, offset = 0; i < sizeMB; i++, offset += blockDataSize) {
    const blockData = Buffer.alloc(blockDataSize)
    // fixed: fs.read() takes (fd, buffer, offset, length, position); the
    // original passed the file position where the buffer offset goes
    await fs.read(srcRaw, blockData, 0, blockDataSize, offset)
    await fs.writeFile(path + '/blocks/1/' + i, Buffer.concat([bitmap, blockData]))
  }
  await fs.close(srcRaw)
}

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "xapi-explore-sr",
"version": "0.3.0",
"version": "0.4.0",
"license": "ISC",
"description": "Display the list of VDIs (unmanaged and snapshots included) of a SR",
"keywords": [
@@ -39,7 +39,7 @@
"human-format": "^0.11.0",
"lodash": "^4.17.4",
"pw": "^0.0.4",
"xen-api": "^0.34.3"
"xen-api": "^0.35.1"
},
"devDependencies": {
"@babel/cli": "^7.1.5",

View File

@@ -3,6 +3,7 @@
import archy from 'archy'
import chalk from 'chalk'
import execPromise from 'exec-promise'
import firstDefined from '@xen-orchestra/defined'
import humanFormat from 'human-format'
import pw from 'pw'
import { createClient } from 'xen-api'
@@ -69,11 +70,13 @@ execPromise(async args => {
url = required('Host URL'),
user = required('Host user'),
password = await askPassword('Host password'),
httpProxy = firstDefined(process.env.http_proxy, process.env.HTTP_PROXY),
] = args
const xapi = createClient({
allowUnauthorized: true,
auth: { user, password },
httpProxy,
readOnly: true,
url,
watchEvents: false,

View File

@@ -52,6 +52,7 @@ Options:
- `auth`: credentials used to sign in (can also be specified in the URL)
- `readOnly = false`: if true, no methods with side-effects can be called
- `callTimeout`: number of milliseconds after which a call is considered failed (can also be a map of timeouts by methods)
- `httpProxy`: URL of the HTTP/HTTPS proxy used to reach the host, can include credentials
```js
// Force connection.

View File

@@ -34,6 +34,7 @@ Options:
- `auth`: credentials used to sign in (can also be specified in the URL)
- `readOnly = false`: if true, no methods with side-effects can be called
- `callTimeout`: number of milliseconds after which a call is considered failed (can also be a map of timeouts by methods)
- `httpProxy`: URL of the HTTP/HTTPS proxy used to reach the host, can include credentials
```js
// Force connection.

View File

@@ -8,6 +8,6 @@
"promise-toolbox": "^0.19.2",
"readable-stream": "^3.1.1",
"throttle": "^1.0.3",
"vhd-lib": "^1.2.0"
"vhd-lib": "^1.3.0"
}
}

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "xen-api",
"version": "0.34.3",
"version": "0.35.1",
"license": "ISC",
"description": "Connector to the Xen API",
"keywords": [
@@ -34,8 +34,8 @@
"bind-property-descriptor": "^1.0.0",
"blocked": "^1.2.1",
"debug": "^4.0.1",
"http-request-plus": "^0.12",
"jest-diff": "^26.4.2",
"http-request-plus": "^0.13.0",
"jest-diff": "^27.3.1",
"json-rpc-protocol": "^0.13.1",
"kindof": "^2.0.0",
"limit-concurrency-decorator": "^0.5.0",
@@ -43,7 +43,8 @@
"make-error": "^1.3.0",
"minimist": "^1.2.0",
"ms": "^2.1.1",
"promise-toolbox": "^0.19.2",
"promise-toolbox": "^0.20.0",
"proxy-agent": "^5.0.0",
"pw": "0.0.4",
"xmlrpc": "^1.3.2",
"xo-collection": "^0.5.0"

View File

@@ -2,10 +2,10 @@
import blocked from 'blocked'
import createDebug from 'debug'
import diff from 'jest-diff'
import minimist from 'minimist'
import pw from 'pw'
import { asCallback, fromCallback, fromEvent } from 'promise-toolbox'
import { diff } from 'jest-diff'
import { filter, find } from 'lodash'
import { getBoundPropertyDescriptor } from 'bind-property-descriptor'
import { start as createRepl } from 'repl'

View File

@@ -115,6 +115,7 @@ export class Xapi extends EventEmitter {
}
this._allowUnauthorized = opts.allowUnauthorized
this._httpProxy = opts.httpProxy
this._setUrl(url)
this._connected = new Promise(resolve => {
@@ -806,9 +807,9 @@ export class Xapi extends EventEmitter {
async _setHostAddressInUrl(url, host) {
const pool = this._pool
const poolMigrationNetwork = pool.other_config['xo:migrationNetwork']
const poolBackupNetwork = pool.other_config['xo:backupNetwork']
if (host === undefined) {
if (poolMigrationNetwork === undefined) {
if (poolBackupNetwork === undefined) {
const xapiUrl = this._url
url.hostname = xapiUrl.hostname
url.port = xapiUrl.port
@@ -819,16 +820,16 @@ export class Xapi extends EventEmitter {
}
let { address } = host
if (poolMigrationNetwork !== undefined) {
if (poolBackupNetwork !== undefined) {
const hostPifs = new Set(host.PIFs)
try {
const networkRef = await this._roCall('network.get_by_uuid', [poolMigrationNetwork])
const networkRef = await this._roCall('network.get_by_uuid', [poolBackupNetwork])
const networkPifs = await this.getField('network', networkRef, 'PIFs')
const migrationNetworkPifRef = networkPifs.find(hostPifs.has, hostPifs)
address = await this.getField('PIF', migrationNetworkPifRef, 'IP')
const backupNetworkPifRef = networkPifs.find(hostPifs.has, hostPifs)
address = await this.getField('PIF', backupNetworkPifRef, 'IP')
} catch (error) {
console.warn('unable to get the host address linked to the pool migration network', poolMigrationNetwork, error)
console.warn('unable to get the host address linked to the pool backup network', poolBackupNetwork, error)
}
}
@@ -851,6 +852,7 @@ export class Xapi extends EventEmitter {
rejectUnauthorized: !this._allowUnauthorized,
},
url,
httpProxy: this._httpProxy,
})
this._url = url
}

View File

@@ -1,4 +1,5 @@
import httpRequestPlus from 'http-request-plus'
import ProxyAgent from 'proxy-agent'
import { format, parse } from 'json-rpc-protocol'
import XapiError from '../_XapiError'
@@ -6,7 +7,11 @@ import XapiError from '../_XapiError'
import UnsupportedTransport from './_UnsupportedTransport'
// https://github.com/xenserver/xenadmin/blob/0df39a9d83cd82713f32d24704852a0fd57b8a64/XenModel/XenAPI/Session.cs#L403-L433
export default ({ secureOptions, url }) => {
export default ({ secureOptions, url, httpProxy }) => {
let agent
if (httpProxy !== undefined) {
agent = new ProxyAgent(httpProxy)
}
return (method, args) =>
httpRequestPlus
.post(url, {
@@ -17,6 +22,7 @@ export default ({ secureOptions, url }) => {
'Content-Type': 'application/json',
},
path: '/jsonrpc',
agent,
})
.readAll('utf8')
.then(

View File

@@ -1,5 +1,6 @@
import { createClient, createSecureClient } from 'xmlrpc'
import { promisify } from 'promise-toolbox'
import ProxyAgent from 'proxy-agent'
import XapiError from '../_XapiError'
@@ -70,10 +71,15 @@ const parseResult = result => {
throw new UnsupportedTransport()
}
export default ({ secureOptions, url: { hostname, port, protocol } }) => {
export default ({ secureOptions, url: { hostname, port, protocol }, httpProxy }) => {
const secure = protocol === 'https:'
let agent
if (httpProxy !== undefined) {
agent = new ProxyAgent(httpProxy)
}
const client = (secure ? createSecureClient : createClient)({
...(secure ? secureOptions : undefined),
agent,
host: hostname,
path: '/json',
port,

View File

@@ -1,5 +1,6 @@
import { createClient, createSecureClient } from 'xmlrpc'
import { promisify } from 'promise-toolbox'
import ProxyAgent from 'proxy-agent'
import XapiError from '../_XapiError'
@@ -30,10 +31,15 @@ const parseResult = result => {
return result.Value
}
export default ({ secureOptions, url: { hostname, port, protocol } }) => {
export default ({ secureOptions, url: { hostname, port, protocol, httpProxy } }) => {
const secure = protocol === 'https:'
let agent
if (httpProxy !== undefined) {
agent = new ProxyAgent(httpProxy)
}
const client = (secure ? createSecureClient : createClient)({
...(secure ? secureOptions : undefined),
agent,
host: hostname,
port,
})

View File

@@ -34,7 +34,7 @@
"chalk": "^4.1.0",
"exec-promise": "^0.7.0",
"fs-extra": "^10.0.0",
"http-request-plus": "^0.12",
"http-request-plus": "^0.13.0",
"human-format": "^0.11.0",
"l33teral": "^3.0.3",
"lodash": "^4.17.4",
@@ -42,7 +42,7 @@
"mkdirp": "^1.0.4",
"pretty-ms": "^7.0.0",
"progress-stream": "^2.0.0",
"promise-toolbox": "^0.19.2",
"promise-toolbox": "^0.20.0",
"pump": "^3.0.0",
"pw": "^0.0.4",
"strip-indent": "^3.0.0",

View File

@@ -14,7 +14,7 @@ const chalk = require('chalk')
const forEach = require('lodash/forEach')
const fromCallback = require('promise-toolbox/fromCallback')
const getKeys = require('lodash/keys')
const hrp = require('http-request-plus').default
const hrp = require('http-request-plus')
const humanFormat = require('human-format')
const identity = require('lodash/identity')
const isObject = require('lodash/isObject')

View File

@@ -30,7 +30,7 @@
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"cross-env": "^7.0.2",
"promise-toolbox": "^0.19.2",
"promise-toolbox": "^0.20.0",
"rimraf": "^3.0.0"
},
"scripts": {

View File

@@ -40,7 +40,7 @@
"xo-lib": "^0.10.1"
},
"devDependencies": {
"@types/node": "^14.0.9",
"@types/node": "^16.11.6",
"@types/through2": "^2.0.31",
"typescript": "^3.1.6"
},

View File

@@ -49,7 +49,7 @@
"@xen-orchestra/cron": "^1.0.6",
"@xen-orchestra/log": "^0.3.0",
"async-iterator-to-stream": "^1.1.0",
"promise-toolbox": "^0.19.2",
"promise-toolbox": "^0.20.0",
"readable-stream": "^3.5.0",
"xo-common": "^0.7.0"
},

View File

@@ -28,15 +28,15 @@
"xo-server-auth-ldap": "dist/test-cli.js"
},
"engines": {
"node": ">=10"
"node": ">=12"
},
"dependencies": {
"@xen-orchestra/log": "^0.3.0",
"ensure-array": "^1.0.0",
"exec-promise": "^0.7.0",
"inquirer": "^8.0.0",
"ldapts": "^2.2.1",
"promise-toolbox": "^0.19.2"
"ldapts": "^3.1.1",
"promise-toolbox": "^0.20.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",

View File

@@ -1,6 +1,6 @@
{
"name": "xo-server-netbox",
"version": "0.3.1",
"version": "0.3.3",
"license": "AGPL-3.0-or-later",
"description": "Synchronizes pools managed by Xen Orchestra with Netbox",
"keywords": [
@@ -31,7 +31,8 @@
"dependencies": {
"@xen-orchestra/log": "^0.3.0",
"ipaddr.js": "^2.0.1",
"lodash": "^4.17.21"
"lodash": "^4.17.21",
"semver": "^7.3.5"
},
"devDependencies": {
"@babel/cli": "^7.13.16",

View File

@@ -1,5 +1,6 @@
import assert from 'assert'
import ipaddr from 'ipaddr.js'
import semver from 'semver'
import { createLogger } from '@xen-orchestra/log'
import { find, flatten, forEach, groupBy, isEmpty, keyBy, mapValues, omit, trimEnd, zipObject } from 'lodash'
@@ -44,6 +45,7 @@ class Netbox {
#endpoint
#intervalToken
#loaded
#netboxApiVersion
#pools
#removeApiMethods
#syncInterval
@@ -113,6 +115,7 @@ class Netbox {
const httpRequest = async () => {
try {
const response = await this.#xo.httpRequest(url, options)
this.#netboxApiVersion = response.headers['api-version']
const body = await response.readAll()
if (body.length > 0) {
return JSON.parse(body)
@@ -332,10 +335,18 @@ class Netbox {
vcpus: vm.CPUs.number,
disk,
memory: Math.floor(vm.memory.dynamic[1] / M),
status: vm.power_state === 'Running' ? 'active' : 'offline',
custom_fields: { uuid: vm.uuid },
}
if (this.#netboxApiVersion !== undefined) {
// https://netbox.readthedocs.io/en/stable/release-notes/version-2.7/#api-choice-fields-now-use-string-values-3569
if (semver.satisfies(semver.coerce(this.#netboxApiVersion).version, '>=2.7.0')) {
updatedVm.status = vm.power_state === 'Running' ? 'active' : 'offline'
} else {
updatedVm.status = vm.power_state === 'Running' ? 1 : 0
}
}
if (oldNetboxVm === undefined) {
vmsToCreate.push(updatedVm)
} else {
@@ -651,7 +662,7 @@ class Netbox {
}
async test() {
const randomSuffix = Math.random().toString(36).slice(2)
const randomSuffix = Math.random().toString(36).slice(2, 11)
const name = '[TMP] Xen Orchestra Netbox plugin test - ' + randomSuffix
await this.#makeRequest('/virtualization/cluster-types/', 'POST', {
name,

View File

@@ -18,7 +18,7 @@
},
"version": "1.0.5",
"engines": {
"node": ">=8.10"
"node": ">=10"
},
"devDependencies": {
"@babel/cli": "^7.4.4",
@@ -29,13 +29,13 @@
"cross-env": "^7.0.2"
},
"dependencies": {
"@vates/coalesce-calls": "^0.1.0",
"@xen-orchestra/log": "^0.3.0",
"@xen-orchestra/openflow": "^0.1.1",
"@vates/coalesce-calls": "^0.1.0",
"ipaddr.js": "^1.9.1",
"ipaddr.js": "^2.0.1",
"lodash": "^4.17.11",
"node-openssl-cert": "^0.0.117",
"promise-toolbox": "^0.19.2",
"node-openssl-cert": "^0.1.34",
"promise-toolbox": "^0.20.0",
"uuid": "^8.3.1"
},
"private": true,

View File

@@ -31,9 +31,9 @@
"app-conf": "^0.9.0",
"babel-plugin-lodash": "^3.2.11",
"golike-defer": "^0.5.1",
"jest": "^26.6.3",
"jest": "^27.3.1",
"lodash": "^4.17.11",
"promise-toolbox": "^0.19.2",
"promise-toolbox": "^0.20.0",
"xo-collection": "^0.5.0",
"xo-common": "^0.7.0",
"xo-lib": "^0.10.1"

View File

@@ -30,7 +30,7 @@
"dependencies": {
"nodemailer": "^6.1.0",
"nodemailer-markdown": "^1.0.1",
"promise-toolbox": "^0.19.2"
"promise-toolbox": "^0.20.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",

View File

@@ -29,7 +29,7 @@
"node": ">=6"
},
"dependencies": {
"promise-toolbox": "^0.19.2",
"promise-toolbox": "^0.20.0",
"slack-node": "^0.1.8"
},
"devDependencies": {

View File

@@ -30,7 +30,7 @@
},
"dependencies": {
"node-xmpp-client": "^3.0.0",
"promise-toolbox": "^0.19.2"
"promise-toolbox": "^0.20.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",

View File

@@ -37,7 +37,7 @@
"html-minifier": "^4.0.0",
"human-format": "^0.11.0",
"lodash": "^4.17.4",
"promise-toolbox": "^0.19.2"
"promise-toolbox": "^0.20.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",

View File

@@ -76,6 +76,8 @@ defaultSignInPage = '/signin'
throttlingDelay = '2 seconds'
[backups]
disableMergeWorker = false
# Mode to use for newly created backup directories
#
# https://en.wikipedia.org/wiki/File-system_permissions#Numeric_notation

View File

@@ -1,7 +1,7 @@
{
"private": true,
"name": "xo-server",
"version": "5.82.3",
"version": "5.83.0",
"license": "AGPL-3.0-or-later",
"description": "Server part of Xen-Orchestra",
"keywords": [
@@ -35,7 +35,7 @@
"@vates/parse-duration": "^0.1.1",
"@vates/read-chunk": "^0.1.2",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/backups": "^0.13.0",
"@xen-orchestra/backups": "^0.15.1",
"@xen-orchestra/cron": "^1.0.6",
"@xen-orchestra/defined": "^0.0.1",
"@xen-orchestra/emit-async": "^0.1.0",
@@ -45,7 +45,7 @@
"@xen-orchestra/mixins": "^0.1.1",
"@xen-orchestra/self-signed": "^0.1.0",
"@xen-orchestra/template": "^0.1.0",
"@xen-orchestra/xapi": "^0.7.0",
"@xen-orchestra/xapi": "^0.8.0",
"ajv": "^8.0.3",
"app-conf": "^0.9.0",
"async-iterator-to-stream": "^1.0.1",
@@ -75,11 +75,11 @@
"helmet": "^3.9.0",
"highland": "^2.11.1",
"http-proxy": "^1.16.2",
"http-request-plus": "^0.12",
"http-request-plus": "^0.13.0",
"http-server-plus": "^0.11.0",
"human-format": "^0.11.0",
"iterable-backoff": "^0.1.0",
"js-yaml": "^3.10.0",
"js-yaml": "^4.1.0",
"json-rpc-peer": "^0.17.0",
"json-stringify-safe": "^5.0.1",
"json5": "^2.0.1",
@@ -96,25 +96,25 @@
"multikey-hash": "^1.0.4",
"multiparty": "^4.2.2",
"ndjson": "^2.0.0",
"openpgp": "^4.10.4",
"openpgp": "^5.0.0",
"otplib": "^11.0.0",
"partial-stream": "0.0.0",
"passport": "^0.4.0",
"passport-local": "^1.0.0",
"promise-toolbox": "^0.19.2",
"proxy-agent": "^4.0.0",
"promise-toolbox": "^0.20.0",
"proxy-agent": "^5.0.0",
"pug": "^3.0.0",
"pump": "^3.0.0",
"pumpify": "^2.0.0",
"pw": "^0.0.4",
"readable-stream": "^3.2.0",
"redis": "^3.0.2",
"schema-inspector": "^1.7.0",
"schema-inspector": "^2.0.1",
"semver": "^7.3.2",
"serve-static": "^1.13.1",
"set-cookie-parser": "^2.3.5",
"source-map-support": "^0.5.16",
"split2": "^3.1.1",
"split2": "^4.1.0",
"stoppable": "^1.0.5",
"subleveldown": "^5.0.1",
"tar-stream": "^2.0.1",
@@ -122,10 +122,10 @@
"unzipper": "^0.10.5",
"uuid": "^8.3.1",
"value-matcher": "^0.2.0",
"vhd-lib": "^1.2.0",
"ws": "^7.1.2",
"vhd-lib": "^1.3.0",
"ws": "^8.2.3",
"xdg-basedir": "^4.0.0",
"xen-api": "^0.34.3",
"xen-api": "^0.35.1",
"xo-acl-resolver": "^0.4.1",
"xo-collection": "^0.5.0",
"xo-common": "^0.7.0",

View File

@@ -166,12 +166,12 @@ async function handleImport(req, res, { type, name, description, vmdkData, srId,
if (part.name !== 'file') {
promises.push(
(async () => {
const buffer = await getStream.buffer(part)
vmdkData[part.name] = new Uint32Array(
buffer.buffer,
buffer.byteOffset,
buffer.length / Uint32Array.BYTES_PER_ELEMENT
)
const buffer = await getStream.buffer(part)
vmdkData[part.name] = new Uint32Array(
buffer.buffer,
buffer.byteOffset,
buffer.length / Uint32Array.BYTES_PER_ELEMENT
)
})()
)
} else {

View File

@@ -183,14 +183,15 @@ start.resolve = {
// -------------------------------------------------------------------
export function stop({ host }) {
return this.getXapi(host).shutdownHost(host._xapiId)
export function stop({ host, bypassEvacuate }) {
return this.getXapi(host).shutdownHost(host._xapiId, { bypassEvacuate })
}
stop.description = 'stop the host'
stop.params = {
id: { type: 'string' },
bypassEvacuate: { type: 'boolean', optional: true },
}
stop.resolve = {

View File

@@ -7,6 +7,7 @@ export async function set({
name_description: nameDescription,
name_label: nameLabel,
backupNetwork,
migrationNetwork,
}) {
pool = this.getXapiObject(pool)
@@ -15,6 +16,7 @@ export async function set({
nameDescription !== undefined && pool.set_name_description(nameDescription),
nameLabel !== undefined && pool.set_name_label(nameLabel),
migrationNetwork !== undefined && pool.update_other_config('xo:migrationNetwork', migrationNetwork),
backupNetwork !== undefined && pool.update_other_config('xo:backupNetwork', backupNetwork),
])
}
@@ -30,6 +32,10 @@ set.params = {
type: 'string',
optional: true,
},
backupNetwork: {
type: ['string', 'null'],
optional: true,
},
migrationNetwork: {
type: ['string', 'null'],
optional: true,

View File

@@ -36,6 +36,10 @@ add.params = {
optional: true,
type: 'boolean',
},
httpProxy: {
optional: true,
type: 'string',
},
}
// -------------------------------------------------------------------
@@ -104,6 +108,10 @@ set.params = {
optional: true,
type: 'boolean',
},
httpProxy: {
optional: true,
type: ['string', 'null'],
},
}
// -------------------------------------------------------------------

View File

@@ -659,22 +659,28 @@ export const clone = defer(async function ($defer, { vm, name, full_copy: fullCo
await checkPermissionOnSrs.call(this, vm)
const xapi = this.getXapi(vm)
const { $id: cloneId, $ref: cloneRef } = await xapi.cloneVm(vm._xapiRef, {
const newVm = await xapi.cloneVm(vm._xapiRef, {
nameLabel: name,
fast: !fullCopy,
})
$defer.onFailure(() => xapi.VM_destroy(cloneRef))
$defer.onFailure(() => xapi.VM_destroy(newVm.$ref))
// A snapshot may have its `is_a_template` flag set to true, which isn't
// automatically set to false when cloning it
if (vm.type !== 'VM-template') {
await newVm.set_is_a_template(false)
}
const isAdmin = this.user.permission === 'admin'
if (!isAdmin) {
await this.addAcl(this.user.id, cloneId, 'admin')
await this.addAcl(this.user.id, newVm.$id, 'admin')
}
if (vm.resourceSet !== undefined) {
await this.allocateLimitsInResourceSet(await this.computeVmResourcesUsage(vm), vm.resourceSet, isAdmin)
}
return cloneId
return newVm.$id
})
clone.params = {
@@ -691,25 +697,32 @@ clone.resolve = {
// TODO: implement resource sets
export async function copy({ compress, name: nameLabel, sr, vm }) {
let newVm
if (vm.$pool === sr.$pool) {
if (vm.power_state === 'Running') {
await checkPermissionOnSrs.call(this, vm)
}
return this.getXapi(vm)
.copyVm(vm._xapiId, {
newVm = await this.getXapi(vm).copyVm(vm._xapiId, {
nameLabel,
srOrSrId: sr._xapiId,
})
} else {
newVm = (
await this.getXapi(vm).remoteCopyVm(vm._xapiId, this.getXapi(sr), sr._xapiId, {
compress,
nameLabel,
srOrSrId: sr._xapiId,
})
.then(vm => vm.$id)
).vm
}
return this.getXapi(vm)
.remoteCopyVm(vm._xapiId, this.getXapi(sr), sr._xapiId, {
compress,
nameLabel,
})
.then(({ vm }) => vm.$id)
// A snapshot may have its `is_a_template` flag set to true, which isn't
// automatically set to false when copying it
if (vm.type !== 'VM-template') {
await newVm.set_is_a_template(false)
}
return newVm.$id
}
copy.params = {
@@ -1037,41 +1050,45 @@ async function handleVmImport(req, res, { data, srId, type, xapi }) {
// Timeout seems to be broken in Node 4.
// See https://github.com/nodejs/node/issues/3319
req.setTimeout(43200000) // 12 hours
const vm = await new Promise((resolve, reject) => {
const form = new multiparty.Form()
const promises = []
const tables = {}
form.on('error', reject)
form.on('part', async part => {
try {
if (part.name !== 'file') {
promises.push(
(async () => {
if (!(part.filename in tables)) {
tables[part.filename] = {}
}
const buffer = await getStream.buffer(part)
tables[part.filename][part.name] = new Uint32Array(
buffer.buffer,
buffer.byteOffset,
buffer.length / Uint32Array.BYTES_PER_ELEMENT
// expect "multipart/form-data; boundary=something"
const contentType = req.headers['content-type']
const vm = await (contentType != undefined && contentType.startsWith('multipart/form-data')
? new Promise((resolve, reject) => {
const form = new multiparty.Form()
const promises = []
const tables = {}
form.on('error', reject)
form.on('part', async part => {
try {
if (part.name !== 'file') {
promises.push(
(async () => {
if (!(part.filename in tables)) {
tables[part.filename] = {}
}
const buffer = await getStream.buffer(part)
tables[part.filename][part.name] = new Uint32Array(
buffer.buffer,
buffer.byteOffset,
buffer.length / Uint32Array.BYTES_PER_ELEMENT
)
data.tables = tables
})()
)
data.tables = tables
})()
)
} else {
await Promise.all(promises)
// XVA files are directly sent to xcp-ng who wants a content-length
part.length = part.byteCount
resolve(xapi.importVm(part, { data, srId, type }))
}
} catch (e) {
// multiparty is not promise-aware, we have to chain errors ourselves.
reject(e)
}
})
form.parse(req)
})
} else {
await Promise.all(promises)
// XVA files are directly sent to xcp-ng who wants a content-length
part.length = part.byteCount
resolve(xapi.importVm(part, { data, srId, type }))
}
} catch (e) {
// multiparty is not promise-aware, we have to chain errors ourselves.
reject(e)
}
})
form.parse(req)
})
: xapi.importVm(req, { data, srId, type }))
res.end(format.response(0, vm.$id))
}

View File

@@ -21,7 +21,7 @@ import pw from 'pw'
import serveStatic from 'serve-static'
import stoppable from 'stoppable'
import WebServer from 'http-server-plus'
import WebSocket from 'ws'
import WebSocket, { WebSocketServer } from 'ws'
import xdg from 'xdg-basedir'
import { createLogger } from '@xen-orchestra/log'
import { createRequire } from 'module'
@@ -505,7 +505,7 @@ const setUpProxies = (express, opts, xo) => {
})
// WebSocket proxy.
const webSocketServer = new WebSocket.Server({
const webSocketServer = new WebSocketServer({
noServer: true,
})
xo.hooks.on('stop', () => fromCallback.call(webSocketServer, 'close'))
@@ -543,7 +543,7 @@ const setUpStaticFiles = (express, opts) => {
// ===================================================================
const setUpApi = (webServer, xo, config) => {
const webSocketServer = new WebSocket.Server({
const webSocketServer = new WebSocketServer({
...config.apiWebSocketOptions,
noServer: true,
@@ -613,7 +613,7 @@ const setUpApi = (webServer, xo, config) => {
const CONSOLE_PROXY_PATH_RE = /^\/api\/consoles\/(.*)$/
const setUpConsoleProxy = (webServer, xo) => {
const webSocketServer = new WebSocket.Server({
const webSocketServer = new WebSocketServer({
noServer: true,
})
xo.hooks.on('stop', () => fromCallback.call(webSocketServer, 'close'))

View File

@@ -297,10 +297,13 @@ export default class Xapi extends XapiBase {
await this.call('host.syslog_reconfigure', host.$ref)
}
async shutdownHost(hostId, force = false) {
async shutdownHost(hostId, { force = false, bypassEvacuate = false }) {
const host = this.getObject(hostId)
await this.clearHost(host, force)
if (bypassEvacuate) {
await this.call('host.disable', host.$ref)
} else {
await this.clearHost(host, force)
}
await this.callAsync('host.shutdown', host.$ref)
}

View File

@@ -41,13 +41,11 @@ export default class ConfigManagement {
if (passphrase !== undefined) {
config = Buffer.from(
(
await openpgp.encrypt({
armor: false,
message: openpgp.message.fromText(config),
passwords: passphrase,
})
).message.packets.write()
await openpgp.encrypt({
format: 'binary',
message: await openpgp.createMessage({ text: config }),
passwords: passphrase,
})
)
}
@@ -58,8 +56,7 @@ export default class ConfigManagement {
if (passphrase !== undefined) {
config = (
await openpgp.decrypt({
format: 'utf8',
message: await openpgp.message.read(config),
message: await openpgp.readMessage({ binaryMessage: config }),
passwords: passphrase,
})
).data

Some files were not shown because too many files have changed in this diff Show More