Compare commits

1 Commit

lazy-mixin...self-signed

| Author | SHA1 | Date |
|---|---|---|
|  | 5eb0055ebb |  |
@@ -9,7 +9,7 @@ exports.EventListenersManager = class EventListenersManager {
   }
 
   add(type, listener) {
-    let listeners = this._listeners.get(type)
+    let listeners = this._listeners[type]
     if (listeners === undefined) {
      listeners = new Set()
      this._listeners.set(type, listeners)
@@ -1,67 +0,0 @@
-'use strict'
-
-const t = require('tap')
-const { EventEmitter } = require('events')
-
-const { EventListenersManager } = require('./')
-
-const noop = Function.prototype
-
-// function spy (impl = Function.prototype) {
-//   function spy() {
-//     spy.calls.push([Array.from(arguments), this])
-//   }
-//   spy.calls = []
-//   return spy
-// }
-
-function assertListeners(t, event, listeners) {
-  t.strictSame(t.context.ee.listeners(event), listeners)
-}
-
-t.beforeEach(function (t) {
-  t.context.ee = new EventEmitter()
-  t.context.em = new EventListenersManager(t.context.ee)
-})
-
-t.test('.add adds a listener', function (t) {
-  t.context.em.add('foo', noop)
-
-  assertListeners(t, 'foo', [noop])
-
-  t.end()
-})
-
-t.test('.add does not add a duplicate listener', function (t) {
-  t.context.em.add('foo', noop).add('foo', noop)
-
-  assertListeners(t, 'foo', [noop])
-
-  t.end()
-})
-
-t.test('.remove removes a listener', function (t) {
-  t.context.em.add('foo', noop).remove('foo', noop)
-
-  assertListeners(t, 'foo', [])
-
-  t.end()
-})
-
-t.test('.removeAll removes all listeners of a given type', function (t) {
-  t.context.em.add('foo', noop).add('bar', noop).removeAll('foo')
-
-  assertListeners(t, 'foo', [])
-  assertListeners(t, 'bar', [noop])
-
-  t.end()
-})
-
-t.test('.removeAll removes all listeners', function (t) {
-  t.context.em.add('foo', noop).add('bar', noop).removeAll()
-
-  assertListeners(t, 'foo', [])
-  assertListeners(t, 'bar', [])
-
-  t.end()
-})
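For context, the deleted test file above is the only usage documentation for `@vates/event-listeners-manager` in this compare. A minimal usage sketch distilled from those tests (the emitter is arbitrary; the package name is taken from the dependency lists further down):

```js
const { EventEmitter } = require('events')
const { EventListenersManager } = require('@vates/event-listeners-manager')

const ee = new EventEmitter()
const em = new EventListenersManager(ee)

const onFoo = () => {}

// methods are chainable, and adding the same listener twice is a no-op
em.add('foo', onFoo).add('foo', onFoo) // a single 'foo' listener
em.remove('foo', onFoo) // remove one listener
em.removeAll('foo') // remove all 'foo' listeners added through this manager
em.removeAll() // remove every listener added through this manager
```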
@@ -37,10 +37,6 @@
   "license": "ISC",
   "version": "1.0.0",
   "scripts": {
-    "postversion": "npm publish --access public",
-    "test": "tap --branches=72"
-  },
-  "devDependencies": {
-    "tap": "^16.2.0"
+    "postversion": "npm publish --access public"
   }
 }
@@ -1,9 +1,6 @@
 ### `readChunk(stream, [size])`
 
-- returns the next available chunk of data
-- like `stream.read()`, a number of bytes can be specified
-- returns with less data than expected if stream has ended
-- returns `null` if the stream has ended and no data has been read
+- returns `null` if the stream has ended
 
 ```js
 import { readChunk } from '@vates/read-chunk'
@@ -14,13 +11,3 @@ import { readChunk } from '@vates/read-chunk'
   }
 })()
 ```
-
-### `readChunkStrict(stream, [size])`
-
-Similar behavior to `readChunk` but throws if the stream ended before the requested data could be read.
-
-```js
-import { readChunkStrict } from '@vates/read-chunk'
-
-const chunk = await readChunkStrict(stream, 1024)
-```
@@ -16,12 +16,9 @@ Installation of the [npm package](https://npmjs.org/package/@vates/read-chunk):
 
 ## Usage
 
 ### `readChunk(stream, [size])`
 
-- returns the next available chunk of data
-- like `stream.read()`, a number of bytes can be specified
-- returns with less data than expected if stream has ended
-- returns `null` if the stream has ended and no data has been read
+- returns `null` if the stream has ended
 
 ```js
 import { readChunk } from '@vates/read-chunk'
@@ -33,16 +30,6 @@ import { readChunk } from '@vates/read-chunk'
 })()
 ```
-
-### `readChunkStrict(stream, [size])`
-
-Similar behavior to `readChunk` but throws if the stream ended before the requested data could be read.
-
-```js
-import { readChunkStrict } from '@vates/read-chunk'
-
-const chunk = await readChunkStrict(stream, 1024)
-```
 
 ## Contributions
 
 Contributions are _very_ welcomed, either on the documentation or on
@@ -30,22 +30,3 @@ const readChunk = (stream, size) =>
       onReadable()
     })
 exports.readChunk = readChunk
-
-exports.readChunkStrict = async function readChunkStrict(stream, size) {
-  const chunk = await readChunk(stream, size)
-  if (chunk === null) {
-    throw new Error('stream has ended without data')
-  }
-
-  if (size !== undefined && chunk.length !== size) {
-    const error = new Error('stream has ended with not enough data')
-    Object.defineProperties(error, {
-      chunk: {
-        value: chunk,
-      },
-    })
-    throw error
-  }
-
-  return chunk
-}
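The deleted `readChunkStrict` above distinguishes two failure modes: no data at all, and not enough data, in which case the partial data is attached to the error as a `chunk` property. A short sketch of consuming that contract, distilled from the spec file below:

```js
const { Readable } = require('stream')
const { readChunkStrict } = require('@vates/read-chunk')

async function main() {
  const stream = Readable.from(['foo', 'bar'], { objectMode: false })
  try {
    await readChunkStrict(stream, 10)
  } catch (error) {
    console.log(error.message) // 'stream has ended with not enough data'
    console.log(error.chunk) // Buffer.from('foobar'), the partial data
  }
}
main()
```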
@@ -4,7 +4,7 @@
 
 const { Readable } = require('stream')
 
-const { readChunk, readChunkStrict } = require('./')
+const { readChunk } = require('./')
 
 const makeStream = it => Readable.from(it, { objectMode: false })
 makeStream.obj = Readable.from
@@ -43,27 +43,3 @@ describe('readChunk', () => {
     })
   })
 })
-
-const rejectionOf = promise =>
-  promise.then(
-    value => {
-      throw value
-    },
-    error => error
-  )
-
-describe('readChunkStrict', function () {
-  it('throws if stream is empty', async () => {
-    const error = await rejectionOf(readChunkStrict(makeStream([])))
-    expect(error).toBeInstanceOf(Error)
-    expect(error.message).toBe('stream has ended without data')
-    expect(error.chunk).toEqual(undefined)
-  })
-
-  it('throws if stream ends with not enough data', async () => {
-    const error = await rejectionOf(readChunkStrict(makeStream(['foo', 'bar']), 10))
-    expect(error).toBeInstanceOf(Error)
-    expect(error.message).toBe('stream has ended with not enough data')
-    expect(error.chunk).toEqual(Buffer.from('foobar'))
-  })
-})
@@ -7,8 +7,8 @@
   "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
   "dependencies": {
     "@xen-orchestra/async-map": "^0.1.2",
-    "@xen-orchestra/backups": "^0.25.0",
-    "@xen-orchestra/fs": "^1.0.3",
+    "@xen-orchestra/backups": "^0.23.0",
+    "@xen-orchestra/fs": "^1.0.1",
     "filenamify": "^4.1.0",
     "getopts": "^2.2.5",
     "lodash": "^4.17.15",
@@ -27,7 +27,7 @@
   "scripts": {
     "postversion": "npm publish --access public"
   },
-  "version": "0.7.3",
+  "version": "0.7.1",
   "license": "AGPL-3.0-or-later",
   "author": {
     "name": "Vates SAS",
@@ -6,7 +6,7 @@ const ignoreErrors = require('promise-toolbox/ignoreErrors')
 const { compileTemplate } = require('@xen-orchestra/template')
 const { limitConcurrency } = require('limit-concurrency-decorator')
 
-const { extractIdsFromSimplePattern } = require('./extractIdsFromSimplePattern.js')
+const { extractIdsFromSimplePattern } = require('./_extractIdsFromSimplePattern.js')
 const { PoolMetadataBackup } = require('./_PoolMetadataBackup.js')
 const { Task } = require('./Task.js')
 const { VmBackup } = require('./_VmBackup.js')
@@ -153,13 +153,6 @@ class VmBackup {
         errors.push(error)
         this.delete(writer)
         warn(warnMessage, { error, writer: writer.constructor.name })
-
-        // these two steps are the only one that are not already in their own sub tasks
-        if (warnMessage === 'writer.checkBaseVdis()' || warnMessage === 'writer.beforeBackup()') {
-          Task.warning(
-            `the writer ${writer.constructor.name} has failed the step ${warnMessage} with error ${error.message}. It won't be used anymore in this job execution.`
-          )
-        }
       }
     })
    if (writers.size === 0) {
@@ -47,32 +47,42 @@ const computeVhdsSize = (handler, vhdPaths) =>
 // | |
 // \___________rename_____________/
 
-async function mergeVhdChain(chain, { handler, logInfo, remove, merge }) {
+async function mergeVhdChain(chain, { handler, onLog, remove, merge }) {
   assert(chain.length >= 2)
   const chainCopy = [...chain]
   const parent = chainCopy.pop()
   const children = chainCopy
 
   if (merge) {
-    logInfo(`merging children into parent`, { childrenCount: children.length, parent })
+    onLog(`merging ${children.length} children into ${parent}`)
 
     let done, total
     const handle = setInterval(() => {
       if (done !== undefined) {
-        logInfo(`merging children in progress`, { children, parent, doneCount: done, totalCount: total })
+        onLog(`merging ${children.join(',')} into ${parent}: ${done}/${total}`)
       }
     }, 10e3)
 
     const mergedSize = await mergeVhd(handler, parent, handler, children, {
-      logInfo,
       onProgress({ done: d, total: t }) {
         done = d
         total = t
       },
       remove,
     })
-
     clearInterval(handle)
+    const mergeTargetChild = children.shift()
+    await Promise.all([
+      VhdAbstract.rename(handler, parent, mergeTargetChild),
+      asyncMap(children, child => {
+        onLog(`the VHD ${child} is already merged`)
+        if (remove) {
+          onLog(`deleting merged VHD ${child}`)
+          return VhdAbstract.unlink(handler, child)
+        }
+      }),
+    ])
+
     return mergedSize
   }
 }
@@ -115,19 +125,14 @@ const listVhds = async (handler, vmDir) => {
   return { vhds, interruptedVhds, aliases }
 }
 
-async function checkAliases(
-  aliasPaths,
-  targetDataRepository,
-  { handler, logInfo = noop, logWarn = console.warn, remove = false }
-) {
+async function checkAliases(aliasPaths, targetDataRepository, { handler, onLog = noop, remove = false }) {
   const aliasFound = []
   for (const path of aliasPaths) {
     const target = await resolveVhdAlias(handler, path)
 
     if (!isVhdFile(target)) {
-      logWarn('alias references non VHD target', { path, target })
+      onLog(`Alias ${path} references a non vhd target: ${target}`)
       if (remove) {
-        logInfo('removing alias and non VHD target', { path, target })
        await handler.unlink(target)
        await handler.unlink(path)
       }
@@ -142,13 +147,13 @@ async function checkAliases(
        // error during dispose should not trigger a deletion
       }
     } catch (error) {
-      logWarn('missing or broken alias target', { target, path, error })
+      onLog(`target ${target} of alias ${path} is missing or broken`, { error })
       if (remove) {
         try {
           await VhdAbstract.unlink(handler, path)
-        } catch (error) {
-          if (error.code !== 'ENOENT') {
-            logWarn('error deleting alias target', { target, path, error })
+        } catch (e) {
+          if (e.code !== 'ENOENT') {
+            onLog(`Error while deleting target ${target} of alias ${path}`, { error: e })
           }
         }
       }
@@ -165,22 +170,20 @@ async function checkAliases(
 
   entries.forEach(async entry => {
     if (!aliasFound.includes(entry)) {
-      logWarn('no alias references VHD', { entry })
+      onLog(`the Vhd ${entry} is not referenced by a an alias`)
       if (remove) {
-        logInfo('deleting unaliased VHD')
        await VhdAbstract.unlink(handler, entry)
       }
     }
   })
 }
 
 exports.checkAliases = checkAliases
 
 const defaultMergeLimiter = limitConcurrency(1)
 
 exports.cleanVm = async function cleanVm(
   vmDir,
-  { fixMetadata, remove, merge, mergeLimiter = defaultMergeLimiter, logInfo = noop, logWarn = console.warn }
+  { fixMetadata, remove, merge, mergeLimiter = defaultMergeLimiter, onLog = noop }
 ) {
   const limitedMergeVhdChain = mergeLimiter(mergeVhdChain)
@@ -211,9 +214,9 @@ exports.cleanVm = async function cleanVm(
      })
     } catch (error) {
      vhds.delete(path)
-      logWarn('VHD check error', { path, error })
+      onLog(`error while checking the VHD with path ${path}`, { error })
      if (error?.code === 'ERR_ASSERTION' && remove) {
-        logInfo('deleting broken path', { path })
+        onLog(`deleting broken ${path}`)
        return VhdAbstract.unlink(handler, path)
      }
    }
@@ -225,12 +228,12 @@ exports.cleanVm = async function cleanVm(
    const statePath = interruptedVhds.get(interruptedVhd)
    interruptedVhds.delete(interruptedVhd)
 
-    logWarn('orphan merge state', {
+    onLog('orphan merge state', {
      mergeStatePath: statePath,
      missingVhdPath: interruptedVhd,
    })
    if (remove) {
-      logInfo('deleting orphan merge state', { statePath })
+      onLog(`deleting orphan merge state ${statePath}`)
      await handler.unlink(statePath)
    }
  }
@@ -239,7 +242,7 @@ exports.cleanVm = async function cleanVm(
  // check if alias are correct
  // check if all vhd in data subfolder have a corresponding alias
  await asyncMap(Object.keys(aliases), async dir => {
-    await checkAliases(aliases[dir], `${dir}/data`, { handler, logInfo, logWarn, remove })
+    await checkAliases(aliases[dir], `${dir}/data`, { handler, onLog, remove })
  })
 
  // remove VHDs with missing ancestors
@@ -261,9 +264,9 @@ exports.cleanVm = async function cleanVm(
    if (!vhds.has(parent)) {
      vhds.delete(vhdPath)
 
-      logWarn('parent VHD is missing', { parent, vhdPath })
+      onLog(`the parent ${parent} of the VHD ${vhdPath} is missing`)
      if (remove) {
-        logInfo('deleting orphan VHD', { vhdPath })
+        onLog(`deleting orphan VHD ${vhdPath}`)
        deletions.push(VhdAbstract.unlink(handler, vhdPath))
      }
    }
@@ -300,7 +303,7 @@ exports.cleanVm = async function cleanVm(
    // check is not good enough to delete the file, the best we can do is report
    // it
    if (!(await this.isValidXva(path))) {
-      logWarn('XVA might be broken', { path })
+      onLog(`the XVA with path ${path} is potentially broken`)
    }
  })
 
@@ -314,7 +317,7 @@ exports.cleanVm = async function cleanVm(
    try {
      metadata = JSON.parse(await handler.readFile(json))
    } catch (error) {
-      logWarn('failed to read metadata file', { json, error })
+      onLog(`failed to read metadata file ${json}`, { error })
      jsons.delete(json)
      return
    }
@@ -325,9 +328,9 @@ exports.cleanVm = async function cleanVm(
    if (xvas.has(linkedXva)) {
      unusedXvas.delete(linkedXva)
    } else {
-      logWarn('metadata XVA is missing', { json })
+      onLog(`the XVA linked to the metadata ${json} is missing`)
      if (remove) {
-        logInfo('deleting incomplete backup', { json })
+        onLog(`deleting incomplete backup ${json}`)
        jsons.delete(json)
        await handler.unlink(json)
      }
@@ -348,9 +351,9 @@ exports.cleanVm = async function cleanVm(
        vhdsToJSons[path] = json
      })
    } else {
-      logWarn('some metadata VHDs are missing', { json, missingVhds })
+      onLog(`Some VHDs linked to the metadata ${json} are missing`, { missingVhds })
      if (remove) {
-        logInfo('deleting incomplete backup', { json })
+        onLog(`deleting incomplete backup ${json}`)
        jsons.delete(json)
        await handler.unlink(json)
      }
@@ -391,9 +394,9 @@ exports.cleanVm = async function cleanVm(
      }
    }
 
-    logWarn('unused VHD', { vhd })
+    onLog(`the VHD ${vhd} is unused`)
    if (remove) {
-      logInfo('deleting unused VHD', { vhd })
+      onLog(`deleting unused VHD ${vhd}`)
      unusedVhdsDeletion.push(VhdAbstract.unlink(handler, vhd))
    }
  }
@@ -417,7 +420,7 @@ exports.cleanVm = async function cleanVm(
  const metadataWithMergedVhd = {}
  const doMerge = async () => {
    await asyncMap(toMerge, async chain => {
-      const merged = await limitedMergeVhdChain(chain, { handler, logInfo, logWarn, remove, merge })
+      const merged = await limitedMergeVhdChain(chain, { handler, onLog, remove, merge })
      if (merged !== undefined) {
        const metadataPath = vhdsToJSons[chain[0]] // all the chain should have the same metada file
        metadataWithMergedVhd[metadataPath] = true
@@ -429,18 +432,18 @@ exports.cleanVm = async function cleanVm(
    ...unusedVhdsDeletion,
    toMerge.length !== 0 && (merge ? Task.run({ name: 'merge' }, doMerge) : doMerge()),
    asyncMap(unusedXvas, path => {
-      logWarn('unused XVA', { path })
+      onLog(`the XVA ${path} is unused`)
      if (remove) {
-        logInfo('deleting unused XVA', { path })
+        onLog(`deleting unused XVA ${path}`)
        return handler.unlink(path)
      }
    }),
    asyncMap(xvaSums, path => {
      // no need to handle checksums for XVAs deleted by the script, they will be handled by `unlink()`
      if (!xvas.has(path.slice(0, -'.checksum'.length))) {
-        logInfo('unused XVA checksum', { path })
+        onLog(`the XVA checksum ${path} is unused`)
        if (remove) {
-          logInfo('deleting unused XVA checksum', { path })
+          onLog(`deleting unused XVA checksum ${path}`)
          return handler.unlink(path)
        }
      }
@@ -474,11 +477,11 @@ exports.cleanVm = async function cleanVm(
 
      // don't warn if the size has changed after a merge
      if (!merged && fileSystemSize !== size) {
-        logWarn('incorrect size in metadata', { size: size ?? 'none', fileSystemSize })
+        onLog(`incorrect size in metadata: ${size ?? 'none'} instead of ${fileSystemSize}`)
      }
    }
  } catch (error) {
-    logWarn('failed to get metadata size', { metadataPath, error })
+    onLog(`failed to get size of ${metadataPath}`, { error })
    return
  }
 
@@ -488,7 +491,7 @@ exports.cleanVm = async function cleanVm(
  try {
    await handler.writeFile(metadataPath, JSON.stringify(metadata), { flags: 'w' })
  } catch (error) {
-    logWarn('metadata size update failed', { metadataPath, error })
+    onLog(`failed to update size in backup metadata ${metadataPath} after merge`, { error })
  }
 })
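Note that the left-hand side of this compare is the newer code: it replaces the single `onLog` callback with separate `logInfo`/`logWarn` callbacks that take a message plus a structured data object. A condensed sketch of the newer calling convention, based on the `MixinBackupWriter` hunk further down (the `@xen-orchestra/backups/Task.js` require path, `adapter` and `vmBackupDir` are assumptions for illustration):

```js
const { createLogger } = require('@xen-orchestra/log')
const { Task } = require('@xen-orchestra/backups/Task.js') // path assumed

const { info, warn } = createLogger('xo:backups:MixinBackupWriter')

// sketch: forward structured entries to the logger and surface warnings
// as task warnings, as the newer MixinBackupWriter does
async function cleanWithLogging(adapter, vmBackupDir) {
  return adapter.cleanVm(vmBackupDir, {
    fixMetadata: true,
    lock: false,
    logInfo: info,
    logWarn: (message, data) => {
      warn(message, data)
      Task.warning(message, data)
    },
  })
}
```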
@@ -69,8 +69,6 @@ job.start(data: { mode: Mode, reportWhen: ReportWhen })
 ├─ task.warning(message: string)
 ├─ task.start(data: { type: 'VM', id: string })
 │ ├─ task.warning(message: string)
-| ├─ task.start(message: 'clean-vm')
-│ │ └─ task.end
 │ ├─ task.start(message: 'snapshot')
 │ │ └─ task.end
 │ ├─ task.start(message: 'export', data: { type: 'SR' | 'remote', id: string, isFull: boolean })
@@ -91,8 +89,12 @@ job.start(data: { mode: Mode, reportWhen: ReportWhen })
 │ │ ├─ task.start(message: 'clean')
 │ │ │ ├─ task.warning(message: string)
 │ │ │ └─ task.end
 │ │ └─ task.end
-| ├─ task.start(message: 'clean-vm')
+│ │ │
+│ │ │ // in case of delta backup
+│ │ ├─ task.start(message: 'merge')
+│ │ │ ├─ task.warning(message: string)
+│ │ │ └─ task.end(result: { size: number })
+│ │ │
-│ │ └─ task.end
 │ └─ task.end
 └─ job.end
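A minimal sketch of how the events in this tree are produced, based on the `Task.run`/`Task.warning` calls visible elsewhere in this compare (the require path is how the backups package references it internally):

```js
const { Task } = require('./Task.js') // as required within @xen-orchestra/backups

// Task.run emits task.start before invoking the callback and task.end
// afterwards; the resolved value becomes the result carried by task.end
await Task.run({ name: 'clean-vm' }, async () => {
  // Task.warning emits a task.warning event attached to the current task
  Task.warning('this is a warning message')
  return { some: 'result' }
})
```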
@@ -8,7 +8,7 @@
     "type": "git",
     "url": "https://github.com/vatesfr/xen-orchestra.git"
   },
-  "version": "0.25.0",
+  "version": "0.23.0",
   "engines": {
     "node": ">=14.6"
   },
@@ -22,7 +22,7 @@
     "@vates/disposable": "^0.1.1",
     "@vates/parse-duration": "^0.1.1",
     "@xen-orchestra/async-map": "^0.1.2",
-    "@xen-orchestra/fs": "^1.0.3",
+    "@xen-orchestra/fs": "^1.0.1",
     "@xen-orchestra/log": "^0.3.0",
     "@xen-orchestra/template": "^0.1.0",
     "compare-versions": "^4.0.1",
@@ -38,7 +38,7 @@
     "promise-toolbox": "^0.21.0",
     "proper-lockfile": "^4.1.2",
     "uuid": "^8.3.2",
-    "vhd-lib": "^3.2.0",
+    "vhd-lib": "^3.1.0",
     "yazl": "^2.5.1"
   },
   "devDependencies": {
@@ -46,7 +46,7 @@
     "tmp": "^0.2.1"
   },
   "peerDependencies": {
-    "@xen-orchestra/xapi": "^1.2.0"
+    "@xen-orchestra/xapi": "^1.0.0"
   },
   "license": "AGPL-3.0-or-later",
   "author": {
@@ -6,9 +6,8 @@ const { join } = require('path')
 const { getVmBackupDir } = require('../_getVmBackupDir.js')
 const MergeWorker = require('../merge-worker/index.js')
 const { formatFilenameDate } = require('../_filenameDate.js')
-const { Task } = require('../Task.js')
 
-const { info, warn } = createLogger('xo:backups:MixinBackupWriter')
+const { warn } = createLogger('xo:backups:MixinBackupWriter')
 
 exports.MixinBackupWriter = (BaseClass = Object) =>
   class MixinBackupWriter extends BaseClass {
@@ -26,17 +25,11 @@ exports.MixinBackupWriter = (BaseClass = Object) =>
 
     async _cleanVm(options) {
       try {
-        return await Task.run({ name: 'clean-vm' }, () => {
-          return this._adapter.cleanVm(this.#vmBackupDir, {
-            ...options,
-            fixMetadata: true,
-            logInfo: info,
-            logWarn: (message, data) => {
-              warn(message, data)
-              Task.warning(message, data)
-            },
-            lock: false,
-          })
-        })
+        return await this._adapter.cleanVm(this.#vmBackupDir, {
+          ...options,
+          fixMetadata: true,
+          onLog: warn,
+          lock: false,
+        })
       } catch (error) {
         warn(error)
@@ -18,7 +18,7 @@
   "preferGlobal": true,
   "dependencies": {
     "golike-defer": "^0.5.1",
-    "xen-api": "^1.2.1"
+    "xen-api": "^1.2.0"
   },
   "scripts": {
     "postversion": "npm publish"
@@ -1,7 +1,7 @@
 {
   "private": false,
   "name": "@xen-orchestra/emit-async",
-  "version": "1.0.0",
+  "version": "0.1.0",
   "license": "ISC",
   "description": "Emit an event for async listeners to settle",
   "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/emit-async",
@@ -1,7 +1,7 @@
 {
   "private": false,
   "name": "@xen-orchestra/fs",
-  "version": "1.0.3",
+  "version": "1.0.1",
   "license": "AGPL-3.0-or-later",
   "description": "The File System for Xen Orchestra backups.",
   "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/fs",
@@ -1,7 +1,6 @@
 import asyncMapSettled from '@xen-orchestra/async-map/legacy'
 import getStream from 'get-stream'
 import { coalesceCalls } from '@vates/coalesce-calls'
-import { createLogger } from '@xen-orchestra/log'
 import { fromCallback, fromEvent, ignoreErrors, timeout } from 'promise-toolbox'
 import { limitConcurrency } from 'limit-concurrency-decorator'
 import { parse } from 'xo-remote-parser'
@@ -12,8 +11,6 @@ import { synchronized } from 'decorator-synchronized'
 import { basename, dirname, normalize as normalizePath } from './_path'
 import { createChecksumStream, validChecksumOfReadStream } from './checksum'
 
-const { warn } = createLogger('@xen-orchestra:fs')
-
 const checksumFile = file => file + '.checksum'
 const computeRate = (hrtime, size) => {
   const seconds = hrtime[0] + hrtime[1] / 1e9
@@ -360,12 +357,11 @@ export default class RemoteHandlerAbstract {
         readRate: computeRate(readDuration, SIZE),
       }
     } catch (error) {
-      warn(`error while testing the remote at step ${step}`, { error })
       return {
         success: false,
         step,
         file: testFileName,
-        error,
+        error: error.message || String(error),
       }
     } finally {
       ignoreErrors.call(this._unlink(testFileName))
@@ -2,12 +2,7 @@
 
 const camelCase = require('lodash/camelCase')
 
-const {
-  defineProperties,
-  defineProperty,
-  hasOwn = Function.prototype.call.bind(Object.prototype.hasOwnProperty),
-  keys,
-} = Object
+const { defineProperties, defineProperty, keys } = Object
 const noop = Function.prototype
 
 const MIXIN_CYCLIC_DESCRIPTOR = {
@@ -18,49 +13,23 @@ const MIXIN_CYCLIC_DESCRIPTOR = {
 }
 
 module.exports = function mixin(object, mixins, args) {
-  const importing = { __proto__: null }
-  const importers = { __proto__: null }
-
-  function instantiateMixin(name, Mixin) {
-    defineProperty(object, name, MIXIN_CYCLIC_DESCRIPTOR)
-    const instance = new Mixin(object, ...args)
-    defineProperty(object, name, {
-      value: instance,
-    })
-    return instance
-  }
-
   // add lazy property for each of the mixin, this allows mixins to depend on
   // one another without any special ordering
-  const descriptors = {
-    loadMixin(name) {
-      if (hasOwn(this, name)) {
-        return Promise.resolve(this[name])
-      }
-
-      let promise = importing[name]
-      if (promise === undefined) {
-        const clean = () => {
-          delete importing[name]
-        }
-        promise = importers[name]().then(Mixin => instantiateMixin(name, Mixin))
-        promise.then(clean, clean)
-        importing[name] = promise
-      }
-      return promise
-    },
-  }
+  const descriptors = {}
   keys(mixins).forEach(name => {
     const Mixin = mixins[name]
     name = camelCase(name)
 
-    if (Mixin.prototype === undefined) {
-      importers[name] = Mixin(name)
-    } else {
-      descriptors[name] = {
-        configurable: true,
-        get: () => instantiateMixin(name, Mixin),
-      }
-    }
+    descriptors[name] = {
+      configurable: true,
+      get: () => {
+        defineProperty(object, name, MIXIN_CYCLIC_DESCRIPTOR)
+        const instance = new Mixin(object, ...args)
+        defineProperty(object, name, {
+          value: instance,
+        })
+        return instance
+      },
+    }
   })
   defineProperties(object, descriptors)
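The right-hand side above is the simpler eager-class variant; the left-hand (newer, `lazy-mixin`) side additionally supports prototype-less entries resolved asynchronously through `loadMixin`. A hedged usage sketch of the common path (the entry point is assumed to be `@xen-orchestra/mixins`; class and config names are illustrative; keys are camel-cased and instances are created on first property access):

```js
'use strict'

const mixin = require('@xen-orchestra/mixins') // entry point assumed

class Hooks {
  constructor(app, config) {
    this.app = app
    this.config = config
  }
}

const app = {}
mixin(app, { Hooks }, [{ debug: true }])

// `app.hooks` is lazy: the Hooks instance is only created on first
// access, and cyclic accesses during construction are detected through
// MIXIN_CYCLIC_DESCRIPTOR
console.log(app.hooks instanceof Hooks) // true
```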
@@ -16,7 +16,7 @@
   },
   "preferGlobal": false,
   "engines": {
-    "node": ">=7.6"
+    "node": ">=6"
   },
   "dependencies": {
     "bind-property-descriptor": "^2.0.0",
@@ -14,14 +14,14 @@
     "url": "https://vates.fr"
   },
   "license": "AGPL-3.0-or-later",
-  "version": "0.5.0",
+  "version": "0.4.0",
   "engines": {
     "node": ">=12"
   },
   "dependencies": {
     "@vates/event-listeners-manager": "^1.0.0",
     "@vates/parse-duration": "^0.1.1",
-    "@xen-orchestra/emit-async": "^1.0.0",
+    "@xen-orchestra/emit-async": "^0.1.0",
     "@xen-orchestra/log": "^0.3.0",
     "app-conf": "^2.1.0",
     "lodash": "^4.17.21",
@@ -33,19 +33,26 @@ async function main(argv) {
     ignoreUnknownFormats: true,
   })
 
-  const opts = getopts(argv, {
+  const { hostname = 'localhost', port } = config?.http?.listen?.https ?? {}
+
+  const {
+    _: args,
+    file,
+    help,
+    host,
+    raw,
+    token,
+  } = getopts(argv, {
     alias: { file: 'f', help: 'h' },
     boolean: ['help', 'raw'],
     default: {
       token: config.authenticationToken,
     },
     stopEarly: true,
-    string: ['file', 'host', 'token', 'url'],
+    string: ['file', 'host', 'token'],
   })
 
-  const { _: args, file } = opts
-
-  if (opts.help || (file === '' && args.length === 0)) {
+  if (help || (file === '' && args.length === 0)) {
     return console.log(
       '%s',
       `Usage:
@@ -70,29 +77,18 @@ ${pkg.name} v${pkg.version}`
   const baseRequest = {
     headers: {
       'content-type': 'application/json',
+      cookie: `authenticationToken=${token}`,
     },
     pathname: '/api/v1',
+    protocol: 'https:',
     rejectUnauthorized: false,
   }
-  let { token } = opts
-  if (opts.url !== '') {
-    const { protocol, host, username } = new URL(opts.url)
-    Object.assign(baseRequest, { protocol, host })
-    if (username !== '') {
-      token = username
-    }
+  if (host !== '') {
+    baseRequest.host = host
   } else {
-    baseRequest.protocol = 'https:'
-    if (opts.host !== '') {
-      baseRequest.host = opts.host
-    } else {
-      const { hostname = 'localhost', port } = config?.http?.listen?.https ?? {}
-      baseRequest.hostname = hostname
-      baseRequest.port = port
-    }
+    baseRequest.hostname = hostname
+    baseRequest.port = port
   }
-  baseRequest.headers.cookie = `authenticationToken=${token}`
-
   const call = async ({ method, params }) => {
     if (callPath.length !== 0) {
       process.stderr.write(`\n${colors.bold(`--- call #${callPath.join('.')}`)} ---\n\n`)
@@ -131,7 +127,7 @@ ${pkg.name} v${pkg.version}`
         stdout.write(inspect(JSON.parse(line), { colors: true, depth: null }))
         stdout.write('\n')
       }
-    } else if (opts.raw && typeof result === 'string') {
+    } else if (raw && typeof result === 'string') {
       stdout.write(result)
     } else {
       stdout.write(inspect(result, { colors: true, depth: null }))
@@ -1,7 +1,7 @@
 {
   "private": false,
   "name": "@xen-orchestra/proxy-cli",
-  "version": "0.3.0",
+  "version": "0.2.0",
   "license": "AGPL-3.0-or-later",
   "description": "CLI for @xen-orchestra/proxy",
   "keywords": [
@@ -1,7 +1,7 @@
 {
   "private": true,
   "name": "@xen-orchestra/proxy",
-  "version": "0.23.2",
+  "version": "0.22.1",
   "license": "AGPL-3.0-or-later",
   "description": "XO Proxy used to remotely execute backup jobs",
   "keywords": [
@@ -32,13 +32,13 @@
     "@vates/decorate-with": "^2.0.0",
     "@vates/disposable": "^0.1.1",
     "@xen-orchestra/async-map": "^0.1.2",
-    "@xen-orchestra/backups": "^0.25.0",
-    "@xen-orchestra/fs": "^1.0.3",
+    "@xen-orchestra/backups": "^0.23.0",
+    "@xen-orchestra/fs": "^1.0.1",
     "@xen-orchestra/log": "^0.3.0",
     "@xen-orchestra/mixin": "^0.1.0",
-    "@xen-orchestra/mixins": "^0.5.0",
-    "@xen-orchestra/self-signed": "^0.1.3",
-    "@xen-orchestra/xapi": "^1.2.0",
+    "@xen-orchestra/mixins": "^0.4.0",
+    "@xen-orchestra/self-signed": "^0.1.0",
+    "@xen-orchestra/xapi": "^1.0.0",
     "ajv": "^8.0.3",
     "app-conf": "^2.1.0",
     "async-iterator-to-stream": "^1.1.0",
@@ -46,7 +46,7 @@
     "get-stream": "^6.0.0",
     "getopts": "^2.2.3",
     "golike-defer": "^0.5.1",
-    "http-server-plus": "^0.11.1",
+    "http-server-plus": "^0.11.0",
     "http2-proxy": "^5.0.53",
     "json-rpc-protocol": "^0.13.1",
     "jsonrpc-websocket-client": "^0.7.2",
@@ -60,7 +60,7 @@
     "source-map-support": "^0.5.16",
     "stoppable": "^1.0.6",
     "xdg-basedir": "^5.1.0",
-    "xen-api": "^1.2.1",
+    "xen-api": "^1.2.0",
     "xo-common": "^0.8.0"
   },
   "devDependencies": {
@@ -8,7 +8,21 @@ exports.genSelfSignedCert = async ({ days = 360 } = {}) =>
   new Promise((resolve, reject) => {
     execFile(
       'openssl',
-      ['req', '-batch', '-new', '-x509', '-days', String(days), '-nodes', '-newkey', 'rsa:2048', '-keyout', '-'],
+      [
+        'req',
+        '-batch',
+        '-new',
+        '-x509',
+        '-days',
+        String(days),
+        '-nodes',
+        '-newkey',
+        'ec',
+        '-pkeyopt',
+        'ec_paramgen_curve:secp384r1',
+        '-keyout',
+        '-',
+      ],
       (error, stdout) => {
         if (error != null) {
           return reject(error)
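Both sides wrap `execFile` in a promise by hand. The pattern, extracted as a standalone sketch (a generic helper for illustration, not part of the package):

```js
const { execFile } = require('child_process')

// promise-wrapped execFile, as used by genSelfSignedCert above: reject on
// error, otherwise resolve with the child's stdout
const run = (command, args) =>
  new Promise((resolve, reject) => {
    execFile(command, args, (error, stdout) => {
      if (error != null) {
        return reject(error)
      }
      resolve(stdout)
    })
  })
```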
@@ -9,7 +9,7 @@
     "type": "git",
     "url": "https://github.com/vatesfr/xen-orchestra.git"
   },
-  "version": "0.1.3",
+  "version": "0.1.0",
   "engines": {
     "node": ">=8.10"
   },
@@ -1,6 +1,6 @@
 {
   "name": "@xen-orchestra/upload-ova",
-  "version": "0.1.5",
+  "version": "0.1.4",
   "license": "AGPL-3.0-or-later",
   "description": "Basic CLI to upload ova files to Xen-Orchestra",
   "keywords": [
@@ -43,7 +43,7 @@
     "pw": "^0.0.4",
     "xdg-basedir": "^4.0.0",
     "xo-lib": "^0.11.1",
-    "xo-vmdk-to-vhd": "^2.4.1"
+    "xo-vmdk-to-vhd": "^2.3.0"
   },
   "devDependencies": {
     "@babel/cli": "^7.0.0",
@@ -1,9 +0,0 @@
-'use strict'
-
-// TODO: remove when Node >=15.0
-module.exports = class AggregateError extends Error {
-  constructor(errors, message) {
-    super(message)
-    this.errors = errors
-  }
-}
@@ -230,9 +230,8 @@ function mixin(mixins) {
   defineProperties(xapiProto, descriptors)
 }
 mixin({
-  host: require('./host.js'),
-  SR: require('./sr.js'),
   task: require('./task.js'),
+  host: require('./host.js'),
   VBD: require('./vbd.js'),
   VDI: require('./vdi.js'),
   VIF: require('./vif.js'),
@@ -1,6 +1,6 @@
 {
   "name": "@xen-orchestra/xapi",
-  "version": "1.2.0",
+  "version": "1.0.0",
   "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/xapi",
   "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
   "repository": {
@@ -15,7 +15,7 @@
     "node": ">=14"
   },
   "peerDependencies": {
-    "xen-api": "^1.2.1"
+    "xen-api": "^1.2.0"
   },
   "scripts": {
     "postversion": "npm publish --access public"
@@ -28,7 +28,6 @@
     "golike-defer": "^0.5.1",
     "lodash": "^4.17.15",
     "promise-toolbox": "^0.21.0",
-    "vhd-lib": "^3.2.0",
     "xo-common": "^0.8.0"
   },
   "private": false,
@@ -1,164 +0,0 @@
-'use strict'
-
-const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
-const { decorateClass } = require('@vates/decorate-with')
-const { defer } = require('golike-defer')
-const { incorrectState } = require('xo-common/api-errors')
-const { VDI_FORMAT_RAW } = require('./index.js')
-const peekFooterFromStream = require('vhd-lib/peekFooterFromVhdStream')
-
-const AggregateError = require('./_AggregateError.js')
-
-const { warn } = require('@xen-orchestra/log').createLogger('xo:xapi:sr')
-
-const OC_MAINTENANCE = 'xo:maintenanceState'
-
-class Sr {
-  async create({
-    content_type = 'user', // recommended by Citrix
-    device_config,
-    host,
-    name_description = '',
-    name_label,
-    physical_size = 0,
-    shared,
-    sm_config = {},
-    type,
-  }) {
-    const ref = await this.call(
-      'SR.create',
-      host,
-      device_config,
-      physical_size,
-      name_label,
-      name_description,
-      type,
-      content_type,
-      shared,
-      sm_config
-    )
-
-    // https://developer-docs.citrix.com/projects/citrix-hypervisor-sdk/en/latest/xc-api-extensions/#sr
-    this.setFieldEntry('SR', ref, 'other_config', 'auto-scan', 'true').catch(warn)
-
-    return ref
-  }
-
-  // Switch the SR to maintenance mode:
-  // - shutdown all running VMs with a VDI on this SR
-  // - their UUID is saved into SR.other_config[OC_MAINTENANCE].shutdownVms
-  // - clean shutdown is attempted, and falls back to a hard shutdown
-  // - unplug all connected hosts from this SR
-  async enableMaintenanceMode($defer, ref, { vmsToShutdown = [] } = {}) {
-    const state = { timestamp: Date.now() }
-
-    // will throw if already in maintenance mode
-    await this.call('SR.add_to_other_config', ref, OC_MAINTENANCE, JSON.stringify(state))
-
-    await $defer.onFailure.call(this, 'call', 'SR.remove_from_other_config', ref, OC_MAINTENANCE)
-
-    const runningVms = new Map()
-    const handleVbd = async ref => {
-      const vmRef = await this.getField('VBD', ref, 'VM')
-      if (!runningVms.has(vmRef)) {
-        const power_state = await this.getField('VM', vmRef, 'power_state')
-        const isPaused = power_state === 'Paused'
-        if (isPaused || power_state === 'Running') {
-          runningVms.set(vmRef, isPaused)
-        }
-      }
-    }
-    await asyncMap(await this.getField('SR', ref, 'VDIs'), async ref => {
-      await asyncMap(await this.getField('VDI', ref, 'VBDs'), handleVbd)
-    })
-
-    {
-      const runningVmUuids = await asyncMap(runningVms.keys(), ref => this.getField('VM', ref, 'uuid'))
-
-      const set = new Set(vmsToShutdown)
-      for (const vmUuid of runningVmUuids) {
-        if (!set.has(vmUuid)) {
-          throw incorrectState({
-            actual: vmsToShutdown,
-            expected: runningVmUuids,
-            property: 'vmsToShutdown',
-          })
-        }
-      }
-    }
-
-    state.shutdownVms = {}
-
-    await asyncMapSettled(runningVms, async ([ref, isPaused]) => {
-      state.shutdownVms[await this.getField('VM', ref, 'uuid')] = isPaused
-
-      try {
-        await this.callAsync('VM.clean_shutdown', ref)
-      } catch (error) {
-        warn('SR_enableMaintenanceMode, VM clean shutdown', { error })
-        await this.callAsync('VM.hard_shutdown', ref)
-      }
-
-      $defer.onFailure.call(this, 'callAsync', 'VM.start', ref, isPaused, true)
-    })
-
-    state.unpluggedPbds = []
-    await asyncMapSettled(await this.getField('SR', ref, 'PBDs'), async ref => {
-      if (await this.getField('PBD', ref, 'currently_attached')) {
-        state.unpluggedPbds.push(await this.getField('PBD', ref, 'uuid'))
-
-        await this.callAsync('PBD.unplug', ref)
-
-        $defer.onFailure.call(this, 'callAsync', 'PBD.plug', ref)
-      }
-    })
-
-    await this.setFieldEntry('SR', ref, 'other_config', OC_MAINTENANCE, JSON.stringify(state))
-  }
-
-  // this method is best effort and will not stop on first error
-  async disableMaintenanceMode(ref) {
-    const state = JSON.parse((await this.getField('SR', ref, 'other_config'))[OC_MAINTENANCE])
-
-    // will throw if not in maintenance mode
-    await this.call('SR.remove_from_other_config', ref, OC_MAINTENANCE)
-
-    const errors = []
-
-    await asyncMap(state.unpluggedPbds, async uuid => {
-      try {
-        await this.callAsync('PBD.plug', await this.call('PBD.get_by_uuid', uuid))
-      } catch (error) {
-        errors.push(error)
-      }
-    })
-
-    await asyncMap(Object.entries(state.shutdownVms), async ([uuid, isPaused]) => {
-      try {
-        await this.callAsync('VM.start', await this.call('VM.get_by_uuid', uuid), isPaused, true)
-      } catch (error) {
-        errors.push(error)
-      }
-    })
-
-    if (errors.length !== 0) {
-      throw new AggregateError(errors)
-    }
-  }
-
-  async importVdi(
-    $defer,
-    ref,
-    stream,
-    { name_label = '[XO] Imported disk - ' + new Date().toISOString(), ...vdiCreateOpts } = {}
-  ) {
-    const footer = await peekFooterFromStream(stream)
-    const vdiRef = await this.VDI_create({ ...vdiCreateOpts, name_label, SR: ref, virtual_size: footer.currentSize })
-    $defer.onFailure.call(this, 'callAsync', 'VDI.destroy', vdiRef)
-    await this.VDI_importContent(vdiRef, stream, { format: VDI_FORMAT_RAW })
-    return vdiRef
-  }
-}
-module.exports = Sr
-
-decorateClass(Sr, { enableMaintenanceMode: defer, importVdi: defer })
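The deleted `sr.js` above is a XAPI mixin: judging by the conventions visible in this compare (the file itself calls `this.VDI_create` and `this.VDI_importContent`), its methods appear to be exposed on the connection object with an `SR_` prefix. A hedged sketch of driving the maintenance-mode pair under that assumption (the `xapi` connection and UUIDs are illustrative):

```js
// hedged sketch, assuming an established xen-api connection in `xapi`
// and that mixin methods are exposed with a type prefix (SR_...)
async function srMaintenance(xapi, srUuid, vmUuids) {
  const srRef = await xapi.call('SR.get_by_uuid', srUuid)

  // shuts the listed running VMs down (clean first, hard as fallback) and
  // unplugs all PBDs; state is saved in SR.other_config['xo:maintenanceState']
  await xapi.SR_enableMaintenanceMode(srRef, { vmsToShutdown: vmUuids })

  // ... perform storage maintenance ...

  // best effort rollback: replugs PBDs and restarts the VMs, then throws
  // an AggregateError if any step failed
  await xapi.SR_disableMaintenanceMode(srRef)
}
```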
@@ -6,8 +6,6 @@ const { Ref } = require('xen-api')
 
 const isVmRunning = require('./_isVmRunning.js')
 
-const { warn } = require('@xen-orchestra/log').createLogger('xo:xapi:vbd')
-
 const noop = Function.prototype
 
 module.exports = class Vbd {
@@ -68,10 +66,8 @@ module.exports = class Vbd {
     })
 
     if (isVmRunning(powerState)) {
-      this.callAsync('VBD.plug', vbdRef).catch(warn)
+      await this.callAsync('VBD.plug', vbdRef)
     }
 
     return vbdRef
   }
 
   async unplug(ref) {
@@ -30,7 +30,8 @@ class Vdi {
       other_config = {},
       read_only = false,
       sharable = false,
-      SR = this.pool.default_SR,
+      sm_config,
+      SR,
       tags,
       type = 'user',
       virtual_size,
@@ -38,10 +39,10 @@
     },
     {
       // blindly copying `sm_config` from another VDI can create problems,
-      // therefore it should be passed explicitly
+      // therefore it is ignored by default by this method
       //
       // see https://github.com/vatesfr/xen-orchestra/issues/4482
-      sm_config,
+      setSmConfig = false,
     } = {}
   ) {
     return this.call('VDI.create', {
@@ -50,7 +51,7 @@
       other_config,
       read_only,
       sharable,
-      sm_config,
+      sm_config: setSmConfig ? sm_config : undefined,
       SR,
       tags,
       type,
@@ -11,7 +11,7 @@ const { asyncMap } = require('@xen-orchestra/async-map')
 const { createLogger } = require('@xen-orchestra/log')
 const { decorateClass } = require('@vates/decorate-with')
 const { defer } = require('golike-defer')
-const { incorrectState, forbiddenOperation } = require('xo-common/api-errors.js')
+const { incorrectState } = require('xo-common/api-errors.js')
 const { Ref } = require('xen-api')
 
 const extractOpaqueRef = require('./_extractOpaqueRef.js')
@@ -343,13 +343,7 @@ class Vm {
     const vm = await this.getRecord('VM', vmRef)
 
     if (!bypassBlockedOperation && 'destroy' in vm.blocked_operations) {
-      throw forbiddenOperation(
-        `destroy is blocked: ${
-          vm.blocked_operations.destroy === 'true'
-            ? 'protected from accidental deletion'
-            : vm.blocked_operations.destroy
-        }`
-      )
+      throw new Error('destroy is blocked')
     }
 
     if (!forceDeleteDefaultTemplate && isDefaultTemplate(vm)) {
@@ -531,15 +525,11 @@
 
     // requires the VM to be halted because it's not possible to re-plug VUSB on a live VM
     if (unplugVusbs && isHalted) {
-      // vm.VUSBs can be undefined (e.g. on XS 7.0.0)
-      const vusbs = vm.VUSBs
-      if (vusbs !== undefined) {
-        await asyncMap(vusbs, async ref => {
-          const vusb = await this.getRecord('VUSB', ref)
-          await vusb.$call('destroy')
-          $defer.call(this, 'call', 'VUSB.create', vusb.VM, vusb.USB_group, vusb.other_config)
-        })
-      }
+      await asyncMap(vm.VUSBs, async ref => {
+        const vusb = await this.getRecord('VUSB', ref)
+        await vusb.$call('destroy')
+        $defer.call(this, 'call', 'VUSB.create', vusb.VM, vusb.USB_group, vusb.other_config)
+      })
     }
 
     let destroyNobakVdis = false
CHANGELOG.md

@@ -1,84 +1,5 @@
 # ChangeLog
 
-## **5.71.1 (2022-06-13)**
-
-<img id="latest" src="https://badgen.net/badge/channel/latest/yellow" alt="Channel: latest" />
-
-### Enhancements
-
-- Show raw errors to administrators instead of _unknown error from the peer_ (PR [#6260](https://github.com/vatesfr/xen-orchestra/pull/6260))
-
-### Bug fixes
-
-- [New SR] Fix `method.startsWith is not a function` when creating an _ext_ SR
-- Import VDI content now works when there is a HTTP proxy between XO and the host (PR [#6261](https://github.com/vatesfr/xen-orchestra/pull/6261))
-- [Backup] Fix `undefined is not iterable (cannot read property Symbol(Symbol.iterator))` on XS 7.0.0
-- [Backup] Ensure a warning is shown if a target preparation step fails (PR [#6266](https://github.com/vatesfr/xen-orchestra/pull/6266))
-- [OVA Export] Avoid creating a zombie task (PR [#6267](https://github.com/vatesfr/xen-orchestra/pull/6267))
-- [OVA Export] Increase speed by lowering compression to acceptable level (PR [#6267](https://github.com/vatesfr/xen-orchestra/pull/6267))
-- [OVA Export] Fix broken OVAs due to special characters in VM name (PR [#6267](https://github.com/vatesfr/xen-orchestra/pull/6267))
-
-### Released packages
-
-- @xen-orchestra/backups 0.25.0
-- @xen-orchestra/backups-cli 0.7.3
-- xen-api 1.2.1
-- @xen-orchestra/xapi 1.2.0
-- @xen-orchestra/proxy 0.23.2
-- @xen-orchestra/proxy-cli 0.3.0
-- xo-cli 0.14.0
-- xo-vmdk-to-vhd 2.4.1
-- xo-server 5.96.0
-- xo-web 5.97.2
-
-## **5.71.0 (2022-05-31)**
-
-### Highlights
-
-- [Backup] _Restore Health Check_ can now be configured to be run automatically during a backup schedule (PRs [#6227](https://github.com/vatesfr/xen-orchestra/pull/6227), [#6228](https://github.com/vatesfr/xen-orchestra/pull/6228), [#6238](https://github.com/vatesfr/xen-orchestra/pull/6238) & [#6242](https://github.com/vatesfr/xen-orchestra/pull/6242))
-- [Backup] VMs with USB Pass-through devices are now supported! The advanced _Offline Snapshot Mode_ setting must be enabled. For Full Backup or Disaster Recovery jobs, Rolling Snapshot needs to be anabled as well. (PR [#6239](https://github.com/vatesfr/xen-orchestra/pull/6239))
-- [Backup] Implement file cache for listing the backups of a VM (PR [#6220](https://github.com/vatesfr/xen-orchestra/pull/6220))
-- [RPU/Host] If some backup jobs are running on the pool, ask for confirmation before starting an RPU, shutdown/rebooting a host or restarting a host's toolstack (PR [6232](https://github.com/vatesfr/xen-orchestra/pull/6232))
-- [XO Web] Add ability to configure a default filter for Storage [#6236](https://github.com/vatesfr/xen-orchestra/issues/6236) (PR [#6237](https://github.com/vatesfr/xen-orchestra/pull/6237))
-- [REST API] Support VDI creation via VHD import
-
-### Enhancements
-
-- [Backup] Merge multiple VHDs at once which will speed up the merging phase after reducing the retention of a backup job(PR [#6184](https://github.com/vatesfr/xen-orchestra/pull/6184))
-- [Backup] Add setting `backups.metadata.defaultSettings.unconditionalSnapshot` in `xo-server`'s configuration file to force a snapshot even when not required by the backup, this is useful to avoid locking the VM halted during the backup (PR [#6221](https://github.com/vatesfr/xen-orchestra/pull/6221))
-- [VM migration] Ensure the VM can be migrated before performing the migration to avoid issues [#5301](https://github.com/vatesfr/xen-orchestra/issues/5301) (PR [#6245](https://github.com/vatesfr/xen-orchestra/pull/6245))
-- [Backup] Show any detected errors on existing backups instead of fixing them silently (PR [#6207](https://github.com/vatesfr/xen-orchestra/pull/6225))
-- Created SRs will now have auto-scan enabled similarly to what XenCenter does (PR [#6246](https://github.com/vatesfr/xen-orchestra/pull/6246))
-- [RPU] Disable scheduled backup jobs during RPU (PR [#6244](https://github.com/vatesfr/xen-orchestra/pull/6244))
-
-### Bug fixes
-
-- [S3] Fix S3 remote with empty directory not showing anything to restore (PR [#6218](https://github.com/vatesfr/xen-orchestra/pull/6218))
-- [S3] remote fom did not save the `https` and `allow unatuhorized`during remote creation (PR [#6219](https://github.com/vatesfr/xen-orchestra/pull/6219))
-- [VM/advanced] Fix various errors when adding ACLs [#6213](https://github.com/vatesfr/xen-orchestra/issues/6213) (PR [#6230](https://github.com/vatesfr/xen-orchestra/pull/6230))
-- [Home/Self] Don't make VM's resource set name clickable for non admin users as they aren't allowed to view the Self Service page (PR [#6252](https://github.com/vatesfr/xen-orchestra/pull/6252))
-- [load-balancer] Fix density mode failing to shutdown hosts (PR [#6253](https://github.com/vatesfr/xen-orchestra/pull/6253))
-- [Health] Make "Too many snapshots" table sortable by number of snapshots (PR [#6255](https://github.com/vatesfr/xen-orchestra/pull/6255))
-- [Remote] Show complete errors instead of only a potentially missing message (PR [#6216](https://github.com/vatesfr/xen-orchestra/pull/6216))
-
-### Released packages
-
-- @xen-orchestra/self-signed 0.1.3
-- vhd-lib 3.2.0
-- @xen-orchestra/fs 1.0.3
-- vhd-cli 0.7.2
-- xo-vmdk-to-vhd 2.4.0
-- @xen-orchestra/upload-ova 0.1.5
-- @xen-orchestra/xapi 1.1.0
-- @xen-orchestra/backups 0.24.0
-- @xen-orchestra/backups-cli 0.7.2
-- @xen-orchestra/emit-async 1.0.0
-- @xen-orchestra/mixins 0.5.0
-- @xen-orchestra/proxy 0.23.1
-- xo-server 5.95.0
-- xo-web 5.97.1
-- xo-server-backup-reports 0.17.0
-
 ## 5.70.2 (2022-05-16)
 
 ### Bug fixes
@@ -114,7 +35,7 @@
 
 ## 5.70.0 (2022-04-29)
 
-<img id="stable" src="https://badgen.net/badge/channel/stable/green" alt="Channel: stable" />
+<img id="latest" src="https://badgen.net/badge/channel/latest/yellow" alt="Channel: latest" />
 
 ### Highlights
 
@@ -152,6 +73,8 @@
 
 ## **5.69.2** (2022-04-13)
 
+<img id="stable" src="https://badgen.net/badge/channel/stable/green" alt="Channel: stable" />
+
 ### Enhancements
 
 - [Rolling Pool Update] New algorithm for XCP-ng updates (PR [#6188](https://github.com/vatesfr/xen-orchestra/pull/6188))
@@ -7,15 +7,19 @@
 
 > Users must be able to say: “Nice enhancement, I'm eager to test it”
 
-- [Backup] Merge delta backups without copying data when using VHD directories on NFS/SMB/local remote(https://github.com/vatesfr/xen-orchestra/pull/6271))
+- [Backup] Merge multiple VHDs at once which will speed up the merging ĥase after reducing the retention of a backup job(PR [#6184](https://github.com/vatesfr/xen-orchestra/pull/6184))
+- [Backup] Implement file cache for listing the backups of a VM (PR [#6220](https://github.com/vatesfr/xen-orchestra/pull/6220))
+- [Backup] Add setting `backups.metadata.defaultSettings.unconditionalSnapshot` in `xo-server`'s configuration file to force a snapshot even when not required by the backup, this is useful to avoid locking the VM halted during the backup (PR [#6221](https://github.com/vatesfr/xen-orchestra/pull/6221))
+- [XO Web] Add ability to configure a default filter for Storage [#6236](https://github.com/vatesfr/xen-orchestra/issues/6236) (PR [#6237](https://github.com/vatesfr/xen-orchestra/pull/6237))
+- [Backup] VMs with USB Pass-through devices are now supported! The advanced _Offline Snapshot Mode_ setting must be enabled. For Full Backup or Disaster Recovery jobs, Rolling Snapshot needs to be anabled as well. (PR [#6239](https://github.com/vatesfr/xen-orchestra/pull/6239))
 
 ### Bug fixes
 
 > Users must be able to say: “I had this issue, happy to know it's fixed”
 
-- [VDI Import] Fix `this._getOrWaitObject is not a function`
-- [VM] Attempting to delete a protected VM should display a modal with the error and the ability to bypass it (PR [#6290](https://github.com/vatesfr/xen-orchestra/pull/6290))
-- [OVA Import] Fix import stuck after first disk
+- [S3] Fix S3 remote with empty directory not showing anything to restore (PR [#6218](https://github.com/vatesfr/xen-orchestra/pull/6218))
+- [S3] remote fom did not save the `https` and `allow unatuhorized`during remote creation (PR [#6219](https://github.com/vatesfr/xen-orchestra/pull/6219))
+- [VM/advanced] Fix various errors when adding ACLs [#6213](https://github.com/vatesfr/xen-orchestra/issues/6213) (PR [#6230](https://github.com/vatesfr/xen-orchestra/pull/6230))
 
 ### Packages to release
 
@@ -28,18 +32,22 @@
 > - patch: if the change is a bug fix or a simple code improvement
 > - minor: if the change is a new feature
 > - major: if the change breaks compatibility
 >
 > Keep this list alphabetically ordered to avoid merge conflicts
 
 <!--packages-start-->
 
-- @vates/event-listeners-manager patch
-- @vates/read-chunk major
+- @xen-orchestra/self-signed patch
+- vhd-lib patch
+- @xen-orchestra/fs patch
+- vhd-cli patch
+- xo-vmdk-to-vhd minor
+- @xen-orchestra/upload-ova patch
 - @xen-orchestra/backups minor
 - @xen-orchestra/xapi minor
-- vhd-lib minor
-- xo-remote-parser minor
-- @xen-orchestra/backups-cli patch
+- @xen-orchestra/emit-async major
+- @xen-orchestra/mixins minor
 - @xen-orchestra/proxy minor
 - xo-server minor
-- xo-vmdk-to-vhd patch
 - xo-web minor
+- xo-server-backup-reports minor
 
 <!--packages-end-->
 
@@ -99,38 +99,3 @@ To solve this issue, we recommend that you:
 
 - wait until the other backup job is completed/the merge process is done
 - make sure your remote storage is not being overworked
-
-## Error: HTTP connection has timed out
-
-This error occurs when XO tries to fetch data from a host, via the HTTP GET method. This error essentially means that the host (dom0 specifically) isn't responding anymore, after we asked it to expose the disk to be exported. This could be a symptom of having an overloaded dom0 that couldn't respond fast enough. It can also be caused by dom0 having trouble attaching the disk in question to expose it for fetching via HTTP, or just not having enough resources to answer our GET request.
-
-::: warning
-As a temporary workaround you can increase the timeout higher than the default value, to allow the host more time to respond. But you will need to eventually diagnose the root cause of the slow host response or else you risk the issue returning.
-:::
-
-Create the following file:
-```
-/etc/xo-server/config.httpInactivityTimeout.toml
-```
-Add the following lines:
-```
-# XOA Support - Work-around HTTP timeout issue during backups
-[xapiOptions]
-httpInactivityTimeout = 1800000 # 30 mins
-```
-
-## Error: Expected values to be strictly equal
-
-This error occurs at the end of the transfer. XO checks the exported VM disk integrity, to ensure it's a valid VHD file (we check the VHD header as well as the footer of the received file). This error means the header and footage did not match, so the file is incomplete (likely the export from dom0 failed at some point and we only received a partial HD/VM disk).
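A hedged illustration of this kind of integrity check, assuming a dynamic VHD, whose first 512 bytes are a copy of the 512-byte footer stored at the end of the file (an independent sketch for explanation, not XO's actual implementation):

```js
const { open } = require('fs/promises')

// compare the leading footer copy with the trailing footer: a mismatch
// suggests the received file is truncated or corrupted
async function vhdLooksComplete(path) {
  const fd = await open(path, 'r')
  try {
    const { size } = await fd.stat()
    const head = Buffer.alloc(512)
    const tail = Buffer.alloc(512)
    await fd.read(head, 0, 512, 0)
    await fd.read(tail, 0, 512, size - 512)
    return head.equals(tail)
  } finally {
    await fd.close()
  }
}
```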
-
-## Error: the job is already running
-
-This means the same job is still running, typically from the last scheduled run. This happens when you have a backup job scheduled too often. It can also occur if you have a long timeout configured for the job, and a slow VM export or slow transfer to your remote. In either case, you need to adjust your backup schedule to allow time for the job to finish or timeout before the next scheduled run. We consider this an error to ensure you'll be notified that the planned schedule won't run this time because the previous one isn't finished.
-
-## Error: VDI_IO_ERROR
-
-This error comes directly from your host/dom0, and not XO. Essentially, XO asked the host to expose a VM disk to export via HTTP (as usual), XO managed to make the HTTP GET connection, and even start the transfer. But then at some point the host couldn't read the VM disk any further, causing this error on the host side. This might happen if the VDI is corrupted on the storage, or if there's a race condition during snapshots. More rarely, this can also occur if your SR is just too slow to keep up with the export as well as live VM traffic.
-
-## Error: no XAPI associated to <UUID>
-
-This message means that XO had a UUID of a VM to backup, but when the job ran it couldn't find any object matching it. This could be caused by the pool where this VM lived no longer being connected to XO. Double-check that the pool hosting the VM is currently connected under Settings > Servers. You can also search for the VM UUID in the Home > VMs search bar. If you can see it, run the backup job again and it will work. If you cannot, either the VM was removed or the pool is not connected.
@@ -66,13 +66,12 @@ You shouldn't have to change this. It's the path where `xo-web` files are served
|
||||
|
||||
## Custom certificate authority
|
||||
|
||||
If you use certificates signed by an in-house CA for your XCP-ng or XenServer hosts, and want to have Xen Orchestra connect to them without rejection, you can use the [`NODE_EXTRA_CA_CERTS`](https://nodejs.org/api/cli.html#cli_node_extra_ca_certs_file) environment variable.
|
||||
If you use certificates signed by an in-house CA for your XenServer hosts, and want to have Xen Orchestra connect to them without rejection, you need to add the `--use-openssl-ca` option in Node, but also add the CA to your trust store (`/etc/ssl/certs` via `update-ca-certificates` in your XOA).
|
||||
|
||||
To enable this option in your XOA, create `/etc/systemd/system/xo-server.service.d/ca.conf` with the following content:
|
||||
To enable this option in your XOA, edit the `/etc/systemd/system/xo-server.service` file and add this:
|
||||
|
||||
```
|
||||
[Service]
|
||||
Environment=NODE_EXTRA_CA_CERTS=/usr/local/share/ca-certificates/my-cert.crt
|
||||
Environment=NODE_OPTIONS=--use-openssl-ca
|
||||
```
|
||||
|
||||
Don't forget to reload `systemd` conf and restart `xo-server`:
|
||||
@@ -82,7 +81,9 @@ Don't forget to reload `systemd` conf and restart `xo-server`:
|
||||
# systemctl restart xo-server.service
|
||||
```
|
||||
|
||||
> For XO Proxy, the process is almost the same except the file to create is `/etc/systemd/system/xo-proxy.service.d/ca.conf` and the service to restart is `xo-proxy.service`.
|
||||
:::tip
|
||||
The `--use-openssl-ca` option is ignored by Node if Xen-Orchestra is run with Linux capabilities. Capabilities are commonly used to bind applications to privileged ports (<1024) (i.e. `CAP_NET_BIND_SERVICE`). Local NAT rules (`iptables`) or a reverse proxy would be required to use privileged ports and a custom certficate authority.
|
||||
:::
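
For instance, assuming `xo-server` is configured to listen on the unprivileged port 8443 (a hypothetical value, adapt to your setup), a local NAT rule could look like:

```
# Hypothetical setup: forward the privileged port 443 to xo-server on 8443
iptables -t nat -A PREROUTING -p tcp --dport 443 -j REDIRECT --to-ports 8443
```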

## Redis server

@@ -18,8 +18,6 @@ If you lose your main pool, you can start the copy on the other side, with very

:::warning
It is normal that you can't boot the copied VM directly: we protect it. The normal workflow is to make a clone and then work on it (see the example after this note).

This also affects VMs with "Auto Power On" enabled: thanks to our protections, these won't start on your CR destination if you happen to reboot it.
:::
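
As an illustration, such a clone can be created from the host CLI (the VM UUID below is a placeholder; the same can be done from the Xen Orchestra UI):

```
xe vm-clone uuid=<UUID of the replicated VM> new-name-label=restored-from-CR
```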

## Configure it

@@ -35,7 +35,3 @@ A higher retention number will lead to huge space occupation on your SR.

If you boot a copy of your production VM, be careful: if they share the same static IP, you'll have trouble.

A good way to avoid this kind of problem is to remove the network interface on the DR VM and check that the export completed correctly.

:::warning
For each DR-replicated VM, we add "start" as a blocked operation, meaning even VMs with "Auto power on" enabled will not be started on your DR destination if it reboots.
:::

@@ -141,28 +141,6 @@ curl \
  > myDisk.vhd
```

## VDI Import

A VHD can be imported on an SR to create a VDI at `/rest/v0/srs/<sr uuid>/vdis`.

```bash
curl \
  -X POST \
  -b authenticationToken=KQxQdm2vMiv7jBIK0hgkmgxKzemd8wSJ7ugFGKFkTbs \
  -T myDisk.vhd \
  'https://xo.example.org/rest/v0/srs/357bd56c-71f9-4b2a-83b8-3451dec04b8f/vdis?name_label=my_imported_VDI' \
  | cat
```

> Note: the final `| cat` ensures cURL's standard output is not a TTY, which is necessary for upload stats to be displayed.

This request returns the UUID of the created VDI.

The following query parameters are supported to customize the created VDI (a combined example follows the list):

- `name_label`
- `name_description`
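
For instance, both parameters can be combined in the same request (illustrative values, reusing the token and SR UUID from the example above):

```bash
curl \
  -X POST \
  -b authenticationToken=KQxQdm2vMiv7jBIK0hgkmgxKzemd8wSJ7ugFGKFkTbs \
  -T myDisk.vhd \
  'https://xo.example.org/rest/v0/srs/357bd56c-71f9-4b2a-83b8-3451dec04b8f/vdis?name_label=my_imported_VDI&name_description=imported%20with%20curl' \
  | cat
```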

## The future

We are adding features and improving the REST API step by step. If you have interesting use cases or feedback, please ask directly at <https://xcp-ng.org/forum/category/12/xen-orchestra>

@@ -60,7 +60,6 @@
  "testEnvironment": "node",
  "testPathIgnorePatterns": [
    "/@vates/decorate-with/",
    "/@vates/event-listeners-manager/",
    "/@vates/predicates/",
    "/@xen-orchestra/audit-core/",
    "/dist/",
3 packages/complex-matcher/.babelrc.js Normal file
@@ -0,0 +1,3 @@
'use strict'

module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))

1 packages/complex-matcher/.eslintrc.js Symbolic link
@@ -0,0 +1 @@
../../scripts/babel-eslintrc.js
@@ -1,14 +0,0 @@
'use strict'

const { parse } = require('./')
const { ast, pattern } = require('./index.fixtures')

module.exports = ({ benchmark }) => {
  benchmark('parse', () => {
    parse(pattern)
  })

  benchmark('toString', () => {
    ast.toString()
  })
}
@@ -16,6 +16,7 @@
    "url": "https://vates.fr"
  },
  "preferGlobal": false,
  "main": "dist/",
  "browserslist": [
    ">2%"
  ],
@@ -25,7 +26,21 @@
  "dependencies": {
    "lodash": "^4.17.4"
  },
  "devDependencies": {
    "@babel/cli": "^7.0.0",
    "@babel/core": "^7.0.0",
    "@babel/preset-env": "^7.0.0",
    "babel-plugin-lodash": "^3.3.2",
    "cross-env": "^7.0.2",
    "rimraf": "^3.0.0"
  },
  "scripts": {
    "build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
    "clean": "rimraf dist/",
    "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
    "prebuild": "yarn run clean",
    "predev": "yarn run prebuild",
    "prepublishOnly": "yarn run build",
    "postversion": "npm publish"
  }
}
12 packages/complex-matcher/src/index.bench.js Normal file
@@ -0,0 +1,12 @@
import { parse } from './'
import { ast, pattern } from './index.fixtures'

export default ({ benchmark }) => {
  benchmark('parse', () => {
    parse(pattern)
  })

  benchmark('toString', () => {
    ast.toString()
  })
}
@@ -1,10 +1,8 @@
'use strict'
import * as CM from './'

const CM = require('./')
export const pattern = 'foo !"\\\\ \\"" name:|(wonderwoman batman) hasCape? age:32 chi*go /^foo\\/bar\\./i'

exports.pattern = 'foo !"\\\\ \\"" name:|(wonderwoman batman) hasCape? age:32 chi*go /^foo\\/bar\\./i'

exports.ast = new CM.And([
export const ast = new CM.And([
  new CM.String('foo'),
  new CM.Not(new CM.String('\\ "')),
  new CM.Property('name', new CM.Or([new CM.String('wonderwoman'), new CM.String('batman')])),
@@ -1,6 +1,4 @@
'use strict'

const { escapeRegExp, isPlainObject, some } = require('lodash')
import { escapeRegExp, isPlainObject, some } from 'lodash'

// ===================================================================

@@ -25,7 +23,7 @@ class Node {
  }
}

class Null extends Node {
export class Null extends Node {
  match() {
    return true
  }
@@ -34,11 +32,10 @@ class Null extends Node {
    return ''
  }
}
exports.Null = Null

const formatTerms = terms => terms.map(term => term.toString(true)).join(' ')

class And extends Node {
export class And extends Node {
  constructor(children) {
    super()

@@ -57,9 +54,8 @@ class And extends Node {
    return isNested ? `(${terms})` : terms
  }
}
exports.And = And

class Comparison extends Node {
export class Comparison extends Node {
  constructor(operator, value) {
    super()
    this._comparator = Comparison.comparators[operator]
@@ -75,7 +71,6 @@ class Comparison extends Node {
    return this._operator + String(this._value)
  }
}
exports.Comparison = Comparison
Comparison.comparators = {
  '>': (a, b) => a > b,
  '>=': (a, b) => a >= b,
@@ -83,7 +78,7 @@ Comparison.comparators = {
  '<=': (a, b) => a <= b,
}

class Or extends Node {
export class Or extends Node {
  constructor(children) {
    super()

@@ -101,9 +96,8 @@ class Or extends Node {
    return `|(${formatTerms(this.children)})`
  }
}
exports.Or = Or

class Not extends Node {
export class Not extends Node {
  constructor(child) {
    super()

@@ -118,9 +112,8 @@ class Not extends Node {
    return '!' + this.child.toString(true)
  }
}
exports.Not = Not

exports.Number = exports.NumberNode = class NumberNode extends Node {
export class NumberNode extends Node {
  constructor(value) {
    super()

@@ -140,8 +133,9 @@ exports.Number = exports.NumberNode = class NumberNode extends Node {
    return String(this.value)
  }
}
export { NumberNode as Number }

class NumberOrStringNode extends Node {
export class NumberOrStringNode extends Node {
  constructor(value) {
    super()

@@ -166,9 +160,9 @@ class NumberOrStringNode extends Node {
    return this.value
  }
}
exports.NumberOrString = exports.NumberOrStringNode = NumberOrStringNode
export { NumberOrStringNode as NumberOrString }

class Property extends Node {
export class Property extends Node {
  constructor(name, child) {
    super()

@@ -184,13 +178,12 @@ class Property extends Node {
    return `${formatString(this.name)}:${this.child.toString(true)}`
  }
}
exports.Property = Property

const escapeChar = char => '\\' + char
const formatString = value =>
  Number.isNaN(+value) ? (isRawString(value) ? value : `"${value.replace(/\\|"/g, escapeChar)}"`) : `"${value}"`

class GlobPattern extends Node {
export class GlobPattern extends Node {
  constructor(value) {
    // fallback to string node if no wildcard
    if (value.indexOf('*') === -1) {
@@ -223,9 +216,8 @@ class GlobPattern extends Node {
    return this.value
  }
}
exports.GlobPattern = GlobPattern

class RegExpNode extends Node {
export class RegExpNode extends Node {
  constructor(pattern, flags) {
    super()

@@ -253,9 +245,9 @@ class RegExpNode extends Node {
    return this.re.toString()
  }
}
exports.RegExp = RegExpNode
export { RegExpNode as RegExp }

class StringNode extends Node {
export class StringNode extends Node {
  constructor(value) {
    super()

@@ -283,9 +275,9 @@ class StringNode extends Node {
    return formatString(this.value)
  }
}
exports.String = exports.StringNode = StringNode
export { StringNode as String }

class TruthyProperty extends Node {
export class TruthyProperty extends Node {
  constructor(name) {
    super()

@@ -300,7 +292,6 @@ class TruthyProperty extends Node {
    return formatString(this.name) + '?'
  }
}
exports.TruthyProperty = TruthyProperty

// -------------------------------------------------------------------
@@ -540,7 +531,7 @@ const parser = P.grammar({
  ),
  ws: P.regex(/\s*/),
}).default
exports.parse = parser.parse.bind(parser)
export const parse = parser.parse.bind(parser)

// -------------------------------------------------------------------

@@ -582,7 +573,7 @@ const _getPropertyClauseStrings = ({ child }) => {
}

// Find possible values for property clauses in a and clause.
exports.getPropertyClausesStrings = function getPropertyClausesStrings(node) {
export const getPropertyClausesStrings = node => {
  if (!node) {
    return {}
  }
@@ -614,7 +605,7 @@ exports.getPropertyClausesStrings = function getPropertyClausesStrings(node) {

// -------------------------------------------------------------------

exports.setPropertyClause = function setPropertyClause(node, name, child) {
export const setPropertyClause = (node, name, child) => {
  const property = child && new Property(name, typeof child === 'string' ? new StringNode(child) : child)

  if (node === undefined) {
@@ -1,9 +1,7 @@
/* eslint-env jest */

'use strict'

const { ast, pattern } = require('./index.fixtures')
const {
import { ast, pattern } from './index.fixtures'
import {
  getPropertyClausesStrings,
  GlobPattern,
  Null,
@@ -13,7 +11,7 @@ const {
  Property,
  setPropertyClause,
  StringNode,
} = require('./')
} from './'

it('getPropertyClausesStrings', () => {
  const tmp = getPropertyClausesStrings(parse('foo bar:baz baz:|(foo bar /^boo$/ /^far$/) foo:/^bar$/'))
3 packages/value-matcher/.babelrc.js Normal file
@@ -0,0 +1,3 @@
'use strict'

module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))

1 packages/value-matcher/.eslintrc.js Symbolic link
@@ -0,0 +1 @@
../../scripts/babel-eslintrc.js
@@ -16,13 +16,27 @@
    "url": "https://vates.fr"
  },
  "preferGlobal": false,
  "main": "dist/",
  "browserslist": [
    ">2%"
  ],
  "engines": {
    "node": ">=6"
  },
  "devDependencies": {
    "@babel/cli": "^7.0.0",
    "@babel/core": "^7.0.0",
    "@babel/preset-env": "^7.0.0",
    "cross-env": "^7.0.2",
    "rimraf": "^3.0.0"
  },
  "scripts": {
    "build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
    "clean": "rimraf dist/",
    "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
    "prebuild": "yarn run clean",
    "predev": "yarn run prebuild",
    "prepublishOnly": "yarn run build",
    "postversion": "npm publish"
  }
}
@@ -1,5 +1,3 @@
'use strict'

const match = (pattern, value) => {
  if (Array.isArray(pattern)) {
    return (
@@ -45,6 +43,4 @@ const match = (pattern, value) => {
  return pattern === value
}

exports.createPredicate = function createPredicate(pattern) {
  return value => match(pattern, value)
}
export const createPredicate = pattern => value => match(pattern, value)
@@ -1,7 +1,7 @@
{
  "private": false,
  "name": "vhd-cli",
  "version": "0.7.2",
  "version": "0.7.1",
  "license": "ISC",
  "description": "Tools to read/create and merge VHD files",
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/vhd-cli",
@@ -24,14 +24,14 @@
    "node": ">=8.10"
  },
  "dependencies": {
    "@xen-orchestra/fs": "^1.0.3",
    "@xen-orchestra/fs": "^1.0.1",
    "cli-progress": "^3.1.0",
    "exec-promise": "^0.7.0",
    "getopts": "^2.2.3",
    "human-format": "^1.0.0",
    "lodash": "^4.17.21",
    "uuid": "^8.3.2",
    "vhd-lib": "^3.2.0"
    "vhd-lib": "^3.1.0"
  },
  "devDependencies": {
    "@babel/cli": "^7.0.0",
@@ -104,7 +104,7 @@ exports.VhdAbstract = class VhdAbstract {
   *
   * @returns {number} the merged data size
   */
  async mergeBlock(child, blockId) {
  async coalesceBlock(child, blockId) {
    const block = await child.readBlock(blockId)
    await this.writeEntireBlock(block)
    return block.data.length
@@ -53,25 +53,19 @@ test('Can coalesce block', async () => {
    const childDirectoryVhd = yield openVhd(handler, childDirectoryName)
    await childDirectoryVhd.readBlockAllocationTable()

    let childBlockData = (await childDirectoryVhd.readBlock(0)).data
    await parentVhd.mergeBlock(childDirectoryVhd, 0)
    await parentVhd.coalesceBlock(childFileVhd, 0)
    await parentVhd.writeFooter()
    await parentVhd.writeBlockAllocationTable()
    let parentBlockData = (await parentVhd.readBlock(0)).data
    // block should be present in parent
    let childBlockData = (await childFileVhd.readBlock(0)).data
    expect(parentBlockData.equals(childBlockData)).toEqual(true)
    // block should not be in child since it's a rename for vhd directory
    await expect(childDirectoryVhd.readBlock(0)).rejects.toThrowError()

    childBlockData = (await childFileVhd.readBlock(1)).data
    await parentVhd.mergeBlock(childFileVhd, 1)
    await parentVhd.coalesceBlock(childDirectoryVhd, 0)
    await parentVhd.writeFooter()
    await parentVhd.writeBlockAllocationTable()
    parentBlockData = (await parentVhd.readBlock(1)).data
    // block should be present in parent in case of mixed vhdfile/vhddirectory
    expect(parentBlockData.equals(childBlockData)).toEqual(true)
    // block should still be child
    await childFileVhd.readBlock(1)
    parentBlockData = (await parentVhd.readBlock(0)).data
    childBlockData = (await childDirectoryVhd.readBlock(0)).data
    expect(parentBlockData).toEqual(childBlockData)
  })
})

@@ -142,13 +142,13 @@ exports.VhdDirectory = class VhdDirectory extends VhdAbstract {
    return test(this.#blockTable, blockId)
  }

  #getChunkPath(partName) {
  _getChunkPath(partName) {
    return this._path + '/' + partName
  }

  async _readChunk(partName) {
    // here we can implement compression and / or crypto
    const buffer = await this._handler.readFile(this.#getChunkPath(partName))
    const buffer = await this._handler.readFile(this._getChunkPath(partName))

    const uncompressed = await this.#compressor.decompress(buffer)
    return {
@@ -164,20 +160,16 @@ exports.VhdDirectory = class VhdDirectory extends VhdAbstract {
    )

    const compressed = await this.#compressor.compress(buffer)
    return this._handler.outputFile(this.#getChunkPath(partName), compressed, this._opts)
    return this._handler.outputFile(this._getChunkPath(partName), compressed, this._opts)
  }

  // put block in subdirectories to limit impact when doing directory listing
  #getBlockPath(blockId) {
  _getBlockPath(blockId) {
    const blockPrefix = Math.floor(blockId / 1e3)
    const blockSuffix = blockId - blockPrefix * 1e3
    return `blocks/${blockPrefix}/${blockSuffix}`
  }

  _getFullBlockPath(blockId) {
    return this.#getChunkPath(this.#getBlockPath(blockId))
  }

  async readHeaderAndFooter() {
    await this.#readChunkFilters()

@@ -204,7 +200,7 @@ exports.VhdDirectory = class VhdDirectory extends VhdAbstract {
    if (onlyBitmap) {
      throw new Error(`reading 'bitmap of block' ${blockId} in a VhdDirectory is not implemented`)
    }
    const { buffer } = await this._readChunk(this.#getBlockPath(blockId))
    const { buffer } = await this._readChunk(this._getBlockPath(blockId))
    return {
      id: blockId,
      bitmap: buffer.slice(0, this.bitmapSize),
@@ -244,39 +240,25 @@ exports.VhdDirectory = class VhdDirectory extends VhdAbstract {
  }

  // only works if data are in the same handler
  // and if the full block is modified in child ( which is the case with xcp)
  // and if the full block is modified in child ( which is the case whit xcp)
  // and if the compression type is same on both sides
  async mergeBlock(child, blockId, isResumingMerge = false) {
    const childBlockPath = child._getFullBlockPath?.(blockId)
  async coalesceBlock(child, blockId) {
    if (
      childBlockPath === undefined ||
      !(child instanceof VhdDirectory) ||
      this._handler !== child._handler ||
      child.compressionType !== this.compressionType ||
      child.compressionType === 'MIXED'
      child.compressionType !== this.compressionType
    ) {
      return super.mergeBlock(child, blockId)
      return super.coalesceBlock(child, blockId)
    }
    try {
      await this._handler.rename(childBlockPath, this._getFullBlockPath(blockId))
    } catch (error) {
      if (error.code === 'ENOENT' && isResumingMerge === true) {
        // when resuming, the blocks moved since the last merge state write are
        // not in the child anymore but it should be ok

        // it will throw an error if block is missing in parent
        // won't detect if the block was already in parent and is broken/missing in child
        const { data } = await this.readBlock(blockId)
        assert.strictEqual(data.length, this.header.blockSize)
      } else {
        throw error
      }
    }
    setBitmap(this.#blockTable, blockId)
    await this._handler.copy(
      child._getChunkPath(child._getBlockPath(blockId)),
      this._getChunkPath(this._getBlockPath(blockId))
    )
    return sectorsToBytes(this.sectorsPerBlock)
  }

  async writeEntireBlock(block) {
    await this._writeChunk(this.#getBlockPath(block.id), block.buffer)
    await this._writeChunk(this._getBlockPath(block.id), block.buffer)
    setBitmap(this.#blockTable, block.id)
  }

@@ -222,14 +222,14 @@ test('Can coalesce block', async () => {
    const childDirectoryVhd = yield openVhd(handler, childDirectoryName)
    await childDirectoryVhd.readBlockAllocationTable()

    await parentVhd.mergeBlock(childFileVhd, 0)
    await parentVhd.coalesceBlock(childFileVhd, 0)
    await parentVhd.writeFooter()
    await parentVhd.writeBlockAllocationTable()
    let parentBlockData = (await parentVhd.readBlock(0)).data
    let childBlockData = (await childFileVhd.readBlock(0)).data
    expect(parentBlockData).toEqual(childBlockData)

    await parentVhd.mergeBlock(childDirectoryVhd, 0)
    await parentVhd.coalesceBlock(childDirectoryVhd, 0)
    await parentVhd.writeFooter()
    await parentVhd.writeBlockAllocationTable()
    parentBlockData = (await parentVhd.readBlock(0)).data
@@ -43,16 +43,6 @@ const VhdSynthetic = class VhdSynthetic extends VhdAbstract {
    }
  }

  get compressionType() {
    const compressionType = this.vhds[0].compressionType
    for (let i = 0; i < this.vhds.length; i++) {
      if (compressionType !== this.vhds[i].compressionType) {
        return 'MIXED'
      }
    }
    return compressionType
  }

  /**
   * @param {Array<VhdAbstract>} vhds the chain of Vhds used to compute this Vhd, from the deepest child (in position 0), to the root (in the last position)
   * only the last one can have any type. Other must have type DISK_TYPES.DIFFERENCING (delta)
@@ -84,28 +74,17 @@ const VhdSynthetic = class VhdSynthetic extends VhdAbstract {
    }
  }

  #getVhdWithBlock(blockId) {
  async readBlock(blockId, onlyBitmap = false) {
    const index = this.#vhds.findIndex(vhd => vhd.containsBlock(blockId))
    assert(index !== -1, `no such block ${blockId}`)
    return this.#vhds[index]
  }

  async readBlock(blockId, onlyBitmap = false) {
    // only read the content of the first vhd containing this block
    return await this.#getVhdWithBlock(blockId).readBlock(blockId, onlyBitmap)
  }

  async mergeBlock(child, blockId) {
    throw new Error(`can't coalesce block into a vhd synthetic`)
    return await this.#vhds[index].readBlock(blockId, onlyBitmap)
  }

  _readParentLocatorData(id) {
    return this.#vhds[this.#vhds.length - 1]._readParentLocatorData(id)
  }
  _getFullBlockPath(blockId) {
    const vhd = this.#getVhdWithBlock(blockId)
    return vhd?._getFullBlockPath(blockId)
  }
}

// add decorated static method

@@ -6,8 +6,7 @@ exports.checkVhdChain = require('./checkChain')
exports.createReadableSparseStream = require('./createReadableSparseStream')
exports.createVhdStreamWithLength = require('./createVhdStreamWithLength')
exports.createVhdDirectoryFromStream = require('./createVhdDirectoryFromStream').createVhdDirectoryFromStream
const { mergeVhd } = require('./merge')
exports.mergeVhd = mergeVhd
exports.mergeVhd = require('./merge')
exports.peekFooterFromVhdStream = require('./peekFooterFromVhdStream')
exports.openVhd = require('./openVhd').openVhd
exports.VhdAbstract = require('./Vhd/VhdAbstract').VhdAbstract

@@ -9,7 +9,6 @@ const { getHandler } = require('@xen-orchestra/fs')
const { pFromCallback } = require('promise-toolbox')

const { VhdFile, chainVhd, mergeVhd } = require('./index')
const { _cleanupVhds: cleanupVhds } = require('./merge')

const { checkFile, createRandomFile, convertFromRawToVhd } = require('./tests/utils')
@@ -39,15 +38,14 @@ test('merge works in normal cases', async () => {
  await createRandomFile(`${tempDir}/${childRandomFileName}`, mbOfChildren)
  await convertFromRawToVhd(`${tempDir}/${childRandomFileName}`, `${tempDir}/${child1FileName}`)
  await chainVhd(handler, parentFileName, handler, child1FileName, true)
  await checkFile(`${tempDir}/${parentFileName}`)

  // merge
  await mergeVhd(handler, parentFileName, handler, child1FileName)

  // check that the merged vhd is still valid
  await checkFile(`${tempDir}/${child1FileName}`)
  // check that vhd is still valid
  await checkFile(`${tempDir}/${parentFileName}`)

  const parentVhd = new VhdFile(handler, child1FileName)
  const parentVhd = new VhdFile(handler, parentFileName)
  await parentVhd.readHeaderAndFooter()
  await parentVhd.readBlockAllocationTable()
@@ -140,11 +138,11 @@ test('it can resume a merge ', async () => {
  await mergeVhd(handler, 'parent.vhd', handler, 'child1.vhd')

  // reload header footer and block allocation table , they should succed
  await childVhd.readHeaderAndFooter()
  await childVhd.readBlockAllocationTable()
  await parentVhd.readHeaderAndFooter()
  await parentVhd.readBlockAllocationTable()
  let offset = 0
  // check that the data are the same as source
  for await (const block of childVhd.blocks()) {
  for await (const block of parentVhd.blocks()) {
    const blockContent = block.data
    // first block is marked as already merged, should not be modified
    // second block should come from children
@@ -155,7 +153,7 @@ test('it can resume a merge ', async () => {
    await fs.read(fd, buffer, 0, buffer.length, offset)

    expect(buffer.equals(blockContent)).toEqual(true)
    offset += childVhd.header.blockSize
    offset += parentVhd.header.blockSize
  }
})
@@ -185,9 +183,9 @@ test('it merge multiple child in one pass ', async () => {
  await mergeVhd(handler, parentFileName, handler, [grandChildFileName, childFileName])

  // check that vhd is still valid
  await checkFile(grandChildFileName)
  await checkFile(parentFileName)

  const parentVhd = new VhdFile(handler, grandChildFileName)
  const parentVhd = new VhdFile(handler, parentFileName)
  await parentVhd.readHeaderAndFooter()
  await parentVhd.readBlockAllocationTable()

@@ -208,21 +206,3 @@ test('it merge multiple child in one pass ', async () => {
    offset += parentVhd.header.blockSize
  }
})

test('it cleans vhd mergedfiles', async () => {
  const handler = getHandler({ url: `file://${tempDir}` })

  await handler.writeFile('parent', 'parentData')
  await handler.writeFile('child1', 'child1Data')
  await handler.writeFile('child2', 'child2Data')
  await handler.writeFile('child3', 'child3Data')

  // childPath is from the grand children to the children
  await cleanupVhds(handler, 'parent', ['child3', 'child2', 'child1'], { remove: true })

  // only child3 should stay, with the data of parent
  const [child3, ...other] = await handler.list('.')
  expect(other.length).toEqual(0)
  expect(child3).toEqual('child3')
  expect((await handler.readFile('child3')).toString('utf8')).toEqual('parentData')
})
@@ -12,35 +12,11 @@ const { basename, dirname } = require('path')
const { DISK_TYPES } = require('./_constants')
const { Disposable } = require('promise-toolbox')
const { asyncEach } = require('@vates/async-each')
const { VhdAbstract } = require('./Vhd/VhdAbstract')
const { VhdDirectory } = require('./Vhd/VhdDirectory')
const { VhdSynthetic } = require('./Vhd/VhdSynthetic')
const { asyncMap } = require('@xen-orchestra/async-map')

const { warn } = createLogger('vhd-lib:merge')

// the chain we want to merge is [ ancestor, child1, ..., childn]
// this chain can have grand children or grand parent
//
// 1. Create a VhdSynthetic from all children if more than 1 child are merged
// 2. Merge the resulting vhd into the ancestor
//    2.a if at least one is a file : copy file part from child to parent
//    2.b if they are all vhd directory : move blocks from children to the ancestor
// 3. update the size, uuid and timestamp of the ancestor with those of child n
// 4. Delete all (now) unused VHDs
// 5. Rename the ancestor to child n
//
//                       VhdSynthetic
//                            |
//            /‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾\
// [ ancestor, child1, ...,child n-1, child n ]
//     |        \___________________/      ^
//     |                  |                |
//     |              unused VHDs          |
//     |                                   |
//     \___________rename__________________/

// write the merge progress file at most every `delay` seconds
function makeThrottledWriter(handler, path, delay) {
  let lastWrite = Date.now()
  return async json => {
@@ -52,45 +28,21 @@ function makeThrottledWriter(handler, path, delay) {
  }
}

// make the rename / delete part of the merge process
// will fail if parent and children are in different remote

function cleanupVhds(handler, parent, children, { logInfo = noop, remove = false } = {}) {
  if (!Array.isArray(children)) {
    children = [children]
  }
  const mergeTargetChild = children.shift()

  return Promise.all([
    VhdAbstract.rename(handler, parent, mergeTargetChild),
    asyncMap(children, child => {
      logInfo(`the VHD child is already merged`, { child })
      if (remove) {
        logInfo(`deleting merged VHD child`, { child })
        return VhdAbstract.unlink(handler, child)
      }
    }),
  ])
}
module.exports._cleanupVhds = cleanupVhds

// Merge one or multiple vhd child into vhd parent.
// childPath can be array to create a synthetic VHD from multiple VHDs
// childPath is from the grand children to the children
//
// TODO: rename the VHD file during the merge
module.exports.mergeVhd = limitConcurrency(2)(async function merge(
module.exports = limitConcurrency(2)(async function merge(
  parentHandler,
  parentPath,
  childHandler,
  childPath,
  { onProgress = noop, logInfo = noop, remove } = {}
  { onProgress = noop } = {}
) {
  const mergeStatePath = dirname(parentPath) + '/' + '.' + basename(parentPath) + '.merge.json'

  return await Disposable.use(async function* () {
    let mergeState
    let isResuming = false
    try {
      const mergeStateContent = await parentHandler.readFile(mergeStatePath)
      mergeState = JSON.parse(mergeStateContent)
@@ -123,7 +75,6 @@ module.exports.mergeVhd = limitConcurrency(2)(async function merge(
      assert.strictEqual(childVhd.footer.diskType, DISK_TYPES.DIFFERENCING)
      assert.strictEqual(childVhd.header.blockSize, parentVhd.header.blockSize)
    } else {
      isResuming = true
      // vhd should not have changed to resume
      assert.strictEqual(parentVhd.header.checksum, mergeState.parent.header)
      assert.strictEqual(childVhd.header.checksum, mergeState.child.header)
@@ -164,12 +115,12 @@ module.exports.mergeVhd = limitConcurrency(2)(async function merge(
    let counter = 0

    const mergeStateWriter = makeThrottledWriter(parentHandler, mergeStatePath, 10e3)

    await asyncEach(
      toMerge,
      async blockId => {
        merging.add(blockId)
        mergeState.mergedDataSize += await parentVhd.mergeBlock(childVhd, blockId, isResuming)

        mergeState.mergedDataSize += await parentVhd.coalesceBlock(childVhd, blockId)
        merging.delete(blockId)

        onProgress({
@@ -204,8 +155,6 @@ module.exports.mergeVhd = limitConcurrency(2)(async function merge(
    // should be a disposable
    parentHandler.unlink(mergeStatePath).catch(warn)

    await cleanupVhds(parentHandler, parentPath, childPath, { logInfo, remove })

    return mergeState.mergedDataSize
  })
})
@@ -1,7 +1,7 @@
{
  "private": false,
  "name": "vhd-lib",
  "version": "3.2.0",
  "version": "3.1.0",
  "license": "AGPL-3.0-or-later",
  "description": "Primitives for VHD file handling",
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/vhd-lib",
@@ -29,7 +29,7 @@
    "uuid": "^8.3.1"
  },
  "devDependencies": {
    "@xen-orchestra/fs": "^1.0.3",
    "@xen-orchestra/fs": "^1.0.1",
    "execa": "^5.0.0",
    "get-stream": "^6.0.0",
    "rimraf": "^3.0.2",
@@ -3,10 +3,11 @@
const { readChunk } = require('@vates/read-chunk')

const { FOOTER_SIZE } = require('./_constants')
const { unpackFooter } = require('./Vhd/_utils.js')
const { fuFooter } = require('./_structs')

module.exports = async function peekFooterFromStream(stream) {
  const buffer = await readChunk(stream, FOOTER_SIZE)
  stream.unshift(buffer)
  return unpackFooter(buffer)
  const footerBuffer = await readChunk(stream, FOOTER_SIZE)
  const footer = fuFooter.unpack(footerBuffer)
  stream.unshift(footerBuffer)
  return footer
}

@@ -85,9 +85,10 @@ async function convertToVhdDirectory(rawFileName, vhdFileName, path) {
  await fs.mkdir(path + '/blocks/0/')
  const stats = await fs.stat(rawFileName)

  for (let i = 0, offset = 0; offset < stats.size; i++, offset += blockDataSize) {
  const sizeMB = stats.size / 1024 / 1024
  for (let i = 0, offset = 0; i < sizeMB; i++, offset += blockDataSize) {
    const blockData = Buffer.alloc(blockDataSize)
    await fs.read(srcRaw, blockData, 0, blockData.length, offset)
    await fs.read(srcRaw, blockData, offset)
    await fs.writeFile(path + '/blocks/0/' + i, Buffer.concat([bitmap, blockData]))
  }
  await fs.close(srcRaw)
@@ -40,7 +40,7 @@
    "human-format": "^1.0.0",
    "lodash": "^4.17.4",
    "pw": "^0.0.4",
    "xen-api": "^1.2.1"
    "xen-api": "^1.2.0"
  },
  "devDependencies": {
    "@babel/cli": "^7.1.5",

@@ -8,6 +8,6 @@
    "promise-toolbox": "^0.19.2",
    "readable-stream": "^3.1.1",
    "throttle": "^1.0.3",
    "vhd-lib": "^3.2.0"
    "vhd-lib": "^3.1.0"
  }
}
@@ -1,7 +1,7 @@
{
  "private": false,
  "name": "xen-api",
  "version": "1.2.1",
  "version": "1.2.0",
  "license": "ISC",
  "description": "Connector to the Xen API",
  "keywords": [

@@ -465,8 +465,6 @@ export class Xapi extends EventEmitter {
    await this._setHostAddressInUrl(url, host)

    const doRequest = httpRequest.put.bind(undefined, $cancelToken, {
      agent: this.httpAgent,

      body,
      headers,
      rejectUnauthorized: !this._allowUnauthorized,
@@ -488,6 +486,7 @@ export class Xapi extends EventEmitter {
      query: 'task_id' in query ? omit(query, 'task_id') : query,

      maxRedirects: 0,
      agent: this.httpAgent,
    }).then(
      response => {
        response.cancel()
@@ -1,12 +1,12 @@
import { ensureDir as mkdirp } from 'fs-extra'
import { readFile, writeFile } from 'fs/promises'
import { xdgConfig } from 'xdg-basedir'
import lodashGet from 'lodash/get.js'
import lodashUnset from 'lodash/unset.js'
import xdgBasedir from 'xdg-basedir'

// ===================================================================

const configPath = xdgConfig + '/xo-cli'
const configPath = xdgBasedir.config + '/xo-cli'
const configFile = configPath + '/config.json'

// ===================================================================

@@ -75,23 +75,13 @@ async function parseRegisterArgs(args) {
  }
}

async function _createToken({ allowUnauthorized, description, email, expiresIn, password, url }) {
async function _createToken({ allowUnauthorized, email, expiresIn, password, url }) {
  const xo = new Xo({ rejectUnauthorized: !allowUnauthorized, url })
  await xo.open()
  try {
    await xo.signIn({ email, password })
    console.warn('Successfully logged with', xo.user.email)
  await xo.signIn({ email, password })
  console.warn('Successfully logged with', xo.user.email)

    return await xo.call('token.create', { description, expiresIn }).catch(error => {
      // if invalid parameter error, retry without description for backward compatibility
      if (error.code === 10) {
        return xo.call('token.create', { expiresIn })
      }
      throw error
    })
  } finally {
    await xo.close()
  }
  return await xo.call('token.create', { expiresIn })
}
function createOutputStream(path) {
@@ -282,10 +272,7 @@ function main(args) {
COMMANDS.help = help

async function createToken(args) {
  const opts = await parseRegisterArgs(args)
  opts.description = 'xo-cli --createToken'

  const token = await _createToken(opts)
  const token = await _createToken(await parseRegisterArgs(args))
  console.warn('Authentication token created')
  console.warn()
  console.log(token)
@@ -294,7 +281,6 @@ COMMANDS.createToken = createToken

async function register(args) {
  const opts = await parseRegisterArgs(args)
  opts.description = 'xo-cli --register'

  await config.set({
    allowUnauthorized: opts.allowUnauthorized,
@@ -411,67 +397,64 @@ async function call(args) {
  delete params['@']

  const xo = await connect()
  try {
    // FIXME: do not use private properties.
    const baseUrl = xo._url.replace(/^ws/, 'http')
    const httpOptions = {
      rejectUnauthorized: !(await config.load()).allowUnauthorized,

  // FIXME: do not use private properties.
  const baseUrl = xo._url.replace(/^ws/, 'http')
  const httpOptions = {
    rejectUnauthorized: !(await config.load()).allowUnauthorized,
  }

  const result = await xo.call(method, params)
  let keys, key, url
  if (isObject(result) && (keys = getKeys(result)).length === 1) {
    key = keys[0]

    if (key === '$getFrom') {
      ensurePathParam(method, file)
      url = new URL(result[key], baseUrl)
      const output = createOutputStream(file)
      const response = await hrp(url, httpOptions)

      const progress = progressStream(
        {
          length: response.headers['content-length'],
          time: 1e3,
        },
        printProgress
      )

      return fromCallback(pipeline, response, progress, output)
    }

    const result = await xo.call(method, params)
    let keys, key, url
    if (isObject(result) && (keys = getKeys(result)).length === 1) {
      key = keys[0]
    if (key === '$sendTo') {
      ensurePathParam(method, file)
      url = new URL(result[key], baseUrl)

      if (key === '$getFrom') {
        ensurePathParam(method, file)
        url = new URL(result[key], baseUrl)
        const output = createOutputStream(file)
        const response = await hrp(url, httpOptions)

        const progress = progressStream(
      const { size: length } = await stat(file)
      const input = pipeline(
        createReadStream(file),
        progressStream(
          {
            length: response.headers['content-length'],
            length,
            time: 1e3,
          },
          printProgress
        )
        ),
        noop
      )

        return fromCallback(pipeline, response, progress, output)
      }

      if (key === '$sendTo') {
        ensurePathParam(method, file)
        url = new URL(result[key], baseUrl)

        const { size: length } = await stat(file)
        const input = pipeline(
          createReadStream(file),
          progressStream(
            {
              length,
              time: 1e3,
            },
            printProgress
          ),
          noop
        )

        return hrp
          .post(url, httpOptions, {
            body: input,
            headers: {
              'content-length': length,
            },
          })
          .readAll('utf-8')
      }
      return hrp
        .post(url, httpOptions, {
          body: input,
          headers: {
            'content-length': length,
          },
        })
        .readAll('utf-8')
    }

    return result
  } finally {
    await xo.close()
  }

  return result
}
COMMANDS.call = call

@@ -1,7 +1,7 @@
{
  "private": false,
  "name": "xo-cli",
  "version": "0.14.0",
  "version": "0.13.0",
  "license": "AGPL-3.0-or-later",
  "description": "Basic CLI for Xen-Orchestra",
  "keywords": [
@@ -29,7 +29,7 @@
    "node": ">=14.13"
  },
  "dependencies": {
    "chalk": "^5.0.1",
    "chalk": "^4.1.0",
    "exec-promise": "^0.7.0",
    "fs-extra": "^10.0.0",
    "getopts": "^2.3.0",
@@ -37,11 +37,11 @@
    "human-format": "^1.0.0",
    "lodash": "^4.17.4",
    "micromatch": "^4.0.2",
    "pretty-ms": "^8.0.0",
    "pretty-ms": "^7.0.0",
    "progress-stream": "^2.0.0",
    "promise-toolbox": "^0.21.0",
    "pw": "^0.0.4",
    "xdg-basedir": "^5.1.0",
    "xdg-basedir": "^4.0.0",
    "xo-lib": "^0.11.1"
  },
  "scripts": {
@@ -2,51 +2,26 @@ import filter from 'lodash/filter'
import map from 'lodash/map'
import trim from 'lodash/trim'
import trimStart from 'lodash/trimStart'
import queryString from 'querystring'
import urlParser from 'url-parse'

const NFS_RE = /^([^:]+):(?:(\d+):)?([^:?]+)(\?[^?]*)?$/
const SMB_RE = /^([^:]+):(.+)@([^@]+)\\\\([^\0?]+)(?:\0([^?]*))?(\?[^?]*)?$/
const NFS_RE = /^([^:]+):(?:(\d+):)?([^:]+)$/
const SMB_RE = /^([^:]+):(.+)@([^@]+)\\\\([^\0]+)(?:\0(.*))?$/

const sanitizePath = (...paths) => filter(map(paths, s => s && filter(map(s.split('/'), trim)).join('/'))).join('/')

const parseOptionList = (optionList = '') => {
  if (optionList.startsWith('?')) {
    optionList = optionList.substring(1)
  }
  const parsed = queryString.parse(optionList)
  Object.keys(parsed).forEach(key => {
    const val = parsed[key]
    parsed[key] = JSON.parse(val)
  })
  return parsed
}

const makeOptionList = options => {
  const encoded = {}

  Object.keys(options).forEach(key => {
    const val = options[key]
    encoded[key] = JSON.stringify(val)
  })
  return queryString.stringify(encoded)
}

export const parse = string => {
  let object = {}
  let [type, rest] = string.split('://')
  const object = {}

  const [type, rest] = string.split('://')
  if (type === 'file') {
    object.type = 'file'
    let optionList
    ;[rest, optionList] = rest.split('?')
    object.path = `/${trimStart(rest, '/')}` // the leading slash has been forgotten on client side first implementation
    object = { ...parseOptionList(optionList), ...object }
  } else if (type === 'nfs') {
    object.type = 'nfs'
    let host, port, path, optionList
    let host, port, path
    // Some users have a remote with a colon in the URL, which breaks the parsing since this commit: https://github.com/vatesfr/xen-orchestra/commit/fb1bf6a1e748b457f2d2b89ba02fa104554c03df
    try {
      ;[, host, port, path, optionList] = NFS_RE.exec(rest)
      ;[, host, port, path] = NFS_RE.exec(rest)
    } catch (err) {
      ;[host, path] = rest.split(':')
      object.invalidUrl = true
@@ -54,18 +29,16 @@ export const parse = string => {
    object.host = host
    object.port = port
    object.path = `/${trimStart(path, '/')}` // takes care of a missing leading slash coming from previous version format
    object = { ...parseOptionList(optionList), ...object }
  } else if (type === 'smb') {
    object.type = 'smb'
    const [, username, password, domain, host, path = '', optionList] = SMB_RE.exec(rest)
    const [, username, password, domain, host, path = ''] = SMB_RE.exec(rest)
    object.host = host
    object.path = path
    object.domain = domain
    object.username = username
    object.password = password
    object = { ...parseOptionList(optionList), ...object }
  } else if (type === 's3' || type === 's3+http') {
    const parsed = urlParser(string, false)
    const parsed = urlParser(string, true)
    object.protocol = parsed.protocol === 's3:' ? 'https' : 'http'
    object.type = 's3'
    object.region = parsed.hash.length === 0 ? undefined : parsed.hash.slice(1) // remove '#'
@@ -73,12 +46,24 @@ export const parse = string => {
    object.path = parsed.pathname
    object.username = parsed.username
    object.password = decodeURIComponent(parsed.password)
    object = { ...parseOptionList(parsed.query), ...object }
    const qs = parsed.query
    object.allowUnauthorized = qs.allowUnauthorized === 'true'
  }
  return object
}

export const format = ({ type, host, path, port, username, password, domain, protocol = type, region, ...options }) => {
export const format = ({
  type,
  host,
  path,
  port,
  username,
  password,
  domain,
  protocol = type,
  region,
  allowUnauthorized = false,
}) => {
  type === 'local' && (type = 'file')
  let string = `${type}://`
  if (type === 'nfs') {
@@ -100,10 +85,8 @@ export const format = ({ type, host, path, port, username, password, domain, pro
  }
  string += path

  const optionsList = makeOptionList(options)

  if (optionsList !== '') {
    string += '?' + optionsList
  if (type === 's3' && allowUnauthorized === true) {
    string += `?allowUnauthorized=true`
  }
  if (type === 's3' && region !== undefined) {
    string += `#${region}`
@@ -15,14 +15,6 @@ const data = deepFreeze({
      path: '/var/lib/xoa/backup',
    },
  },
  'file with use vhd directory': {
    string: 'file:///var/lib/xoa/backup?useVhdDirectory=true',
    object: {
      type: 'file',
      path: '/var/lib/xoa/backup',
      useVhdDirectory: true,
    },
  },
  SMB: {
    string: 'smb://Administrator:pas:sw@ord@toto\\\\192.168.100.225\\smb\0',
    object: {
@@ -34,18 +26,6 @@ const data = deepFreeze({
      password: 'pas:sw@ord',
    },
  },
  'smb with directory': {
    string: 'smb://Administrator:pas:sw@ord@toto\\\\192.168.100.225\\smb\0?useVhdDirectory=true',
    object: {
      type: 'smb',
      host: '192.168.100.225\\smb',
      path: '',
      domain: 'toto',
      username: 'Administrator',
      password: 'pas:sw@ord',
      useVhdDirectory: true,
    },
  },
  NFS: {
    string: 'nfs://192.168.100.225:/media/nfs',
    object: {
@@ -64,18 +44,8 @@ const data = deepFreeze({
      path: '/media/nfs',
    },
  },
  'nfs with vhdDirectory': {
    string: 'nfs://192.168.100.225:20:/media/nfs?useVhdDirectory=true',
    object: {
      type: 'nfs',
      host: '192.168.100.225',
      port: '20',
      path: '/media/nfs',
      useVhdDirectory: true,
    },
  },
  S3: {
    string: 's3://AKIAS:XSuBupZ0mJlu%2B@s3-us-west-2.amazonaws.com/test-bucket/dir?allowUnauthorized=false',
    string: 's3://AKIAS:XSuBupZ0mJlu%2B@s3-us-west-2.amazonaws.com/test-bucket/dir',
    object: {
      type: 's3',
      protocol: 'https',
@@ -100,21 +70,6 @@ const data = deepFreeze({
      allowUnauthorized: true,
    },
  },
  'S3 with brotli': {
    string:
      's3+http://Administrator:password@192.168.100.225/bucket/dir?compressionType=%22brotli%22&compressionOptions=%7B%22level%22%3A1%7D#reg1',
    object: {
      type: 's3',
      host: '192.168.100.225',
      protocol: 'http',
      path: '/bucket/dir',
      region: 'reg1',
      username: 'Administrator',
      password: 'password',
      compressionType: 'brotli',
      compressionOptions: { level: 1 },
    },
  },
})

const parseData = deepFreeze({
@@ -156,6 +111,7 @@ const parseData = deepFreeze({
      region: 'reg1',
      username: 'Administrator',
      password: 'password',
      allowUnauthorized: false,
    },
  },
  'S3 accepting self signed certificate': {
@@ -170,6 +126,19 @@ const parseData = deepFreeze({
      password: 'password',
      allowUnauthorized: true,
    },
  'S3 with broken allowUnauthorized': {
    string: 's3+http://Administrator:password@192.168.100.225/bucket/dir?allowUnauthorized=notTrue#reg1',
    object: {
      type: 's3',
      host: '192.168.100.225',
      protocol: 'http',
      path: '/bucket/dir',
      region: 'reg1',
      username: 'Administrator',
      password: 'password',
      allowUnauthorized: false,
    },
  },
  },
})

@@ -183,6 +152,19 @@ const formatData = deepFreeze({
      path: '/var/lib/xoa/backup',
    },
  },
  'S3 with broken allowUnauthorized': {
    string: 's3+http://Administrator:password@192.168.100.225/bucket/dir#reg1',
    object: {
      type: 's3',
      host: '192.168.100.225',
      protocol: 'http',
      path: '/bucket/dir',
      region: 'reg1',
      username: 'Administrator',
      password: 'password',
      allowUnauthorized: 'notTrue',
    },
  },
})

// -------------------------------------------------------------------
@@ -1,6 +1,6 @@
{
  "name": "xo-server-backup-reports",
  "version": "0.17.0",
  "version": "0.16.10",
  "license": "AGPL-3.0-or-later",
  "description": "Backup reports plugin for XO-Server",
  "keywords": [

@@ -154,7 +154,7 @@ poolMarkingPrefix = 'xo:clientInfo:'
[xo-proxy]
callTimeout = '1 min'

channel = 'xo-proxy-appliance-{xoChannel}'
channel = 'xo-proxy-appliance'

namespace = 'xoProxyAppliance'

@@ -4,7 +4,6 @@
- [Collections](#collections)
- [VM Export](#vm-export)
- [VDI Export](#vdi-export)
- [VDI Import](#vdi-import)
- [The future](#the-future)

> This [REST](https://en.wikipedia.org/wiki/Representational_state_transfer)-oriented API is experimental. Non-backward compatible changes or removal may occur in any future release. Use of the feature is not recommended in production environments.
@@ -138,28 +137,6 @@ curl \
  > myDisk.vhd
```

## VDI Import

A VHD can be imported on an SR to create a VDI at `/rest/v0/srs/<sr uuid>/vdis`.

```bash
curl \
  -X POST \
  -b authenticationToken=KQxQdm2vMiv7jBIK0hgkmgxKzemd8wSJ7ugFGKFkTbs \
  -T myDisk.vhd \
  'https://xo.company.lan/rest/v0/srs/357bd56c-71f9-4b2a-83b8-3451dec04b8f/vdis?name_label=my_imported_VDI' \
  | cat
```

> Note: the final `| cat` ensures cURL's standard output is not a TTY, which is necessary for upload stats to be displayed.

This request returns the UUID of the created VDI.

The following query parameters are supported to customize the created VDI:

- `name_label`
- `name_description`

## The future

We are adding features and improving the REST API step by step. If you have interesting use cases or feedback, please ask directly at <https://xcp-ng.org/forum/category/12/xen-orchestra>

@@ -1,7 +1,7 @@
{
  "private": true,
  "name": "xo-server",
  "version": "5.96.0",
  "version": "5.93.1",
  "license": "AGPL-3.0-or-later",
  "description": "Server part of Xen-Orchestra",
  "keywords": [
@@ -20,7 +20,6 @@
  "preferGlobal": true,
  "bin": {
    "xo-server": "dist/cli.mjs",
    "xo-server-db": "dist/db-cli.mjs",
    "xo-server-logs": "dist/logs-cli.mjs",
    "xo-server-recover-account": "dist/recover-account-cli.mjs"
  },
@@ -40,21 +39,22 @@
    "@vates/predicates": "^1.0.0",
    "@vates/read-chunk": "^0.1.2",
    "@xen-orchestra/async-map": "^0.1.2",
    "@xen-orchestra/backups": "^0.25.0",
    "@xen-orchestra/backups": "^0.23.0",
    "@xen-orchestra/cron": "^1.0.6",
    "@xen-orchestra/defined": "^0.0.1",
    "@xen-orchestra/emit-async": "^1.0.0",
    "@xen-orchestra/fs": "^1.0.3",
    "@xen-orchestra/emit-async": "^0.1.0",
    "@xen-orchestra/fs": "^1.0.1",
    "@xen-orchestra/log": "^0.3.0",
    "@xen-orchestra/mixin": "^0.1.0",
    "@xen-orchestra/mixins": "^0.5.0",
    "@xen-orchestra/self-signed": "^0.1.3",
    "@xen-orchestra/mixins": "^0.4.0",
    "@xen-orchestra/self-signed": "^0.1.0",
    "@xen-orchestra/template": "^0.1.0",
    "@xen-orchestra/xapi": "^1.2.0",
    "@xen-orchestra/xapi": "^1.0.0",
    "ajv": "^8.0.3",
    "app-conf": "^2.1.0",
    "async-iterator-to-stream": "^1.0.1",
    "base64url": "^3.0.0",
    "bind-property-descriptor": "^2.0.0",
    "blocked-at": "^1.2.0",
    "bluebird": "^3.5.1",
    "body-parser": "^1.18.2",
@@ -81,7 +81,7 @@
    "highland": "^2.11.1",
    "http-proxy": "^1.16.2",
    "http-request-plus": "^0.14.0",
    "http-server-plus": "^0.11.1",
    "http-server-plus": "^0.11.0",
    "human-format": "^1.0.0",
    "iterable-backoff": "^0.1.0",
    "js-yaml": "^4.1.0",
@@ -126,15 +126,15 @@
    "unzipper": "^0.10.5",
    "uuid": "^8.3.1",
    "value-matcher": "^0.2.0",
    "vhd-lib": "^3.2.0",
    "vhd-lib": "^3.1.0",
    "ws": "^8.2.3",
    "xdg-basedir": "^5.1.0",
    "xen-api": "^1.2.1",
    "xen-api": "^1.2.0",
    "xo-acl-resolver": "^0.4.1",
    "xo-collection": "^0.5.0",
    "xo-common": "^0.8.0",
    "xo-remote-parser": "^0.8.0",
    "xo-vmdk-to-vhd": "^2.4.1"
    "xo-vmdk-to-vhd": "^2.3.0"
  },
  "devDependencies": {
    "@babel/cli": "^7.0.0",
@@ -1,31 +0,0 @@
import { createPredicate } from 'value-matcher'
import { extractIdsFromSimplePattern } from '@xen-orchestra/backups/extractIdsFromSimplePattern.js'
import { forbiddenOperation } from 'xo-common/api-errors.js'

export default async function backupGuard(poolId) {
  const jobs = await this.getAllJobs('backup')
  const guard = id => {
    if (this.getObject(id).$poolId === poolId) {
      throw forbiddenOperation('Backup is running', `A backup is running on the pool: ${poolId}`)
    }
  }

  jobs.forEach(({ runId, vms }) => {
    // If runId is undefined, the job is not currently running.
    if (runId !== undefined) {
      if (vms.id !== undefined) {
        extractIdsFromSimplePattern(vms).forEach(guard)
      } else {
        // Smart mode.
        // For smart mode we take a simplified approach:
        // - if smart mode is explicitly 'resident' or 'not resident' on pools, we check whether it concerns this pool
        // - if not, the job may concern this pool, so we show the warning without looking through all the impacted VMs

        const isPoolSafe = vms.$pool === undefined ? false : !createPredicate(vms.$pool)(poolId)
        if (!isPoolSafe) {
          throw forbiddenOperation('May have running backup', `A backup may run on the pool: ${poolId}`)
        }
      }
    }
  })
}
@@ -9,7 +9,7 @@ get.description = 'get existing ACLs'
// -------------------------------------------------------------------

export async function getCurrentPermissions() {
  return /* await */ this.getPermissionsForUser(this.apiContext.user.id)
  return /* await */ this.getPermissionsForUser(this.connection.get('user_id'))
}

getCurrentPermissions.description = 'get (explicit) permissions by object for the current user'

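This is the first of many hunks in this compare where an API method reads the current user from `this.apiContext` on one side and from `this.connection` / `this.user` on the other. A rough sketch of the shape implied by the call sites, inferred from this diff rather than from the actual xo-server types:

```js
// Inferred shapes only: names come from the call sites in this diff.
// Old style: the method context exposes the connection, and the user id
// must be fetched from it on every call.
const oldStyle = {
  connection: new Map([['user_id', 'u1']]), // stand-in for the JSON-RPC connection
  getUserId() {
    return this.connection.get('user_id')
  },
}

// New style: a per-call `apiContext` carries the resolved user and its
// permission, so handlers no longer touch the connection directly.
const newStyle = {
  apiContext: {
    user: { id: 'u1', permission: 'admin' },
    get permission() {
      return this.user.permission
    },
    connection: oldStyle.connection, // still reachable when needed (e.g. signIn)
  },
  getUserId() {
    return this.apiContext.user.id
  },
}
```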
@@ -8,9 +8,8 @@ getConnections.description = 'Get a list of all current connections to this API'
getConnections.permission = 'admin'

export function closeAllConnections() {
  const currentConnection = this.apiContext.connection
  for (const connection of this.apiConnections) {
    if (connection !== currentConnection) {
    if (connection !== this.connection) {
      connection.close()
    }
  }

@@ -7,7 +7,7 @@ import { REMOVE_CACHE_ENTRY } from '../_pDebounceWithKey.mjs'
import { safeDateFormat } from '../utils.mjs'

export function createJob({ schedules, ...job }) {
  job.userId = this.apiContext.user.id
  job.userId = this.user.id
  return this.createBackupNgJob(job, schedules).then(({ id }) => id)
}

@@ -6,11 +6,10 @@ import { defer } from 'golike-defer'
import { format, JsonRpcError } from 'json-rpc-peer'
import { noSuchObject } from 'xo-common/api-errors.js'
import { pipeline } from 'stream'
import { peekFooterFromVhdStream } from 'vhd-lib'
import { checkFooter, peekFooterFromVhdStream } from 'vhd-lib'
import { vmdkToVhd } from 'xo-vmdk-to-vhd'

import { VDI_FORMAT_VHD, VDI_FORMAT_RAW } from '../xapi/index.mjs'
import { parseSize } from '../utils.mjs'

const log = createLogger('xo:disk')

@@ -23,7 +22,7 @@ export const create = defer(async function ($defer, { name, size, sr, vm, bootab
  let resourceSet
  if (attach && (resourceSet = vm.resourceSet) != null) {
    try {
      await this.checkResourceSetConstraints(resourceSet, this.apiContext.user.id, [sr.id])
      await this.checkResourceSetConstraints(resourceSet, this.user.id, [sr.id])
      await this.allocateLimitsInResourceSet({ disk: size }, resourceSet)
      $defer.onFailure(() => this.releaseLimitsInResourceSet({ disk: size }, resourceSet))

@@ -37,26 +36,24 @@
      // the resource set does not exist, falls back to normal check
    }

    await this.checkPermissions([[sr.id, 'administrate']])
    await this.checkPermissions(this.user.id, [[sr.id, 'administrate']])
  } while (false)

  const xapi = this.getXapi(sr)
  const vdi = await xapi._getOrWaitObject(
    await xapi.VDI_create({
      name_label: name,
      SR: sr._xapiRef,
      virtual_size: parseSize(size),
    })
  )
  const vdi = await xapi.createVdi({
    name_label: name,
    size,
    sr: sr._xapiId,
  })
  $defer.onFailure(() => vdi.$destroy())

  if (attach) {
    await xapi.VBD_create({
    await xapi.createVbd({
      bootable,
      mode,
      userdevice: position,
      VDI: vdi.$ref,
      VM: vm._xapiRef,
      vdi: vdi.$id,
      vm: vm._xapiId,
    })
  }

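The two calling conventions in this hunk differ mostly in how the disk is described. A side-by-side sketch, grounded in the hunk itself (argument values are illustrative placeholders):

```js
// Record style (one side of the diff): field names follow the XAPI VDI
// record (`SR` is an opaque XAPI ref, `virtual_size` is in bytes), and the
// helper returns a ref that must be resolved to an object.
const vdi = await xapi._getOrWaitObject(
  await xapi.VDI_create({
    name_label: 'my disk',
    SR: sr.$ref,
    virtual_size: 1024 * 1024 * 1024, // 1 GiB in bytes
  })
)

// Legacy helper (other side): friendlier names and xo-level ids; the size
// is presumably parsed by the helper, since the record-style path calls
// parseSize() explicitly.
const vdi2 = await xapi.createVdi({
  name_label: 'my disk',
  size: '1GiB',
  sr: sr.$id,
})
```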
@@ -85,11 +82,8 @@ create.resolve = {
const VHD = 'vhd'
const VMDK = 'vmdk'

async function handleExportContent(req, res, { filename, format, vdi }) {
  const stream =
    format === VMDK
      ? await vdi.$xapi.exportVdiAsVmdk(vdi.$id, filename)
      : await vdi.$exportContent({ format: VDI_FORMAT_VHD })
async function handleExportContent(req, res, { xapi, id, filename, format }) {
  const stream = format === VMDK ? await xapi.exportVdiAsVmdk(id, filename) : await xapi.exportVdiContent(id)
  req.on('close', () => stream.destroy())

  // Remove the filename as it is already part of the URL.
@@ -109,7 +103,8 @@ export async function exportContent({ vdi, format = VHD }) {
    $getFrom: await this.registerHttpRequest(
      handleExportContent,
      {
        vdi: this.getXapiObject(vdi),
        id: vdi._xapiId,
        xapi: this.getXapi(vdi),
        filename,
        format,
      },
@@ -131,19 +126,20 @@ exportContent.resolve = {

// -------------------------------------------------------------------

async function handleImportContent(req, res, { vdi }) {
async function handleImportContent(req, res, { xapi, id }) {
  // Timeout seems to be broken in Node 4.
  // See https://github.com/nodejs/node/issues/3319
  req.setTimeout(43200000) // 12 hours
  req.length = +req.headers['content-length']
  await vdi.$importContent(req, { format: VDI_FORMAT_VHD })
  await xapi.importVdiContent(id, req)
  res.end(format.response(0, true))
}

export async function importContent({ vdi }) {
  return {
    $sendTo: await this.registerHttpRequest(handleImportContent, {
      vdi: this.getXapiObject(vdi),
      id: vdi._xapiId,
      xapi: this.getXapi(vdi),
    }),
  }
}
@@ -196,12 +192,14 @@ async function handleImport(req, res, { type, name, description, vmdkData, srId,
      break
    case 'vhd':
      {
        const footer = await peekFooterFromVhdStream(part).catch(e => {
        const footer = await peekFooterFromVhdStream(vhdStream)
        try {
          checkFooter(footer)
        } catch (e) {
          if (e instanceof assert.AssertionError) {
            throw new JsonRpcError(`Vhd file had an invalid header ${e}`)
          }
          throw e
        })
      }
        vhdStream = part
        size = footer.currentSize
      }
@@ -215,16 +213,14 @@ async function handleImport(req, res, { type, name, description, vmdkData, srId,
      throw new JsonRpcError(`Unknown disk type, expected "iso", "vhd" or "vmdk", got ${type}`)
  }

  const vdi = await xapi._getOrWaitObject(
    await xapi.VDI_create({
      name_description: description,
      name_label: name,
      SR: xapi.getObject(srId, 'SR').$ref,
      virtual_size: parseSize(size),
    })
  )
  const vdi = await xapi.createVdi({
    name_description: description,
    name_label: name,
    size,
    sr: srId,
  })
  try {
    await vdi.$importContent(vhdStream, { format: diskFormat })
    await xapi.importVdiContent(vdi, vhdStream, { format: diskFormat })
    res.end(format.response(0, vdi.$id))
  } catch (e) {
    await vdi.$destroy()

@@ -1,11 +1,6 @@
import { createLogger } from '@xen-orchestra/log'
import assert from 'assert'
import { format } from 'json-rpc-peer'

import backupGuard from './_backupGuard.mjs'

const log = createLogger('xo:api:host')

// ===================================================================

export function setMaintenanceMode({ host, maintenance }) {
@@ -118,22 +113,13 @@ set.resolve = {

// FIXME: set force to false by default when correctly implemented in
// UI.
export async function restart({ bypassBackupCheck = false, host, force = true }) {
  if (bypassBackupCheck) {
    log.warn('host.restart with argument "bypassBackupCheck" set to true', { hostId: host.id })
  } else {
    await backupGuard.call(this, host.$poolId)
  }
export function restart({ host, force = true }) {
  return this.getXapi(host).rebootHost(host._xapiId, force)
}

restart.description = 'restart the host'

restart.params = {
  bypassBackupCheck: {
    type: 'boolean',
    optional: true,
  },
  id: { type: 'string' },
  force: {
    type: 'boolean',
@@ -147,22 +133,13 @@ restart.resolve = {

// -------------------------------------------------------------------

export async function restartAgent({ bypassBackupCheck = false, host }) {
  if (bypassBackupCheck) {
    log.warn('host.restartAgent with argument "bypassBackupCheck" set to true', { hostId: host.id })
  } else {
    await backupGuard.call(this, host.$poolId)
  }
export function restartAgent({ host }) {
  return this.getXapiObject(host).$restartAgent()
}

restartAgent.description = 'restart the Xen agent on the host'

restartAgent.params = {
  bypassBackupCheck: {
    type: 'boolean',
    optional: true,
  },
  id: { type: 'string' },
}

@@ -206,22 +183,13 @@ start.resolve = {

// -------------------------------------------------------------------

export async function stop({ bypassBackupCheck = false, host, bypassEvacuate }) {
  if (bypassBackupCheck) {
    log.warn('host.stop with argument "bypassBackupCheck" set to true', { hostId: host.id })
  } else {
    await backupGuard.call(this, host.$poolId)
  }
export function stop({ host, bypassEvacuate }) {
  return this.getXapi(host).shutdownHost(host._xapiId, { bypassEvacuate })
}

stop.description = 'stop the host'

stop.params = {
  bypassBackupCheck: {
    type: 'boolean',
    optional: true,
  },
  id: { type: 'string' },
  bypassEvacuate: { type: 'boolean', optional: true },
}

@@ -18,15 +18,12 @@ delete_.description = 'Delete an ipPool'
// -------------------------------------------------------------------

export function getAll(params) {
  const { apiContext } = this
  const { user } = this

  return this.getAllIpPools(apiContext.permission === 'admin' ? params.userId : apiContext.user.id)
  return this.getAllIpPools(user.permission === 'admin' ? params && params.userId : user.id)
}

getAll.description = 'List all ipPools'
getAll.params = {
  userId: { type: 'string', optional: true },
}

// -------------------------------------------------------------------

@@ -26,7 +26,7 @@ get.params = {

export async function create({ job }) {
  if (!job.userId) {
    job.userId = this.apiContext.user.id
    job.userId = this.connection.get('user_id')
  }

  return (await this.createJob(job)).id

@@ -1,5 +1,5 @@
export function createJob({ schedules, ...job }) {
  job.userId = this.apiContext.user.id
  job.userId = this.user.id
  return this.createMetadataBackupJob(job, schedules)
}

@@ -1,18 +1,11 @@
import { asyncMap } from '@xen-orchestra/async-map'
import { createLogger } from '@xen-orchestra/log'
import { createPredicate } from 'value-matcher'
import { defer as deferrable } from 'golike-defer'
import { extractIdsFromSimplePattern } from '@xen-orchestra/backups/extractIdsFromSimplePattern.js'
import { format } from 'json-rpc-peer'
import { Ref } from 'xen-api'
import { incorrectState } from 'xo-common/api-errors.js'

import backupGuard from './_backupGuard.mjs'

import { moveFirst } from '../_moveFirst.mjs'

const log = createLogger('xo:api:pool')

// ===================================================================

export async function set({
@@ -69,7 +62,7 @@ set.resolve = {
// -------------------------------------------------------------------

export async function setDefaultSr({ sr }) {
  await this.hasPermissions(this.apiContext.user.id, [[sr.$pool, 'administrate']])
  await this.hasPermissions(this.user.id, [[sr.$pool, 'administrate']])

  await this.getXapi(sr).setDefaultSr(sr._xapiId)
}
@@ -87,7 +80,7 @@ setDefaultSr.resolve = {
// -------------------------------------------------------------------

export async function setPoolMaster({ host }) {
  await this.hasPermissions(this.apiContext.user.id, [[host.$pool, 'administrate']])
  await this.hasPermissions(this.user.id, [[host.$pool, 'administrate']])

  await this.getXapi(host).setPoolMaster(host._xapiId)
}
@@ -169,51 +162,7 @@ installPatches.description = 'Install patches on hosts'

// -------------------------------------------------------------------

export const rollingUpdate = deferrable(async function ($defer, { bypassBackupCheck = false, pool }) {
  const poolId = pool.id
  if (bypassBackupCheck) {
    log.warn('pool.rollingUpdate update with argument "bypassBackupCheck" set to true', { poolId })
  } else {
    await backupGuard.call(this, poolId)
  }

  const [schedules, jobs] = await Promise.all([this.getAllSchedules(), this.getAllJobs('backup')])

  const jobsOfthePool = []
  jobs.forEach(({ id: jobId, vms }) => {
    if (vms.id !== undefined) {
      for (const vmId of extractIdsFromSimplePattern(vms)) {
        // try/catch to avoid `no such object`
        try {
          if (this.getObject(vmId).$poolId === poolId) {
            jobsOfthePool.push(jobId)
            break
          }
        } catch {}
      }
    } else {
      // Smart mode
      // For smart mode, we take a simplified approach:
      // - if smart mode is explicitly 'resident' or 'not resident' on pools, we
      //   check if it concerns this pool
      // - if not, the job may concern this pool so we add it to `jobsOfThePool`
      if (vms.$pool === undefined || createPredicate(vms.$pool)(poolId)) {
        jobsOfthePool.push(jobId)
      }
    }
  })

  // Disable schedules
  await Promise.all(
    schedules
      .filter(schedule => jobsOfthePool.includes(schedule.jobId) && schedule.enabled)
      .map(async schedule => {
        await this.updateSchedule({ ...schedule, enabled: false })
        $defer(() => this.updateSchedule({ ...schedule, enabled: true }))
      })
  )

  // Disable load balancer
export const rollingUpdate = deferrable(async function ($defer, { pool }) {
  if ((await this.getOptionalPlugin('load-balancer'))?.loaded) {
    await this.unloadPlugin('load-balancer')
    $defer(() => this.loadPlugin('load-balancer'))
@@ -223,10 +172,6 @@ export const rollingUpdate = deferrable(async function ($defer, { bypassBackupCh
  })

rollingUpdate.params = {
  bypassBackupCheck: {
    optional: true,
    type: 'boolean',
  },
  pool: { type: 'string' },
}

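The removed guard above leans on golike-defer's `$defer` to restore state however the update ends: every disabled schedule is re-enabled when the wrapped function returns or throws. A stripped-down illustration of that pattern, where the `updateSchedule` stub is a hypothetical in-memory stand-in:

```js
import { defer } from 'golike-defer'

// Hypothetical in-memory stand-in for the schedule store used above.
const schedules = new Map([['nightly', { enabled: true }]])
const updateSchedule = async (id, props) => Object.assign(schedules.get(id), props)

const rollingUpdate = defer(async function ($defer, id) {
  // Disable the schedule, and register the inverse operation: it runs when
  // the wrapped function settles, whether it resolved or threw.
  await updateSchedule(id, { enabled: false })
  $defer(() => updateSchedule(id, { enabled: true }))

  // ... perform the actual rolling pool update here ...
})

await rollingUpdate('nightly')
console.log(schedules.get('nightly').enabled) // true, restored by $defer
```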
@@ -285,7 +230,10 @@ getPatchesDifference.resolve = {
// -------------------------------------------------------------------

export async function mergeInto({ source, sources = [source], target, force }) {
  await this.checkPermissions(sources.map(source => [source, 'administrate']))
  await this.checkPermissions(
    this.user.id,
    sources.map(source => [source, 'administrate'])
  )
  return this.mergeInto({
    force,
    sources,

@@ -107,7 +107,7 @@ get.params = {
// -------------------------------------------------------------------

export async function getAll() {
  return this.getAllResourceSets(this.apiContext.user.id)
  return this.getAllResourceSets(this.user.id)
}

getAll.description = 'Get the list of all existing resource set'

@@ -17,14 +17,16 @@ get.params = {
  id: { type: 'string' },
}

export function create({ cron, enabled, jobId, name, timezone }) {
export function create({ cron, enabled, healthCheckSr, healthCheckVmsWithTags, jobId, name, timezone }) {
  return this.createSchedule({
    cron,
    enabled,
    healthCheckSr,
    healthCheckVmsWithTags,
    jobId,
    name,
    timezone,
    userId: this.apiContext.user.id,
    userId: this.connection.get('user_id'),
  })
}

@@ -33,13 +35,15 @@ create.description = 'Creates a new schedule'
create.params = {
  cron: { type: 'string' },
  enabled: { type: 'boolean', optional: true },
  healthCheckSr: { type: 'string', optional: true },
  healthCheckVmsWithTags: { type: 'array', items: { type: 'string' }, optional: true },
  jobId: { type: 'string' },
  name: { type: 'string', optional: true },
  timezone: { type: 'string', optional: true },
}

export async function set({ cron, enabled, id, jobId, name, timezone }) {
  await this.updateSchedule({ cron, enabled, id, jobId, name, timezone })
export async function set({ cron, enabled, healthCheckSr, healthCheckVmsWithTags, id, jobId, name, timezone }) {
  await this.updateSchedule({ cron, enabled, healthCheckSr, healthCheckVmsWithTags, id, jobId, name, timezone })
}

set.permission = 'admin'
@@ -47,6 +51,8 @@ set.description = 'Modifies an existing schedule'
set.params = {
  cron: { type: 'string', optional: true },
  enabled: { type: 'boolean', optional: true },
  healthCheckSr: { type: 'string', optional: true },
  healthCheckVmsWithTags: { type: 'array', items: { type: 'string' }, optional: true },
  id: { type: 'string' },
  jobId: { type: 'string', optional: true },
  name: { type: ['string', 'null'], optional: true },

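The two new parameters surface directly in the JSON-RPC API. A sketch of a `schedule.create` payload using them; the method name follows xo-server's `namespace.method` convention, and all ids are placeholders:

```js
// Hypothetical JSON-RPC payload for `schedule.create` with the health-check
// parameters added by this hunk; all ids are placeholders.
const payload = {
  jsonrpc: '2.0',
  id: 0,
  method: 'schedule.create',
  params: {
    jobId: '<backup job id>',
    cron: '0 2 * * *', // every day at 02:00
    enabled: true,
    healthCheckSr: '<uuid of the SR to restore the test VM on>',
    healthCheckVmsWithTags: ['health-check'], // only check VMs carrying this tag
  },
}
```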
@@ -5,7 +5,7 @@ import { getUserPublicProperties } from '../utils.mjs'
// ===================================================================

export async function signIn(credentials) {
  const { connection } = this.apiContext
  const { connection } = this

  const { user, expiration } = await this.authenticateUser(credentials, {
    ip: connection.get('user_ip', undefined),
@@ -47,7 +47,7 @@ signInWithToken.permission = null // user does not need to be authenticated
// -------------------------------------------------------------------

export function signOut() {
  this.apiContext.connection.unset('user_id')
  this.connection.unset('user_id')
}

signOut.description = 'sign out the user from the current session'
@@ -55,7 +55,7 @@ signOut.description = 'sign out the user from the current session'
// -------------------------------------------------------------------

export async function getUser() {
  const userId = this.apiContext.user.id
  const userId = this.connection.get('user_id')

  return userId === undefined ? null : getUserPublicProperties(await this.getUser(userId))
}

@@ -170,15 +170,18 @@ export async function createIso({
    })
  }

  const srRef = await xapi.SR_create({
    content_type: 'iso',
    device_config: deviceConfig,
    host: host._xapiRef,
    name_description: nameDescription,
    name_label: nameLabel,
    shared: type !== 'local',
    type: 'iso',
  })
  const srRef = await xapi.call(
    'SR.create',
    host._xapiRef,
    deviceConfig,
    '0', // SR size 0 because ISO
    nameLabel,
    nameDescription,
    'iso', // SR type ISO
    'iso', // SR content type ISO
    type !== 'local',
    {}
  )

  const sr = await xapi.call('SR.get_record', srRef)
  return sr.uuid
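The same substitution repeats for NFS, HBA, LVM, ext and iSCSI below: a record-style `SR_create` helper on one side, the raw positional `SR.create` XAPI call on the other. A wrapper along these lines would translate between the two; this is a sketch based on the arguments visible in these hunks, not the actual @xen-orchestra/xapi implementation:

```js
// Sketch of a record-style wrapper over the positional XAPI SR.create call.
// Defaults mirror the literal arguments in the hunks above and below.
async function SR_create(
  xapi,
  { host, device_config, name_label, name_description, type, content_type = 'user', shared = false }
) {
  return xapi.call(
    'SR.create',
    host, // host ref
    device_config,
    '0', // physical size, passed as '0' in every hunk here
    name_label,
    name_description,
    type, // e.g. 'nfs', 'lvmohba', 'lvm', 'ext', 'lvmoiscsi'
    content_type, // 'user' is the value the comments say Citrix recommends
    shared,
    {} // sm_config
  )
}
```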
@@ -244,14 +247,18 @@ export async function createNfs({
    })
  }

  const srRef = await xapi.SR_create({
    device_config: deviceConfig,
    host: host._xapiRef,
    name_description: nameDescription,
    name_label: nameLabel,
    shared: true,
    type: 'nfs', // SR NFS
  })
  const srRef = await xapi.call(
    'SR.create',
    host._xapiRef,
    deviceConfig,
    '0',
    nameLabel,
    nameDescription,
    'nfs', // SR NFS
    'user', // recommended by Citrix
    true,
    {}
  )

  const sr = await xapi.call('SR.get_record', srRef)
  return sr.uuid
@@ -295,14 +302,18 @@ export async function createHba({ host, nameLabel, nameDescription, scsiId, srUu
    })
  }

  const srRef = await xapi.SR_create({
    device_config: deviceConfig,
    host: host._xapiRef,
    name_description: nameDescription,
    name_label: nameLabel,
    shared: true,
    type: 'lvmohba', // SR LVM over HBA
  })
  const srRef = await xapi.call(
    'SR.create',
    host._xapiRef,
    deviceConfig,
    '0',
    nameLabel,
    nameDescription,
    'lvmohba', // SR LVM over HBA
    'user', // recommended by Citrix
    true,
    {}
  )

  const sr = await xapi.call('SR.get_record', srRef)
  return sr.uuid
@@ -332,14 +343,18 @@ export async function createLvm({ host, nameLabel, nameDescription, device }) {
    device,
  }

  const srRef = await xapi.SR_create({
    device_config: deviceConfig,
    host: host._xapiRef,
    name_description: nameDescription,
    name_label: nameLabel,
    shared: false,
    type: 'lvm', // SR LVM
  })
  const srRef = await xapi.call(
    'SR.create',
    host._xapiRef,
    deviceConfig,
    '0',
    nameLabel,
    nameDescription,
    'lvm', // SR LVM
    'user', // recommended by Citrix
    false,
    {}
  )

  const sr = await xapi.call('SR.get_record', srRef)
  return sr.uuid
@@ -368,14 +383,18 @@ export async function createExt({ host, nameLabel, nameDescription, device }) {
    device,
  }

  const srRef = await xapi.SR_create({
    device_config: deviceConfig,
    host: host._xapiRef,
    name_description: nameDescription,
    name_label: nameLabel,
    shared: false,
    type: 'ext', // SR ext
  })
  const srRef = await xapi.call(
    'SR.create',
    host._xapiRef,
    deviceConfig,
    '0',
    nameLabel,
    nameDescription,
    'ext', // SR ext
    'user', // recommended by Citrix
    false,
    {}
  )

  const sr = await xapi.call('SR.get_record', srRef)
  return sr.uuid
@@ -439,18 +458,13 @@ export async function createZfs({ host, nameLabel, nameDescription, location })
  const xapi = this.getXapi(host)
  // only XCP-ng >= 8.2 supports the ZFS SR
  const types = await xapi.call('SR.get_supported_types')
  return await xapi.getField(
    'SR',
    await xapi.SR_create({
      device_config: { location },
      host: host._xapiRef,
      name_description: nameDescription,
      name_label: nameLabel,
      shared: false,
      type: types.includes('zfs') ? 'zfs' : 'file',
    }),
    'uuid'
  )
  return xapi.createSr({
    hostRef: host._xapiRef,
    name_label: nameLabel,
    name_description: nameDescription,
    type: types.includes('zfs') ? 'zfs' : 'file',
    device_config: { location },
  })
}

createZfs.params = {
@@ -600,14 +614,18 @@ export async function createIscsi({
    })
  }

  const srRef = await xapi.SR_create({
    device_config: deviceConfig,
    host: host._xapiRef,
    name_description: nameDescription,
    name_label: nameLabel,
    shared: true,
    type: 'lvmoiscsi', // SR LVM over iSCSI
  })
  const srRef = await xapi.call(
    'SR.create',
    host._xapiRef,
    deviceConfig,
    '0',
    nameLabel,
    nameDescription,
    'lvmoiscsi', // SR LVM over iSCSI
    'user', // recommended by Citrix
    true,
    {}
  )

  const sr = await xapi.call('SR.get_record', srRef)
  return sr.uuid
@@ -913,38 +931,3 @@ stats.params = {
stats.resolve = {
  sr: ['id', 'SR', 'view'],
}

// -------------------------------------------------------------------

export function enableMaintenanceMode({ sr, vmsToShutdown }) {
  return this.getXapiObject(sr).$enableMaintenanceMode({ vmsToShutdown })
}

enableMaintenanceMode.description = 'switch the SR into maintenance mode'

enableMaintenanceMode.params = {
  id: { type: 'string' },
  vmsToShutdown: { type: 'array', items: { type: 'string' }, optional: true },
}

enableMaintenanceMode.permission = 'admin'

enableMaintenanceMode.resolve = {
  sr: ['id', 'SR', 'operate'],
}

export function disableMaintenanceMode({ sr }) {
  return this.getXapiObject(sr).$disableMaintenanceMode()
}

disableMaintenanceMode.description = 'disable the maintenance of the SR'

disableMaintenanceMode.params = {
  id: { type: 'string' },
}

disableMaintenanceMode.permission = 'admin'

disableMaintenanceMode.resolve = {
  sr: ['id', 'SR', 'operate'],
}

@@ -73,7 +73,4 @@ export function methodSignature({ method: name }) {
  ]
}
methodSignature.description = 'returns the signature of an API method'
methodSignature.params = {
  method: { type: 'string' },
}
methodSignature.permission = null // user does not need to be authenticated

@@ -1,11 +1,10 @@
// TODO: Prevent token connections from creating tokens.
// TODO: Token permission.
export async function create({ description, expiresIn }) {
export async function create({ expiresIn }) {
  return (
    await this.createAuthenticationToken({
      description,
      expiresIn,
      userId: this.apiContext.user.id,
      userId: this.connection.get('user_id'),
    })
  ).id
}
@@ -13,10 +12,6 @@ export async function create({ description, expiresIn }) {
create.description = 'create a new authentication token'

create.params = {
  description: {
    optional: true,
    type: 'string',
  },
  expiresIn: {
    optional: true,
    type: ['number', 'string'],
@@ -25,6 +20,7 @@ create.params = {

// -------------------------------------------------------------------

// TODO: a user should be able to delete their own tokens.
async function delete_({ token: id }) {
  await this.deleteAuthenticationToken(id)
}
@@ -33,6 +29,8 @@ export { delete_ as delete }

delete_.description = 'delete an existing authentication token'

delete_.permission = 'admin'

delete_.params = {
  token: { type: 'string' },
}
@@ -42,7 +40,7 @@ delete_.params = {
export async function deleteAll({ except }) {
  await this.deleteAuthenticationTokens({
    filter: {
      user_id: this.apiContext.user.id,
      user_id: this.connection.get('user_id'),
      id: {
        __not: except,
      },
@@ -55,16 +53,3 @@ deleteAll.description = 'delete all tokens of the current user except the curren
deleteAll.params = {
  except: { type: 'string', optional: true },
}

// -------------------------------------------------------------------

export async function set({ id, ...props }) {
  await this.updateAuthenticationToken({ id, user_id: this.apiContext.user.id }, props)
}

set.description = 'changes the properties of an existing token'

set.params = {
  description: { type: ['null', 'string'], optional: true },
  id: { type: 'string' },
}

@@ -22,7 +22,7 @@ create.params = {

// Deletes an existing user.
async function delete_({ id }) {
  if (id === this.apiContext.user.id) {
  if (id === this.connection.get('user_id')) {
    throw invalidParameters('a user cannot delete itself')
  }

@@ -58,18 +58,10 @@ getAll.permission = 'admin'

// -------------------------------------------------------------------

export function getAuthenticationTokens() {
  return this.getAuthenticationTokensForUser(this.apiContext.user.id)
}

getAuthenticationTokens.description = 'returns authentication tokens of the current user'

// -------------------------------------------------------------------

export async function set({ id, email, password, permission, preferences }) {
  const isAdmin = this.apiContext.permission === 'admin'
  const isAdmin = this.user && this.user.permission === 'admin'
  if (isAdmin) {
    if (permission && id === this.apiContext.user.id) {
    if (permission && id === this.connection.get('user_id')) {
      throw invalidParameters('a user cannot change its own permission')
    }
  } else if (email || password || permission) {
@@ -97,7 +89,7 @@ set.params = {
// -------------------------------------------------------------------

export async function changePassword({ oldPassword, newPassword }) {
  const { user } = this.apiContext
  const { user } = this

  if (!isEmpty(user.authProviders)) {
    throw forbiddenOperation('change password', 'synchronized users cannot change their passwords')

@@ -1,7 +1,7 @@
// FIXME: too low level, should be removed.

async function delete_({ vbd }) {
  await this.getXapiObject(vbd).$destroy()
  await this.getXapi(vbd).deleteVbd(vbd)
}

delete_.params = {
@@ -17,7 +17,8 @@ export { delete_ as delete }
// -------------------------------------------------------------------

export async function disconnect({ vbd }) {
  await this.getXapiObject(vbd).$unplug()
  const xapi = this.getXapi(vbd)
  await xapi.disconnectVbd(vbd._xapiRef)
}

disconnect.params = {

@@ -54,14 +54,14 @@ export const set = defer(async function ($defer, params) {
    vbds.length === 1 &&
    (resourceSetId = xapi.xo.getData(this.getObject(vbds[0], 'VBD').VM, 'resourceSet')) !== undefined
  ) {
    if (this.apiContext.permission !== 'admin') {
      await this.checkResourceSetConstraints(resourceSetId, this.apiContext.user.id)
    if (this.user.permission !== 'admin') {
      await this.checkResourceSetConstraints(resourceSetId, this.user.id)
    }

    await this.allocateLimitsInResourceSet({ disk: size - vdi.size }, resourceSetId)
    $defer.onFailure(() => this.releaseLimitsInResourceSet({ disk: size - vdi.size }, resourceSetId))
  } else {
    await this.checkPermissions([[vdi.$SR, 'operate']])
    await this.checkPermissions(this.user.id, [[vdi.$SR, 'operate']])
  }

  await xapi.resizeVdi(ref, size)
@@ -105,11 +105,11 @@ set.resolve = {
export async function migrate({ vdi, sr, resourceSet }) {
  const xapi = this.getXapi(vdi)

  if (this.apiContext.permission !== 'admin') {
  if (this.user.permission !== 'admin') {
    if (resourceSet !== undefined) {
      await this.checkResourceSetConstraints(resourceSet, this.apiContext.user.id, [sr.id])
      await this.checkResourceSetConstraints(resourceSet, this.user.id, [sr.id])
    } else {
      await this.checkPermissions([[sr.id, 'administrate']])
      await this.checkPermissions(this.user.id, [[sr.id, 'administrate']])
    }
  }

@@ -84,19 +84,19 @@ export async function set({
  const newIpAddresses = newIpv4Addresses.concat(newIpv6Addresses)

  if (lockingMode !== undefined) {
    await this.checkPermissions([[network?.id ?? vif.$network, 'operate']])
    await this.checkPermissions(this.user.id, [[network?.id ?? vif.$network, 'operate']])
  }

  if (isNetworkChanged || mac) {
    const networkId = network?.id
    if (mac !== undefined && this.apiContext.permission !== 'admin') {
      await this.checkPermissions([[networkId ?? vif.$network, 'administrate']])
    if (mac !== undefined && this.user.permission !== 'admin') {
      await this.checkPermissions(this.user.id, [[networkId ?? vif.$network, 'administrate']])
    }
    if (networkId !== undefined && this.apiContext.permission !== 'admin') {
    if (networkId !== undefined && this.user.permission !== 'admin') {
      if (resourceSet !== undefined) {
        await this.checkResourceSetConstraints(resourceSet, this.apiContext.user.id, [networkId])
        await this.checkResourceSetConstraints(resourceSet, this.user.id, [networkId])
      } else {
        await this.checkPermissions([[networkId, 'operate']])
        await this.checkPermissions(this.user.id, [[networkId, 'operate']])
      }
    }

Some files were not shown because too many files have changed in this diff.