Compare commits

..

15 Commits

Author SHA1 Message Date
Julien Fontanet
71d2c28899 WiP: should proxy 2022-11-29 10:31:07 +01:00
Julien Fontanet
18ece4b90c fix(xo-server/MigrateVm): fix uuid import
Introduced by 72c69d791

Fixes #6561
2022-11-29 10:30:09 +01:00
Florent Beauchamp
3862fb2664 fix(fs/rename): throw ENOENT when source file is missing 2022-11-28 17:33:57 +01:00
Florent BEAUCHAMP
72c69d791a feat(xo-server): implement warm migration backend (#6549) 2022-11-28 17:28:19 +01:00
Julien Fontanet
d6192a4a7a chore: remove unused travis-tests.js 2022-11-28 15:51:47 +01:00
Julien Fontanet
0f824ffa70 lint(vhd-lib): remove unused var and fix formatting
Introduced by f6c227e7f
2022-11-26 10:10:08 +01:00
Florent BEAUCHAMP
f6c227e7f5 feat(vhd-lib): merge resume can resume when rename fails (#6530) 2022-11-25 20:51:33 +01:00
Julien Fontanet
9d5bc8af6e feat: run-script.js now only shows output on error by default 2022-11-25 15:45:52 +01:00
Julien Fontanet
9480079770 feat: script test-unit now bails on first error 2022-11-25 15:45:08 +01:00
Julien Fontanet
54fe9147ac chore: only enable Babel debug on prod builds
The output was making test results hard to see.
2022-11-25 14:43:36 +01:00
Gabriel Gunullu
b6a0477232 feat(xo-server-transport-nagios): report backed up VM individually (#6534) 2022-11-25 14:36:41 +01:00
Julien Fontanet
c60644c578 chore(lite): merge lint with the root config 2022-11-25 11:23:04 +01:00
Thierry Goettelmann
abdce94c5f feat(lite): type check on test (#6547) 2022-11-25 11:19:58 +01:00
Mathieu
d7dee04013 feat(xo-web/settings/users): remove OTP of users in admin panel (#6541)
See https://xcp-ng.org/forum/topic/6521
2022-11-25 11:15:07 +01:00
Julien Fontanet
dfc62132b7 fix(xo-web/remote): prevent browser from autocompleting encryption key 2022-11-24 18:48:45 +01:00
21 changed files with 635 additions and 220 deletions

View File

@@ -5,7 +5,6 @@ const PRESETS_RE = /^@babel\/preset-.+$/
const NODE_ENV = process.env.NODE_ENV || 'development'
const __PROD__ = NODE_ENV === 'production'
const __TEST__ = NODE_ENV === 'test'
const configs = {
'@babel/plugin-proposal-decorators': {
@@ -15,7 +14,7 @@ const configs = {
proposal: 'minimal',
},
'@babel/preset-env': {
debug: !__TEST__,
debug: __PROD__,
// disabled until https://github.com/babel/babel/issues/8323 is resolved
// loose: true,

View File

@@ -297,6 +297,7 @@ export default class RemoteHandlerAbstract {
await this._mktree(dirname(newPath))
return this.#rename(oldPath, newPath, { checksum }, false)
}
throw error
}
}

View File

@@ -7,8 +7,8 @@
"preview": "vite preview --port 4173",
"build-only": "GIT_HEAD=$(git rev-parse HEAD) vite build",
"deploy": "./scripts/deploy.sh",
"type-check": "vue-tsc --noEmit",
"lint": "eslint . --ext .vue,.js,.jsx,.cjs,.mjs,.ts,.tsx,.cts,.mts --fix --ignore-path .gitignore"
"test": "yarn run type-check",
"type-check": "vue-tsc --noEmit"
},
"dependencies": {
"@fortawesome/fontawesome-svg-core": "^6.1.1",

View File

@@ -9,6 +9,8 @@
- [Remotes] Prevent remote path from ending with `xo-vm-backups` as it's usually a mistake
- [OVA export] Speed up OVA generation by 2. Generated file will be bigger (as big as uncompressed XVA) (PR [#6487](https://github.com/vatesfr/xen-orchestra/pull/6487))
- [Settings/Users] Add `Remove` button to delete OTP of users from the admin panel [Forum#6521](https://xcp-ng.org/forum/topic/6521/remove-totp-on-a-user-account) (PR [#6541](https://github.com/vatesfr/xen-orchestra/pull/6541))
- [Plugin/transport-nagios] XO now reports backed up VMs individually with the VM name label used as *host* and backup job name used as *service*
### Bug fixes
@@ -17,6 +19,7 @@
- [Dashboard/Health] Fix `Unknown SR` and `Unknown VDI` in Unhealthy VDIs (PR [#6519](https://github.com/vatesfr/xen-orchestra/pull/6519))
- [Delta Backup] Can now recover a VHD merge that failed at the beginning
- [Delta Backup] Fix `ENOENT` errors when merging a VHD directory on non-S3 remote
- [Remote] Prevent the browser from auto-completing the encryption key field
### Packages to release
@@ -40,6 +43,7 @@
- vhd-lib minor
- xo-cli patch
- xo-server minor
- xo-server-transport-nagios major
- xo-vmdk-to-vhd minor
- xo-web minor

View File

@@ -74,7 +74,7 @@
"scripts/run-changed-pkgs.js test",
"prettier --ignore-unknown --write"
],
"*.{{,c,m}j,t}s{,x}": [
"*.{{{,c,m}j,t}s{,x},vue}": [
"eslint --ignore-pattern '!*'",
"jest --testRegex='^(?!.*.integ.spec.js$).*.spec.js$' --findRelatedTests --passWithNoTests"
]
@@ -82,7 +82,7 @@
"private": true,
"scripts": {
"build": "scripts/run-script.js --parallel --concurrency 2 build",
"ci": "yarn && scripts/run-script.js --parallel prepare && yarn test-lint && yarn test-integration",
"ci": "yarn && yarn build && yarn test-lint && yarn test-integration",
"clean": "scripts/run-script.js --parallel clean",
"dev": "scripts/run-script.js --parallel dev",
"dev-test": "jest --bail --watch \"^(?!.*\\.integ\\.spec\\.js$)\"",
@@ -93,8 +93,7 @@
"test": "npm run test-lint && npm run test-unit",
"test-integration": "jest \".integ\\.spec\\.js$\"",
"test-lint": "eslint --ignore-path .gitignore --ignore-pattern packages/xo-web .",
"test-unit": "jest \"^(?!.*\\.integ\\.spec\\.js$)\" && scripts/run-script.js test",
"travis-tests": "scripts/travis-tests.js"
"test-unit": "jest \"^(?!.*\\.integ\\.spec\\.js$)\" && scripts/run-script.js --bail test"
},
"workspaces": [
"@*/*",

View File

@@ -6,10 +6,10 @@ const fs = require('fs-extra')
const rimraf = require('rimraf')
const tmp = require('tmp')
const { getSyncedHandler } = require('@xen-orchestra/fs')
const { pFromCallback } = require('promise-toolbox')
const { pFromCallback, Disposable } = require('promise-toolbox')
const { VhdFile, chainVhd } = require('./index')
const { _cleanupVhds: cleanupVhds, mergeVhdChain } = require('./merge')
const { VhdFile, chainVhd, openVhd } = require('./index')
const { mergeVhdChain } = require('./merge')
const { checkFile, createRandomFile, convertFromRawToVhd } = require('./tests/utils')
@@ -163,6 +163,77 @@ test('it can resume a simple merge ', async () => {
}
})
test('it can resume a failed renaming ', async () => {
const mbOfFather = 8
const mbOfChildren = 4
const parentRandomFileName = `${tempDir}/randomfile`
await createRandomFile(`${tempDir}/randomfile`, mbOfFather)
await convertFromRawToVhd(`${tempDir}/randomfile`, `${tempDir}/parent.vhd`)
const parentVhd = new VhdFile(handler, 'parent.vhd')
await parentVhd.readHeaderAndFooter()
await createRandomFile(`${tempDir}/small_randomfile`, mbOfChildren)
await convertFromRawToVhd(`${tempDir}/small_randomfile`, `${tempDir}/child1.vhd`)
await chainVhd(handler, 'parent.vhd', handler, 'child1.vhd', true)
const childVhd = new VhdFile(handler, 'child1.vhd')
await childVhd.readHeaderAndFooter()
await handler.writeFile(
'.parent.vhd.merge.json',
JSON.stringify({
parent: {
header: parentVhd.header.checksum,
},
child: {
header: childVhd.header.checksum,
},
step: 'cleanupVhds',
})
)
// expect merge to succeed
await mergeVhdChain(handler, ['parent.vhd', 'child1.vhd'])
// parent have been renamed
expect(await fs.exists(`${tempDir}/parent.vhd`)).toBeFalsy()
expect(await fs.exists(`${tempDir}/.parent.vhd.merge.json`)).toBeFalsy()
Disposable.use(openVhd(handler, 'child1.vhd'), async mergedVhd => {
await mergedVhd.readBlockAllocationTable()
// the resume is at the 'cleanupVhds' step: it should not have merged blocks and should still contain the parent data
let offset = 0
const fd = await fs.open(parentRandomFileName, 'r')
for await (const block of mergedVhd.blocks()) {
const blockContent = block.data
const buffer = Buffer.alloc(blockContent.length)
await fs.read(fd, buffer, 0, buffer.length, offset)
expect(buffer.equals(blockContent)).toEqual(true)
offset += childVhd.header.blockSize
}
})
// merge succeeds if renaming was already done
await handler.writeFile(
'.parent.vhd.merge.json',
JSON.stringify({
parent: {
header: parentVhd.header.checksum,
},
child: {
header: childVhd.header.checksum,
},
step: 'cleanupVhds',
})
)
await mergeVhdChain(handler, ['parent.vhd', 'child1.vhd'])
expect(await fs.exists(`${tempDir}/parent.vhd`)).toBeFalsy()
expect(await fs.exists(`${tempDir}/child1.vhd`)).toBeTruthy()
expect(await fs.exists(`${tempDir}/.parent.vhd.merge.json`)).toBeFalsy()
})
test('it can resume a multiple merge ', async () => {
const mbOfFather = 8
const mbOfChildren = 6
@@ -226,7 +297,11 @@ test('it can resume a multiple merge ', async () => {
})
)
// it should succeed
await mergeVhdChain(handler, ['parent.vhd', 'child.vhd', 'grandchild.vhd'])
await mergeVhdChain(handler, ['parent.vhd', 'child.vhd', 'grandchild.vhd'], { removeUnused: true })
expect(await fs.exists(`${tempDir}/parent.vhd`)).toBeFalsy()
expect(await fs.exists(`${tempDir}/child.vhd`)).toBeFalsy()
expect(await fs.exists(`${tempDir}/grandchild.vhd`)).toBeTruthy()
expect(await fs.exists(`${tempDir}/.parent.vhd.merge.json`)).toBeFalsy()
})
test('it merge multiple child in one pass ', async () => {
@@ -278,18 +353,3 @@ test('it merge multiple child in one pass ', async () => {
offset += parentVhd.header.blockSize
}
})
test('it cleans vhd mergedfiles', async () => {
await handler.writeFile('parent', 'parentData')
await handler.writeFile('child1', 'child1Data')
await handler.writeFile('child2', 'child2Data')
await handler.writeFile('child3', 'child3Data')
await cleanupVhds(handler, ['parent', 'child1', 'child2', 'child3'], { merge: true, removeUnused: true })
// only child3 should stay, with the data of parent
const [child3, ...other] = await handler.list('.')
expect(other.length).toEqual(0)
expect(child3).toEqual('child3')
expect((await handler.readFile('child3')).toString('utf8')).toEqual('parentData')
})

View File

@@ -41,91 +41,97 @@ const { warn } = createLogger('vhd-lib:merge')
// | |
// \_____________rename_____________/
// write the merge progress file at most every `delay` seconds
function makeThrottledWriter(handler, path, delay) {
let lastWrite = 0
return async json => {
class Merger {
#chain
#childrenPaths
#handler
#isResuming = false
#lastStateWrittenAt = 0
#logInfo
#mergeBlockConcurrency
#onProgress
#parentPath
#removeUnused
#state
#statePath
constructor(handler, chain, { onProgress, logInfo, removeUnused, mergeBlockConcurrency }) {
this.#chain = chain
this.#handler = handler
this.#parentPath = chain[0]
this.#childrenPaths = chain.slice(1)
this.#logInfo = logInfo
this.#onProgress = onProgress
this.#removeUnused = removeUnused
this.#mergeBlockConcurrency = mergeBlockConcurrency
this.#statePath = dirname(this.#parentPath) + '/.' + basename(this.#parentPath) + '.merge.json'
}
async #writeState() {
await this.#handler.writeFile(this.#statePath, JSON.stringify(this.#state), { flags: 'w' }).catch(warn)
}
async #writeStateThrottled() {
const delay = 10e3
const now = Date.now()
if (now - lastWrite > delay) {
lastWrite = now
await handler.writeFile(path, JSON.stringify(json), { flags: 'w' }).catch(warn)
if (now - this.#lastStateWrittenAt > delay) {
this.#lastStateWrittenAt = now
await this.#writeState()
}
}
}
// make the rename / delete part of the merge process
// will fail if parent and children are in different remote
async function cleanupVhds(handler, chain, { logInfo = noop, removeUnused = false } = {}) {
const parent = chain[0]
const children = chain.slice(1, -1)
const mergeTargetChild = chain[chain.length - 1]
await handler.rename(parent, mergeTargetChild)
return asyncMap(children, child => {
logInfo(`the VHD child is already merged`, { child })
if (removeUnused) {
logInfo(`deleting merged VHD child`, { child })
return VhdAbstract.unlink(handler, child)
}
})
}
module.exports._cleanupVhds = cleanupVhds
// Merge a chain of VHDs into a single VHD
module.exports.mergeVhdChain = limitConcurrency(2)(async function mergeVhdChain(
handler,
chain,
{ onProgress = noop, logInfo = noop, removeUnused = false, mergeBlockConcurrency = 2 } = {}
) {
assert(chain.length >= 2)
const parentPath = chain[0]
const childrenPaths = chain.slice(1)
const mergeStatePath = dirname(parentPath) + '/.' + basename(parentPath) + '.merge.json'
return await Disposable.use(async function* () {
let mergeState
let isResuming = false
async merge() {
try {
const mergeStateContent = await handler.readFile(mergeStatePath)
mergeState = JSON.parse(mergeStateContent)
const mergeStateContent = await this.#handler.readFile(this.#statePath)
this.#state = JSON.parse(mergeStateContent)
// work-around a bug introduced in 97d94b795
//
// currentBlock could be `null` due to the JSON.stringify of a `NaN` value
if (mergeState.currentBlock === null) {
mergeState.currentBlock = 0
if (this.#state.currentBlock === null) {
this.#state.currentBlock = 0
}
this.#isResuming = true
} catch (error) {
if (error.code !== 'ENOENT') {
warn('problem while checking the merge state', { error })
}
}
/* eslint-disable no-fallthrough */
switch (this.#state?.step ?? 'mergeBlocks') {
case 'mergeBlocks':
await this.#step_mergeBlocks()
case 'cleanupVhds':
await this.#step_cleanVhds()
return this.#cleanup()
default:
warn(`Step ${this.#state.step} is unknown`, { state: this.#state })
}
/* eslint-enable no-fallthrough */
}
async *#openVhds() {
// during merging, the end footer of the parent can be overwritten by new blocks
// we should use it as a way to check vhd health
const parentVhd = yield openVhd(handler, parentPath, {
const parentVhd = yield openVhd(this.#handler, this.#parentPath, {
flags: 'r+',
checkSecondFooter: mergeState === undefined,
checkSecondFooter: this.#state === undefined,
})
let childVhd
const parentIsVhdDirectory = parentVhd instanceof VhdDirectory
let childIsVhdDirectory
if (childrenPaths.length !== 1) {
childVhd = yield VhdSynthetic.open(handler, childrenPaths)
if (this.#childrenPaths.length !== 1) {
childVhd = yield VhdSynthetic.open(this.#handler, this.#childrenPaths)
childIsVhdDirectory = childVhd.checkVhdsClass(VhdDirectory)
} else {
childVhd = yield openVhd(handler, childrenPaths[0])
childVhd = yield openVhd(this.#handler, this.#childrenPaths[0])
childIsVhdDirectory = childVhd instanceof VhdDirectory
}
// merging a VhdFile must not run concurrently with the potential block reordering after a change
const concurrency = parentIsVhdDirectory && childIsVhdDirectory ? mergeBlockConcurrency : 1
if (mergeState === undefined) {
this.#mergeBlockConcurrency = parentIsVhdDirectory && childIsVhdDirectory ? this.#mergeBlockConcurrency : 1
if (this.#state === undefined) {
// merge should be along a vhd chain
assert.strictEqual(UUID.stringify(childVhd.header.parentUuid), UUID.stringify(parentVhd.footer.uuid))
const parentDiskType = parentVhd.footer.diskType
@@ -133,70 +139,86 @@ module.exports.mergeVhdChain = limitConcurrency(2)(async function mergeVhdChain(
assert.strictEqual(childVhd.footer.diskType, DISK_TYPES.DIFFERENCING)
assert.strictEqual(childVhd.header.blockSize, parentVhd.header.blockSize)
} else {
isResuming = true
// vhd should not have changed to resume
assert.strictEqual(parentVhd.header.checksum, mergeState.parent.header)
assert.strictEqual(childVhd.header.checksum, mergeState.child.header)
assert.strictEqual(parentVhd.header.checksum, this.#state.parent.header)
assert.strictEqual(childVhd.header.checksum, this.#state.child.header)
}
// Read allocation table of child/parent.
await Promise.all([parentVhd.readBlockAllocationTable(), childVhd.readBlockAllocationTable()])
return { childVhd, parentVhd }
}
async #step_mergeBlocks() {
const self = this
await Disposable.use(async function* () {
const { childVhd, parentVhd } = yield* self.#openVhds()
const { maxTableEntries } = childVhd.header
if (self.#state === undefined) {
await parentVhd.ensureBatSize(childVhd.header.maxTableEntries)
self.#state = {
child: { header: childVhd.header.checksum },
parent: { header: parentVhd.header.checksum },
currentBlock: 0,
mergedDataSize: 0,
step: 'mergeBlocks',
chain: self.#chain.map(vhdPath => handlerPath.relativeFromFile(self.#statePath, vhdPath)),
}
// finds first allocated block for the 2 following loops
while (self.#state.currentBlock < maxTableEntries && !childVhd.containsBlock(self.#state.currentBlock)) {
++self.#state.currentBlock
}
await self.#writeState()
}
await self.#mergeBlocks(parentVhd, childVhd)
await self.#updateHeaders(parentVhd, childVhd)
})
}
async #mergeBlocks(parentVhd, childVhd) {
const { maxTableEntries } = childVhd.header
if (mergeState === undefined) {
await parentVhd.ensureBatSize(childVhd.header.maxTableEntries)
mergeState = {
child: { header: childVhd.header.checksum },
parent: { header: parentVhd.header.checksum },
currentBlock: 0,
mergedDataSize: 0,
chain: chain.map(vhdPath => handlerPath.relativeFromFile(mergeStatePath, vhdPath)),
}
// finds first allocated block for the 2 following loops
while (mergeState.currentBlock < maxTableEntries && !childVhd.containsBlock(mergeState.currentBlock)) {
++mergeState.currentBlock
}
}
// counts number of allocated blocks
const toMerge = []
for (let block = mergeState.currentBlock; block < maxTableEntries; block++) {
for (let block = this.#state.currentBlock; block < maxTableEntries; block++) {
if (childVhd.containsBlock(block)) {
toMerge.push(block)
}
}
const nBlocks = toMerge.length
onProgress({ total: nBlocks, done: 0 })
this.#onProgress({ total: nBlocks, done: 0 })
const merging = new Set()
let counter = 0
const mergeStateWriter = makeThrottledWriter(handler, mergeStatePath, 10e3)
await mergeStateWriter(mergeState)
await asyncEach(
toMerge,
async blockId => {
merging.add(blockId)
mergeState.mergedDataSize += await parentVhd.mergeBlock(childVhd, blockId, isResuming)
this.#state.mergedDataSize += await parentVhd.mergeBlock(childVhd, blockId, this.#isResuming)
mergeState.currentBlock = Math.min(...merging)
this.#state.currentBlock = Math.min(...merging)
merging.delete(blockId)
onProgress({
this.#onProgress({
total: nBlocks,
done: counter + 1,
})
counter++
mergeStateWriter(mergeState)
this.#writeStateThrottled()
},
{
concurrency,
concurrency: this.#mergeBlockConcurrency,
}
)
onProgress({ total: nBlocks, done: nBlocks })
// ensure data size is correct
await this.#writeState()
this.#onProgress({ total: nBlocks, done: nBlocks })
}
async #updateHeaders(parentVhd, childVhd) {
// some blocks could have been created or moved in parent : write bat
await parentVhd.writeBlockAllocationTable()
@@ -212,19 +234,70 @@ module.exports.mergeVhdChain = limitConcurrency(2)(async function mergeVhdChain(
// necessary to update values and to recreate the footer after block
// creation
await parentVhd.writeFooter()
}
await cleanupVhds(handler, chain, { logInfo, removeUnused })
// make the rename / delete part of the merge process
// will fail if parent and children are in different remote
async #step_cleanVhds() {
assert.notEqual(this.#state, undefined)
this.#state.step = 'cleanupVhds'
await this.#writeState()
// should be a disposable
handler.unlink(mergeStatePath).catch(warn)
const chain = this.#chain
const handler = this.#handler
return mergeState.mergedDataSize
}).catch(error => {
const parent = chain[0]
const children = chain.slice(1, -1)
const mergeTargetChild = chain[chain.length - 1]
// in case parent is an alias, renaming parent to mergeTargetChild will keep the real data
// of mergeTargetChild in the data folder
// mergeTargetChild is already in an incomplete state, its blocks have been transferred to parent
await VhdAbstract.unlink(handler, mergeTargetChild)
try {
await handler.rename(parent, mergeTargetChild)
} catch (error) {
// maybe the renaming was already successful during merge
if (error.code === 'ENOENT' && this.#isResuming) {
Disposable.use(openVhd(handler, mergeTargetChild), vhd => {
// we are sure that mergeTargetChild is the right one
assert.strictEqual(vhd.header.checksum, this.#state.parent.header)
})
this.#logInfo(`the VHD parent was already renamed`, { parent, mergeTargetChild })
}
}
await asyncMap(children, child => {
this.#logInfo(`the VHD child is already merged`, { child })
if (this.#removeUnused) {
this.#logInfo(`deleting merged VHD child`, { child })
return VhdAbstract.unlink(handler, child)
}
})
}
async #cleanup() {
const mergedSize = this.#state?.mergedDataSize ?? 0
await this.#handler.unlink(this.#statePath).catch(warn)
return mergedSize
}
}
module.exports.mergeVhdChain = limitConcurrency(2)(async function mergeVhdChain(
handler,
chain,
{ onProgress = noop, logInfo = noop, removeUnused = false, mergeBlockConcurrency = 2 } = {}
) {
const merger = new Merger(handler, chain, { onProgress, logInfo, removeUnused, mergeBlockConcurrency })
try {
return merger.merge()
} catch (error) {
try {
error.chain = chain
} finally {
// eslint-disable-next-line no-unsafe-finally
throw error
}
})
}
})

View File

@@ -284,8 +284,6 @@ class BackupReportsXoPlugin {
getErrorMarkdown(log),
]
const nagiosText = []
// body
for (const status of STATUS) {
const tasks = tasksByStatus[status]
@@ -310,10 +308,6 @@ class BackupReportsXoPlugin {
const { title, body } = taskMarkdown
const subMarkdown = [...body, ...getWarningsMarkdown(task.warnings)]
if (task.status !== 'success') {
nagiosText.push(`[${task.status}] ${title}`)
}
for (const subTask of task.tasks ?? []) {
const taskMarkdown = await getMarkdown(subTask, { formatDate, xo })
if (taskMarkdown === undefined) {
@@ -335,10 +329,6 @@ class BackupReportsXoPlugin {
subject: `[Xen Orchestra] ${log.status} Metadata backup report for ${log.jobName} ${STATUS_ICON[log.status]}`,
markdown: toMarkdown(markdown),
success: log.status === 'success',
nagiosMarkdown:
log.status === 'success'
? `[Xen Orchestra] [Success] Metadata backup report for ${log.jobName}`
: `[Xen Orchestra] [${log.status}] Metadata backup report for ${log.jobName} - ${nagiosText.join(' ')}`,
})
}
@@ -369,9 +359,6 @@ class BackupReportsXoPlugin {
mailReceivers,
markdown: toMarkdown(markdown),
success: false,
nagiosMarkdown: `[Xen Orchestra] [${log.status}] Backup report for ${jobName}${
log.result?.message !== undefined ? ` - Error : ${log.result.message}` : ''
}`,
})
}
@@ -379,7 +366,6 @@ class BackupReportsXoPlugin {
const skippedVmsText = []
const successfulVmsText = []
const interruptedVmsText = []
const nagiosText = []
let globalMergeSize = 0
let globalTransferSize = 0
@@ -401,16 +387,13 @@ class BackupReportsXoPlugin {
if (type === 'SR') {
const { name_label: name, uuid } = xo.getObject(id)
failedTasksText.push(`### ${name}`, '', `- **UUID**: ${uuid}`)
nagiosText.push(`[(${type} failed) ${name} : ${taskLog.result.message} ]`)
} else {
const { name } = await xo.getRemote(id)
failedTasksText.push(`### ${name}`, '', `- **UUID**: ${id}`)
nagiosText.push(`[(${type} failed) ${name} : ${taskLog.result.message} ]`)
}
} catch (error) {
logger.warn(error)
failedTasksText.push(`### ${UNKNOWN_ITEM}`, '', `- **UUID**: ${id}`)
nagiosText.push(`[(${type} failed) ${id} : ${taskLog.result.message} ]`)
}
failedTasksText.push(
@@ -553,22 +536,17 @@ class BackupReportsXoPlugin {
: taskLog.result.message
}`
)
nagiosText.push(`[(Skipped) ${vm !== undefined ? vm.name_label : 'undefined'} : ${taskLog.result.message} ]`)
} else {
++nFailures
failedTasksText.push(...text, `- **Error**: ${taskLog.result.message}`)
nagiosText.push(`[(Failed) ${vm !== undefined ? vm.name_label : 'undefined'} : ${taskLog.result.message} ]`)
}
} else {
if (taskLog.status === 'failure') {
++nFailures
failedTasksText.push(...text, ...subText)
nagiosText.push(`[${vm !== undefined ? vm.name_label : 'undefined'}: (failed)[${failedSubTasks.toString()}]]`)
} else if (taskLog.status === 'interrupted') {
++nInterrupted
interruptedVmsText.push(...text, ...subText)
nagiosText.push(`[(Interrupted) ${vm !== undefined ? vm.name_label : 'undefined'}]`)
} else {
++nSuccesses
successfulVmsText.push(...text, ...subText)
@@ -614,16 +592,10 @@ class BackupReportsXoPlugin {
markdown: toMarkdown(markdown),
subject: `[Xen Orchestra] ${log.status} Backup report for ${jobName} ${STATUS_ICON[log.status]}`,
success: log.status === 'success',
nagiosMarkdown:
log.status === 'success'
? `[Xen Orchestra] [Success] Backup report for ${jobName}`
: `[Xen Orchestra] [${
nFailures !== 0 ? 'Failure' : 'Skipped'
}] Backup report for ${jobName} - VMs : ${nagiosText.join(' ')}`,
})
}
_sendReport({ mailReceivers, markdown, nagiosMarkdown, subject, success }) {
_sendReport({ mailReceivers, markdown, subject, success }) {
if (mailReceivers === undefined || mailReceivers.length === 0) {
mailReceivers = this._mailsReceivers
}
@@ -645,11 +617,6 @@ class BackupReportsXoPlugin {
xo.sendSlackMessage({
message: markdown,
}),
xo.sendPassiveCheck !== undefined &&
xo.sendPassiveCheck({
status: success ? 0 : 2,
message: nagiosMarkdown,
}),
xo.sendIcinga2Status !== undefined &&
xo.sendIcinga2Status({
status: success ? 'OK' : 'CRITICAL',
@@ -683,7 +650,6 @@ class BackupReportsXoPlugin {
subject: `[Xen Orchestra] ${globalStatus} ${icon}`,
markdown,
success: false,
nagiosMarkdown: `[Xen Orchestra] [${globalStatus}] Error : ${error.message}`,
})
}
@@ -720,7 +686,6 @@ class BackupReportsXoPlugin {
let nSkipped = 0
const failedBackupsText = []
const nagiosText = []
const skippedBackupsText = []
const successfulBackupText = []
@@ -754,13 +719,9 @@ class BackupReportsXoPlugin {
`- **Reason**: ${message === UNHEALTHY_VDI_CHAIN_ERROR ? UNHEALTHY_VDI_CHAIN_MESSAGE : message}`,
''
)
nagiosText.push(`[(Skipped) ${vm !== undefined ? vm.name_label : 'undefined'} : ${message} ]`)
} else {
++nFailures
failedBackupsText.push(...text, `- **Error**: ${message}`, '')
nagiosText.push(`[(Failed) ${vm !== undefined ? vm.name_label : 'undefined'} : ${message} ]`)
}
} else if (!reportOnFailure) {
const { returnedValue } = call
@@ -835,11 +796,6 @@ class BackupReportsXoPlugin {
globalSuccess ? ICON_SUCCESS : nFailures !== 0 ? ICON_FAILURE : ICON_SKIPPED
}`,
success: globalSuccess,
nagiosMarkdown: globalSuccess
? `[Xen Orchestra] [Success] Backup report for ${tag}`
: `[Xen Orchestra] [${
nFailures !== 0 ? 'Failure' : 'Skipped'
}] Backup report for ${tag} - VMs : ${nagiosText.join(' ')}`,
})
}
}

View File

@@ -23,17 +23,24 @@ export const configurationSchema = {
type: 'string',
description: 'The encryption key',
},
host: {
type: 'string',
description: 'The host name in Nagios',
},
service: {
type: 'string',
description: 'The service description in Nagios',
},
},
additionalProperties: false,
required: ['server', 'port', 'key', 'host', 'service'],
required: ['server', 'port', 'key'],
}
export const testSchema = {
type: 'object',
properties: {
host: {
description: 'Nagios host',
type: 'string',
},
service: {
description: 'Nagios service',
type: 'string',
},
},
required: ['host', 'service'],
}
// ===================================================================
@@ -102,15 +109,22 @@ class XoServerNagios {
this._unset()
}
test() {
return this._sendPassiveCheck({
message: 'The server-nagios plugin for Xen Orchestra server seems to be working fine, nicely done :)',
status: OK,
})
test({ host, service }) {
return this._sendPassiveCheck(
{
message: 'The server-nagios plugin for Xen Orchestra server seems to be working fine, nicely done :)',
status: OK,
},
host,
service
)
}
_sendPassiveCheck({ message, status }) {
_sendPassiveCheck({ message, status }, host, service) {
return new Promise((resolve, reject) => {
this._conf.host = host
this._conf.service = service
if (/\r|\n/.test(message)) {
warn('the message must not contain a line break', { message })
for (let i = 0, n = message.length; i < n; ++i) {

View File

@@ -1,3 +1,31 @@
import humanFormat from 'human-format'
import ms from 'ms'
import { createLogger } from '@xen-orchestra/log'
const { warn } = createLogger('xo:server:handleBackupLog')
async function sendToNagios(app, jobName, vmBackupInfo) {
try {
const messageToNagios = {
id: vmBackupInfo.id,
result: vmBackupInfo.result,
size: humanFormat.bytes(vmBackupInfo.size),
duration: ms(vmBackupInfo.end - vmBackupInfo.start),
}
await app.sendPassiveCheck(
{
message: JSON.stringify(messageToNagios),
status: 0,
},
app.getObject(messageToNagios.id).name_label,
jobName
)
} catch (error) {
warn('sendToNagios:', error)
}
}
function forwardResult(log) {
if (log.status === 'failure') {
throw log.result
@@ -6,8 +34,40 @@ function forwardResult(log) {
}
// it records logs generated by `@xen-orchestra/backups/Task#run`
export const handleBackupLog = (log, { logger, localTaskIds, rootTaskId, runJobId = rootTaskId, handleRootTaskId }) => {
const { event, message, taskId } = log
export const handleBackupLog = (
log,
{ vmBackupInfo, app, jobName, logger, localTaskIds, rootTaskId, runJobId = rootTaskId, handleRootTaskId }
) => {
const { event, message, parentId, taskId } = log
if (app !== undefined && jobName !== undefined) {
if (event === 'start') {
if (log.data?.type === 'VM') {
vmBackupInfo.set('vm-' + taskId, {
id: log.data.id,
start: log.timestamp,
})
} else if (vmBackupInfo.has('vm-' + parentId) && log.message === 'export') {
vmBackupInfo.set('export-' + taskId, {
parentId: 'vm-' + parentId,
})
} else if (vmBackupInfo.has('export-' + parentId) && log.message === 'transfer') {
vmBackupInfo.set('transfer-' + taskId, {
parentId: 'export-' + parentId,
})
}
} else if (event === 'end') {
if (vmBackupInfo.has('vm-' + taskId)) {
const data = vmBackupInfo.get('vm-' + taskId)
data.result = log.status
data.end = log.timestamp
sendToNagios(app, jobName, data)
} else if (vmBackupInfo.has('transfer-' + taskId)) {
vmBackupInfo.get(vmBackupInfo.get(vmBackupInfo.get('transfer-' + taskId).parentId).parentId).size =
log.result.size
}
}
}
// If `runJobId` is defined, it means that the root task is already handled by `runJob`
if (runJobId !== undefined) {

View File

@@ -0,0 +1,28 @@
import splitHost from 'split-host'
// https://about.gitlab.com/blog/2021/01/27/we-need-to-talk-no-proxy/
export function shouldProxy(host, { NO_PROXY, no_proxy = NO_PROXY } = process.env) {
  // Decides whether `host` should go through the HTTP(S) proxy, honoring the
  // (de-facto) `no_proxy`/`NO_PROXY` convention:
  // - unset/null  → always proxy
  // - `*`         → never proxy
  // - otherwise   → comma-separated list of hosts/domains that bypass the proxy
  if (no_proxy == null) {
    return true
  }

  if (no_proxy === '*') {
    return false
  }

  const { hostname } = splitHost(host)
  for (let entry of no_proxy.split(',')) {
    entry = entry.trim()

    // a leading dot (`.example.org`) is equivalent to `example.org`
    if (entry[0] === '.') {
      entry = entry.slice(1)
    }

    const entryHostname = splitHost(entry).hostname

    // match either the exact hostname or a subdomain at a label boundary:
    // `sub.example.org` matches `example.org`, but `notexample.org` does not,
    // and `0::1` does not match `::1`
    if (hostname === entryHostname || hostname.endsWith('.' + entryHostname)) {
      return false
    }
  }
  return true
}

View File

@@ -0,0 +1,61 @@
import { shouldProxy } from './_shouldProxy.mjs'
import t from 'tap'
const ensureArray = v => (v === undefined ? [] : Array.isArray(v) ? v : [v])
;[
{
no_proxy: null,
ok: 'example.org',
},
{
no_proxy: '*',
nok: 'example.org',
},
{
no_proxy: 'example.org, example.com',
nok: ['example.org', 'example.org:1024', 'example.com'],
ok: 'example.net',
},
{
no_proxy: ['example.org', '.example.org'],
nok: ['example.org', 'example.org:1024', 'sub.example.org'],
ok: 'example.com',
},
// {
// no_proxy: 'example.org:1024',
// nok: ['example.org:1024', 'sub.example.org:1024'],
// ok: ['example.com', 'example.org'],
// },
{
no_proxy: '[::1]',
nok: ['[::1]', '[::1]:1024'],
ok: ['[::2]', '[0::1]'],
},
].forEach(({ no_proxy: noProxies, ok, nok }) => {
for (const no_proxy of ensureArray(noProxies)) {
const opts = { no_proxy }
t.test(String(no_proxy), function (t) {
ok = ensureArray(ok)
if (ok.length !== 0) {
t.test('should proxy', t => {
for (const host of ok) {
t.equal(shouldProxy(host, opts), true, host)
}
t.end()
})
}
nok = ensureArray(nok)
if (nok.length !== 0) {
t.test('should not proxy', t => {
for (const host of nok) {
t.equal(shouldProxy(host, opts), false, host)
}
t.end()
})
}
t.end()
})
}
})

View File

@@ -596,6 +596,22 @@ migrate.resolve = {
migrationNetwork: ['migrationNetwork', 'network', 'administrate'],
}
// Warm-migrate a VM to another SR.
// NOTE: the destructured parameter names must match the declared `params`
// schema below — they were previously `startVm`/`deleteSource`, which never
// matched `startDestinationVm`/`deleteSourceVm` and were always `undefined`.
export async function warmMigration({ vm, sr, startDestinationVm, deleteSourceVm }) {
  await this.warmMigrateVm(vm, sr, startDestinationVm, deleteSourceVm)
}

warmMigration.permission = 'admin'

warmMigration.params = {
  vm: {
    type: 'string',
  },
  sr: {
    type: 'string',
  },
  startDestinationVm: { type: 'boolean' },
  deleteSourceVm: { type: 'boolean' },
}
// -------------------------------------------------------------------
export const set = defer(async function ($defer, params) {

View File

@@ -149,11 +149,15 @@ export default class BackupNg {
try {
if (!useXoProxy && backupsConfig.disableWorkers) {
const localTaskIds = { __proto__: null }
const vmBackupInfo = new Map()
return await Task.run(
{
name: 'backup run',
onLog: log =>
handleBackupLog(log, {
vmBackupInfo,
app: this._app,
jobName: job.name,
localTaskIds,
logger,
runJobId,
@@ -279,8 +283,12 @@ export default class BackupNg {
const localTaskIds = { __proto__: null }
let result
const vmBackupInfo = new Map()
for await (const log of logsStream) {
result = handleBackupLog(log, {
vmBackupInfo,
app: this._app,
jobName: job.name,
logger,
localTaskIds,
runJobId,
@@ -296,6 +304,7 @@ export default class BackupNg {
}
} else {
const localTaskIds = { __proto__: null }
const vmBackupInfo = new Map()
return await runBackupWorker(
{
config: backupsConfig,
@@ -306,6 +315,9 @@ export default class BackupNg {
},
log =>
handleBackupLog(log, {
vmBackupInfo,
app: this._app,
jobName: job.name,
logger,
localTaskIds,
runJobId,

View File

@@ -0,0 +1,109 @@
import { Backup } from '@xen-orchestra/backups/Backup.js'
import { v4 as generateUuid } from 'uuid'
// Warm migration of a VM to another SR, implemented as a one-shot continuous
// replication: replicate while running, shut the source down, replicate the
// final delta, then start the replica.
export default class MigrateVm {
  constructor(app) {
    this._app = app
  }

  // The Backup object is single-use: it must be re-instantiated for each run.
  //
  // Builds a one-time delta replication job of `sourceVmId` towards `srId`,
  // tagged with `jobId` so the replicated VM can be identified afterwards.
  #createWarmBackup(sourceVmId, srId, jobId) {
    const app = this._app
    const config = {
      snapshotNameLabelTpl: '[XO warm migration {job.name}] {vm.name_label}',
    }
    const job = {
      type: 'backup',
      id: jobId,
      mode: 'delta',
      vms: { id: sourceVmId },
      name: `Warm migration`,
      srs: { id: srId },
      settings: {
        '': {
          // mandatory for delta replication writer
          copyRetention: 1,
        },
      },
    }
    const schedule = { id: 'one-time' }
    // for now we only support this from the main OA, no proxy
    return new Backup({
      config,
      job,
      schedule,
      getAdapter: async remoteId => app.getBackupsRemoteAdapter(await app.getRemoteWithCredentials(remoteId)),
      // `@xen-orchestra/backups/Backup` expect that `getConnectedRecord` returns a promise
      getConnectedRecord: async (xapiType, uuid) => app.getXapiObject(uuid),
    })
  }

  // Migrate `sourceVmId` to `srId`.
  // - `startDestVm`: boot the replica once the migration is done
  // - `deleteSource`: destroy the source VM afterwards (only honored when
  //   `startDestVm` is true, matching the original control flow)
  async warmMigrateVm(sourceVmId, srId, startDestVm = true, deleteSource = false) {
    // we'll use a one time use continuous replication job with the VM to migrate
    const jobId = generateUuid()
    const app = this._app
    const sourceVm = app.getXapiObject(sourceVmId)
    let backup = this.#createWarmBackup(sourceVmId, srId, jobId)
    await backup.run()
    const xapi = sourceVm.$xapi
    const ref = sourceVm.$ref
    // stop the source VM before the final sync; fall back to a hard shutdown
    try {
      await xapi.callAsync('VM.clean_shutdown', ref)
    } catch (error) {
      await xapi.callAsync('VM.hard_shutdown', ref)
    }
    // make it so it can't be restarted by error
    const message =
      'This VM has been migrated somewhere else and might not be up to date, check twice before starting it.'
    await sourceVm.update_blocked_operations({
      start: message,
      start_on: message,
    })
    // run the transfer again to transfer the changed parts
    // since the source is stopped, there won't be any new change after
    //
    // reuse the same job id: the second run must update the replica created by
    // the first run, and the target lookup below matches on this id
    backup = this.#createWarmBackup(sourceVmId, srId, jobId)
    await backup.run()
    // find the destination Vm
    const targets = Object.keys(
      app.getObjects({
        filter: obj => {
          return (
            'other' in obj &&
            obj.other['xo:backup:job'] === jobId &&
            obj.other['xo:backup:sr'] === srId &&
            obj.other['xo:backup:vm'] === sourceVm.uuid &&
            'start' in obj.blockedOperations
          )
        },
      })
    )
    if (targets.length === 0) {
      throw new Error(`Vm target of warm migration not found for ${sourceVmId} on SR ${srId} `)
    }
    if (targets.length > 1) {
      throw new Error(`Multiple target of warm migration found for ${sourceVmId} on SR ${srId} `)
    }
    const targetVm = app.getXapiObject(targets[0])
    // new vm is ready to start
    // delta replication writer as set this as blocked
    await targetVm.update_blocked_operations({ start: null, start_on: null })
    if (startDestVm) {
      // boot it
      await targetVm.$xapi.startVm(targetVm.$ref)
      // wait for really started
      // delete source
      if (deleteSource) {
        // await so a destroy failure surfaces instead of an unhandled rejection
        await sourceVm.$xapi.VM_destroy(sourceVm.$ref)
      } else {
        // @todo should we delete the snapshot if we keep the source vm ?
      }
    }
  }
}

View File

@@ -19,6 +19,7 @@ const messages = {
errorUnknownItem: 'Unknown {type}',
generateNewMacAddress: 'Generate new MAC addresses',
memoryFree: '{memoryFree} RAM free',
notConfigured: 'Not configured',
utcDate: 'UTC date',
utcTime: 'UTC time',
date: 'Date',

View File

@@ -2860,9 +2860,9 @@ export const changePassword = (oldPassword, newPassword) =>
() => error(_('pwdChangeError'), _('pwdChangeErrorBody'))
)
const _setUserPreferences = preferences =>
const _setUserPreferences = (preferences, userId) =>
_call('user.set', {
id: xo.user.id,
id: userId ?? xo.user.id,
preferences,
})::tap(subscribeCurrentUser.forceRefresh)
@@ -2923,15 +2923,18 @@ export const addOtp = secret =>
noop
)
export const removeOtp = () =>
export const removeOtp = user =>
confirm({
title: _('removeOtpConfirm'),
body: _('removeOtpConfirmMessage'),
}).then(
() =>
_setUserPreferences({
otp: null,
}),
_setUserPreferences(
{
otp: null,
},
resolveId(user)
),
noop
)

View File

@@ -495,6 +495,7 @@ export default decorate([
<li>{_('remoteEncryptionBackupSize')}</li>
</ul>
<input
autoComplete='new-password'
className='form-control'
name='encryptionKey'
onChange={effects.linkState}

View File

@@ -1,7 +1,9 @@
import * as Editable from 'editable'
import _, { messages } from 'intl'
import ActionButton from 'action-button'
import Button from 'button'
import Component from 'base-component'
import Icon from 'icon'
import isEmpty from 'lodash/isEmpty'
import keyBy from 'lodash/keyBy'
import map from 'lodash/map'
@@ -14,7 +16,7 @@ import { get } from '@xen-orchestra/defined'
import { injectIntl } from 'react-intl'
import { Password, Select } from 'form'
import { createUser, deleteUser, deleteUsers, editUser, subscribeGroups, subscribeUsers } from 'xo'
import { createUser, deleteUser, deleteUsers, editUser, removeOtp, subscribeGroups, subscribeUsers } from 'xo'
const permissions = {
none: {
@@ -78,6 +80,17 @@ const USER_COLUMNS = [
itemRenderer: user =>
isEmpty(user.authProviders) && <Editable.Password onChange={password => editUser(user, { password })} value='' />,
},
{
name: 'OTP',
itemRenderer: user =>
user.preferences.otp !== undefined ? (
<Button btnStyle='danger' onClick={() => removeOtp(user)} size='small'>
<Icon icon='remove' /> {_('remove')}
</Button>
) : (
_('notConfigured')
),
},
]
const USER_ACTIONS = [

View File

@@ -11,16 +11,44 @@ const { getPackages } = require('./utils')
const { env } = process
// Run `command` through a shell.
// In verbose mode the child inherits stdio; otherwise output is buffered and
// only flushed to stderr when the command fails.
// Throws the non-zero exit code (callers inspect/print it as a value).
async function run(command, opts, verbose) {
  const stdio = verbose ? 'inherit' : 'pipe'
  const child = spawn(command, { ...opts, shell: true, stdio })

  const buffered = []
  if (!verbose) {
    const collect = chunk => buffered.push(chunk)
    child.stdout.on('data', collect)
    child.stderr.on('data', collect)
  }

  const exitCode = await fromEvent(child, 'exit')
  if (exitCode !== 0) {
    buffered.forEach(chunk => process.stderr.write(chunk))
    throw exitCode
  }
}
// run a script for each package (also run pre and post)
//
// TODO: https://docs.npmjs.com/misc/scripts#environment
require('exec-promise')(args => {
const {
bail,
concurrency,
parallel,
verbose,
_: [script],
} = getopts(args, {
boolean: ['parallel'],
boolean: ['bail', 'parallel', 'verbose'],
string: ['concurrency'],
})
@@ -37,15 +65,18 @@ require('exec-promise')(args => {
env: Object.assign({}, env, {
PATH: `${dir}/node_modules/.bin${delimiter}${env.PATH}`,
}),
shell: true,
stdio: 'inherit',
}
return forEach.call([`pre${script}`, script, `post${script}`], script => {
const command = scripts[script]
if (command !== undefined) {
console.log(`* ${name}:${script} `, command)
return fromEvent(spawn(command, spawnOpts), 'exit').then(code => {
return run(command, spawnOpts, verbose).catch(code => {
if (code !== 0) {
if (bail) {
// eslint-disable-next-line no-throw-literal
throw `${name}:${script} Error: ` + code
}
++errors
console.log(`* ${name}:${script} Error:`, code)
}

View File

@@ -1,26 +0,0 @@
#!/usr/bin/env node
'use strict'
// Legacy Travis CI entry point:
// - on pull requests, run Jest only on the JS files changed relative to master
// - on regular builds, run the full lint, unit and integration suites
const { execFileSync, spawnSync } = require('child_process')
// Run a command with inherited stdio and return its exit status.
const run = (command, args) => spawnSync(command, args, { stdio: 'inherit' }).status
// List files added or modified (diff-filter=AM) compared to master.
const getFiles = () =>
execFileSync('git', ['diff-index', '--diff-filter=AM', '--ignore-submodules', '--name-only', 'master'], {
encoding: 'utf8',
})
.split('\n')
.filter(_ => _ !== '')
// -----------------------------------------------------------------------------
// Travis vars : https://docs.travis-ci.com/user/environment-variables#default-environment-variables.
if (process.env.TRAVIS_PULL_REQUEST !== 'false') {
// Pull request: only run tests related to the changed JS files.
const files = getFiles().filter(_ => _.endsWith('.cjs') || _.endsWith('.js') || _.endsWith('.mjs'))
if (files.length !== 0) {
process.exit(run('./node_modules/.bin/jest', ['--findRelatedTests', '--passWithNoTests'].concat(files)))
}
} else {
// Full build: exit status is the sum of the suites' statuses (0 only if all pass).
process.exit(run('yarn', ['test-lint']) + run('yarn', ['test-unit']) + run('yarn', ['test-integration']))
}