Compare commits

4 Commits: trustCerti...loadBalanc

| Author | SHA1 | Date |
|---|---|---|
|  | a75034bc6d |  |
|  | 921e847711 |  |
|  | 7670ddf2e8 |  |
|  | d0476b563f |  |

19 changes: .eslintrc.js
@@ -79,25 +79,6 @@ module.exports = {
```js
        'vue/require-default-prop': 'off', // https://github.com/vuejs/eslint-plugin-vue/issues/2051
      },
    },
    {
      files: ['@xen-orchestra/{web-core,lite,web}/src/pages/**/*.vue'],
      parserOptions: {
        sourceType: 'module',
      },
      rules: {
        'vue/multi-word-component-names': 'off',
      },
    },
    {
      files: ['@xen-orchestra/{web-core,lite,web}/typed-router.d.ts'],
      parserOptions: {
        sourceType: 'module',
      },
      rules: {
        'eslint-comments/disable-enable-pair': 'off',
        'eslint-comments/no-unlimited-disable': 'off',
      },
    },
  ],

  parserOptions: {
```
2 changes: .github/ISSUE_TEMPLATE/bug_report.yml (vendored)

@@ -64,7 +64,7 @@ body:
```yaml
    id: error-message
    attributes:
      label: Error message
      render: Text
      render: Markdown
    validations:
      required: false
  - type: textarea
```
4 changes: .github/workflows/ci.yml (vendored)

@@ -24,12 +24,8 @@ jobs:
```yaml
          cache: 'yarn'
      - name: Install project dependencies
        run: yarn
      - name: Ensure yarn.lock is up-to-date
        run: git diff --exit-code yarn.lock
      - name: Build the project
        run: yarn build
      - name: Unit tests
        run: yarn test-unit
      - name: Lint tests
        run: yarn test-lint
      - name: Integration tests
```
@@ -34,6 +34,7 @@
|
||||
},
|
||||
"devDependencies": {
|
||||
"sinon": "^17.0.1",
|
||||
"tap": "^16.3.0",
|
||||
"test": "^3.2.1"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -20,7 +20,7 @@
|
||||
"url": "https://vates.fr"
|
||||
},
|
||||
"license": "ISC",
|
||||
"version": "2.1.0",
|
||||
"version": "2.0.0",
|
||||
"engines": {
|
||||
"node": ">=8.10"
|
||||
},
|
||||
|
||||
@@ -20,9 +20,6 @@ function assertListeners(t, event, listeners) {
|
||||
}
|
||||
|
||||
t.beforeEach(function (t) {
|
||||
// work around https://github.com/tapjs/tapjs/issues/998
|
||||
t.context = {}
|
||||
|
||||
t.context.ee = new EventEmitter()
|
||||
t.context.em = new EventListenersManager(t.context.ee)
|
||||
})
|
||||
|
||||
@@ -38,9 +38,9 @@
|
||||
"version": "1.0.1",
|
||||
"scripts": {
|
||||
"postversion": "npm publish --access public",
|
||||
"test": "tap --allow-incomplete-coverage"
|
||||
"test": "tap --branches=72"
|
||||
},
|
||||
"devDependencies": {
|
||||
"tap": "^18.7.0"
|
||||
"tap": "^16.2.0"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,28 +0,0 @@
Mount a VHD generated by xen-orchestra to the filesystem.

### Library

```js
import { mount } from 'fuse-vhd'

// returns a disposable, see promise-toolbox/Disposable
// unmounts automatically when the disposable is disposed
// in case of a differencing VHD, it mounts the full chain
await mount(handler, diskId, mountPoint)
```

### CLI

From the install folder:

```
cli.mjs <remoteUrl> <vhdPathInRemote> <mountPoint>
```

After installing the package:

```
xo-fuse-vhd <remoteUrl> <vhdPathInRemote> <mountPoint>
```

remoteUrl can be found by using the CLI in `@xen-orchestra/fs`; for example, a local remote will have a URL like `file:///path/to/remote/root`.
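For a fuller picture, here is a minimal sketch of consuming the disposable returned by `mount` from a script. It follows the same pattern as the `cli.mjs` shown further down in this diff (promise-toolbox `Disposable.wrap` around an async generator, `getSyncedHandler` from `@xen-orchestra/fs`); the remote URL, VHD path and mount point below are placeholders, not values from this changeset.

```js
import Disposable from 'promise-toolbox/Disposable'
import { getSyncedHandler } from '@xen-orchestra/fs'
import { mount } from '@vates/fuse-vhd'

async function* main() {
  // `yield` hands the disposable handler to the surrounding Disposable wrapper,
  // which disposes it automatically when the generator finishes
  const handler = yield getSyncedHandler({ url: 'file:///path/to/remote/root' })

  // placeholder VHD path and mount point
  const mounted = await mount(handler, 'xo-vm-backups/example.vhd', '/mnt/vhd')
  try {
    // read files under /mnt/vhd here
  } finally {
    // unmount by disposing the disposable returned by mount()
    await mounted.dispose()
  }
}

Disposable.wrap(main)()
```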
@@ -1,59 +0,0 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->

# @vates/fuse-vhd

[](https://npmjs.org/package/@vates/fuse-vhd)  [](https://bundlephobia.com/result?p=@vates/fuse-vhd) [](https://npmjs.org/package/@vates/fuse-vhd)

## Install

Installation of the [npm package](https://npmjs.org/package/@vates/fuse-vhd):

```sh
npm install --save @vates/fuse-vhd
```

## Usage

Mount a VHD generated by xen-orchestra to the filesystem.

### Library

```js
import { mount } from 'fuse-vhd'

// returns a disposable, see promise-toolbox/Disposable
// unmounts automatically when the disposable is disposed
// in case of a differencing VHD, it mounts the full chain
await mount(handler, diskId, mountPoint)
```

### CLI

From the install folder:

```
cli.mjs <remoteUrl> <vhdPathInRemote> <mountPoint>
```

After installing the package:

```
xo-fuse-vhd <remoteUrl> <vhdPathInRemote> <mountPoint>
```

remoteUrl can be found by using the CLI in `@xen-orchestra/fs`; for example, a local remote will have a URL like `file:///path/to/remote/root`.

## Contributions

Contributions are _very_ welcomed, either on the documentation or on the code.

You may:

- report any [issue](https://github.com/vatesfr/xen-orchestra/issues) you've encountered;
- fork and create a pull request.

## License

[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)
@@ -1,26 +0,0 @@
```js
#!/usr/bin/env node

import Disposable from 'promise-toolbox/Disposable'
import { getSyncedHandler } from '@xen-orchestra/fs'

import { mount } from './index.mjs'

async function* main([remoteUrl, vhdPathInRemote, mountPoint]) {
  if (mountPoint === undefined) {
    throw new TypeError('missing arg: cli <remoteUrl> <vhdPathInRemote> <mountPoint>')
  }
  const handler = yield getSyncedHandler({ url: remoteUrl })
  const mounted = await mount(handler, vhdPathInRemote, mountPoint)

  let disposePromise
  process.on('SIGINT', async () => {
    // ensure single dispose
    if (!disposePromise) {
      disposePromise = mounted.dispose()
    }
    await disposePromise
    process.exit()
  })
}

Disposable.wrap(main)(process.argv.slice(2))
```
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@vates/fuse-vhd",
|
||||
"version": "2.1.0",
|
||||
"version": "2.0.0",
|
||||
"license": "ISC",
|
||||
"private": false,
|
||||
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/fuse-vhd",
|
||||
@@ -19,15 +19,11 @@
|
||||
},
|
||||
"main": "./index.mjs",
|
||||
"dependencies": {
|
||||
"@xen-orchestra/fs": "^4.1.4",
|
||||
"fuse-native": "^2.2.6",
|
||||
"lru-cache": "^7.14.0",
|
||||
"promise-toolbox": "^0.21.0",
|
||||
"vhd-lib": "^4.9.0"
|
||||
},
|
||||
"bin": {
|
||||
"xo-fuse-vhd": "./cli.mjs"
|
||||
},
|
||||
"scripts": {
|
||||
"postversion": "npm publish --access public"
|
||||
}
|
||||
|
||||
@@ -24,14 +24,14 @@
|
||||
"@xen-orchestra/async-map": "^0.1.2",
|
||||
"@xen-orchestra/log": "^0.6.0",
|
||||
"promise-toolbox": "^0.21.0",
|
||||
"xen-api": "^2.0.1"
|
||||
"xen-api": "^2.0.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"tap": "^18.7.0",
|
||||
"tap": "^16.3.0",
|
||||
"tmp": "^0.2.1"
|
||||
},
|
||||
"scripts": {
|
||||
"postversion": "npm publish --access public",
|
||||
"test-integration": "tap --allow-incomplete-coverage"
|
||||
"test-integration": "tap --lines 97 --functions 95 --branches 74 --statements 97 tests/*.integ.mjs"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import { strict as assert } from 'node:assert'
|
||||
import test from 'test'
|
||||
import { describe, it } from 'tap/mocha'
|
||||
|
||||
import {
|
||||
generateHotp,
|
||||
@@ -11,8 +11,6 @@ import {
|
||||
verifyTotp,
|
||||
} from './index.mjs'
|
||||
|
||||
const { describe, it } = test
|
||||
|
||||
describe('generateSecret', function () {
|
||||
it('generates a string of 32 chars', async function () {
|
||||
const secret = generateSecret()
|
||||
@@ -31,9 +31,9 @@
|
||||
},
|
||||
"scripts": {
|
||||
"postversion": "npm publish --access public",
|
||||
"test": "node--test"
|
||||
"test": "tap"
|
||||
},
|
||||
"devDependencies": {
|
||||
"test": "^3.3.0"
|
||||
"tap": "^16.3.0"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
'use strict'
|
||||
|
||||
const assert = require('assert/strict')
|
||||
const { describe, it } = require('test')
|
||||
const { describe, it } = require('tap').mocha
|
||||
|
||||
const { every, not, some } = require('./')
|
||||
|
||||
@@ -32,9 +32,9 @@
|
||||
},
|
||||
"scripts": {
|
||||
"postversion": "npm publish --access public",
|
||||
"test": "node--test"
|
||||
"test": "tap"
|
||||
},
|
||||
"devDependencies": {
|
||||
"test": "^3.3.0"
|
||||
"tap": "^16.0.1"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
'use strict'
|
||||
|
||||
const assert = require('assert/strict')
|
||||
const { afterEach, describe, it } = require('test')
|
||||
const { afterEach, describe, it } = require('tap').mocha
|
||||
|
||||
const { AlteredRecordError, AuditCore, MissingRecordError, NULL_ID, Storage } = require('.')
|
||||
|
||||
@@ -13,10 +13,10 @@
|
||||
},
|
||||
"scripts": {
|
||||
"postversion": "npm publish --access public",
|
||||
"test": "node--test"
|
||||
"test": "tap --lines 67 --functions 92 --branches 52 --statements 67"
|
||||
},
|
||||
"dependencies": {
|
||||
"@vates/decorate-with": "^2.1.0",
|
||||
"@vates/decorate-with": "^2.0.0",
|
||||
"@xen-orchestra/log": "^0.6.0",
|
||||
"golike-defer": "^0.5.1",
|
||||
"object-hash": "^2.0.1"
|
||||
@@ -28,6 +28,6 @@
|
||||
"url": "https://vates.fr"
|
||||
},
|
||||
"devDependencies": {
|
||||
"test": "^3.3.0"
|
||||
"tap": "^16.0.1"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,8 +7,8 @@
|
||||
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
|
||||
"dependencies": {
|
||||
"@xen-orchestra/async-map": "^0.1.2",
|
||||
"@xen-orchestra/backups": "^0.44.6",
|
||||
"@xen-orchestra/fs": "^4.1.4",
|
||||
"@xen-orchestra/backups": "^0.44.3",
|
||||
"@xen-orchestra/fs": "^4.1.3",
|
||||
"filenamify": "^6.0.0",
|
||||
"getopts": "^2.2.5",
|
||||
"lodash": "^4.17.15",
|
||||
|
||||
@@ -35,8 +35,6 @@ export const DIR_XO_CONFIG_BACKUPS = 'xo-config-backups'
|
||||
|
||||
export const DIR_XO_POOL_METADATA_BACKUPS = 'xo-pool-metadata-backups'
|
||||
|
||||
const IMMUTABILTY_METADATA_FILENAME = '/immutability.json'
|
||||
|
||||
const { debug, warn } = createLogger('xo:backups:RemoteAdapter')
|
||||
|
||||
const compareTimestamp = (a, b) => a.timestamp - b.timestamp
|
||||
@@ -751,37 +749,10 @@ export class RemoteAdapter {
```js
  }

  async readVmBackupMetadata(path) {
    let json
    let isImmutable = false
    let remoteIsImmutable = false
    // if the remote is immutable, check whether this metadata is also immutable
    try {
      // this file is not encrypted
      await this._handler._readFile(IMMUTABILTY_METADATA_FILENAME)
      remoteIsImmutable = true
    } catch (error) {
      if (error.code !== 'ENOENT') {
        throw error
      }
    }

    try {
      // this will trigger an EPERM error if the file is immutable
      json = await this.handler.readFile(path, { flag: 'r+' })
      // the S3 handler doesn't respect flags
    } catch (err) {
      // retry without triggering the immutability check, only on an immutable remote
      if (err.code === 'EPERM' && remoteIsImmutable) {
        isImmutable = true
        json = await this._handler.readFile(path, { flag: 'r' })
      } else {
        throw err
      }
    }
    // _filename is a private field used to compute the backup id
    //
    // it's enumerable to make it cacheable
    const metadata = { ...JSON.parse(json), _filename: path, isImmutable }
    const metadata = { ...JSON.parse(await this._handler.readFile(path)), _filename: path }

    // backups created on XenServer < 7.1 via JSON in XML-RPC transports have boolean values encoded as integers, which make them unusable with more recent XAPIs
    if (typeof metadata.vm.is_a_template === 'number') {
```
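As an aside, the EPERM-based probe above can be illustrated in isolation. The sketch below is hypothetical and uses `node:fs` directly rather than the remote handler: opening a `chattr +i` file with flag `'r+'` fails with `EPERM`, while a plain read still succeeds, which is what lets `readVmBackupMetadata` flag a backup as immutable.

```js
import fs from 'node:fs/promises'

// hypothetical helper, not part of this changeset
async function readAndDetectImmutability(path) {
  try {
    // opening for read/write triggers EPERM on an immutable file
    const data = await fs.readFile(path, { flag: 'r+' })
    return { data, isImmutable: false }
  } catch (error) {
    if (error.code !== 'EPERM') {
      throw error
    }
    // a read-only open still works on immutable files
    const data = await fs.readFile(path, { flag: 'r' })
    return { data, isImmutable: true }
  }
}
```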
@@ -6,8 +6,7 @@ function formatVmBackup(backup) {
|
||||
|
||||
let differencingVhds
|
||||
let dynamicVhds
|
||||
// some backups don't use snapshots and therefore cannot include memory
|
||||
const withMemory = vmSnapshot !== undefined && vmSnapshot.suspend_VDI !== 'OpaqueRef:NULL'
|
||||
const withMemory = vmSnapshot.suspend_VDI !== 'OpaqueRef:NULL'
|
||||
// isVhdDifferencing is either undefined or an object
|
||||
if (isVhdDifferencing !== undefined) {
|
||||
differencingVhds = Object.values(isVhdDifferencing).filter(t => t).length
|
||||
@@ -31,7 +30,6 @@ function formatVmBackup(backup) {
|
||||
}),
|
||||
|
||||
id: backup.id,
|
||||
isImmutable: backup.isImmutable,
|
||||
jobId: backup.jobId,
|
||||
mode: backup.mode,
|
||||
scheduleId: backup.scheduleId,
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
"type": "git",
|
||||
"url": "https://github.com/vatesfr/xen-orchestra.git"
|
||||
},
|
||||
"version": "0.44.6",
|
||||
"version": "0.44.3",
|
||||
"engines": {
|
||||
"node": ">=14.18"
|
||||
},
|
||||
@@ -22,13 +22,13 @@
|
||||
"@vates/async-each": "^1.0.0",
|
||||
"@vates/cached-dns.lookup": "^1.0.0",
|
||||
"@vates/compose": "^2.1.0",
|
||||
"@vates/decorate-with": "^2.1.0",
|
||||
"@vates/decorate-with": "^2.0.0",
|
||||
"@vates/disposable": "^0.1.5",
|
||||
"@vates/fuse-vhd": "^2.1.0",
|
||||
"@vates/fuse-vhd": "^2.0.0",
|
||||
"@vates/nbd-client": "^3.0.0",
|
||||
"@vates/parse-duration": "^0.1.1",
|
||||
"@xen-orchestra/async-map": "^0.1.2",
|
||||
"@xen-orchestra/fs": "^4.1.4",
|
||||
"@xen-orchestra/fs": "^4.1.3",
|
||||
"@xen-orchestra/log": "^0.6.0",
|
||||
"@xen-orchestra/template": "^0.1.0",
|
||||
"app-conf": "^2.3.0",
|
||||
@@ -45,7 +45,7 @@
|
||||
"tar": "^6.1.15",
|
||||
"uuid": "^9.0.0",
|
||||
"vhd-lib": "^4.9.0",
|
||||
"xen-api": "^2.0.1",
|
||||
"xen-api": "^2.0.0",
|
||||
"yazl": "^2.5.1"
|
||||
},
|
||||
"devDependencies": {
|
||||
@@ -56,7 +56,7 @@
|
||||
"tmp": "^0.2.1"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@xen-orchestra/xapi": "^4.2.0"
|
||||
"@xen-orchestra/xapi": "^4.1.0"
|
||||
},
|
||||
"license": "AGPL-3.0-or-later",
|
||||
"author": {
|
||||
|
||||
@@ -18,7 +18,7 @@
|
||||
"preferGlobal": true,
|
||||
"dependencies": {
|
||||
"golike-defer": "^0.5.1",
|
||||
"xen-api": "^2.0.1"
|
||||
"xen-api": "^2.0.0"
|
||||
},
|
||||
"scripts": {
|
||||
"postversion": "npm publish"
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
{
|
||||
"private": false,
|
||||
"name": "@xen-orchestra/fs",
|
||||
"version": "4.1.4",
|
||||
"version": "4.1.3",
|
||||
"license": "AGPL-3.0-or-later",
|
||||
"description": "The File System for Xen Orchestra backups.",
|
||||
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/fs",
|
||||
@@ -28,7 +28,7 @@
|
||||
"@sindresorhus/df": "^3.1.1",
|
||||
"@vates/async-each": "^1.0.0",
|
||||
"@vates/coalesce-calls": "^0.1.0",
|
||||
"@vates/decorate-with": "^2.1.0",
|
||||
"@vates/decorate-with": "^2.0.0",
|
||||
"@vates/read-chunk": "^1.2.0",
|
||||
"@xen-orchestra/log": "^0.6.0",
|
||||
"bind-property-descriptor": "^2.0.0",
|
||||
|
||||
@@ -364,7 +364,7 @@ export default class RemoteHandlerAbstract {
|
||||
let data
|
||||
try {
|
||||
// this file is not encrypted
|
||||
data = await this._readFile(normalizePath(ENCRYPTION_DESC_FILENAME))
|
||||
data = await this._readFile(normalizePath(ENCRYPTION_DESC_FILENAME), 'utf-8')
|
||||
const json = JSON.parse(data)
|
||||
encryptionAlgorithm = json.algorithm
|
||||
} catch (error) {
|
||||
@@ -377,7 +377,7 @@ export default class RemoteHandlerAbstract {
|
||||
try {
|
||||
this.#rawEncryptor = _getEncryptor(encryptionAlgorithm, this._remote.encryptionKey)
|
||||
// this file is encrypted
|
||||
const data = await this.__readFile(ENCRYPTION_METADATA_FILENAME)
|
||||
const data = await this.__readFile(ENCRYPTION_METADATA_FILENAME, 'utf-8')
|
||||
JSON.parse(data)
|
||||
} catch (error) {
|
||||
// can be ENOENT, a bad algorithm, or broken JSON (bad key or algorithm)
|
||||
|
||||
@@ -171,12 +171,7 @@ export default class LocalHandler extends RemoteHandlerAbstract {
|
||||
}
|
||||
}
|
||||
|
||||
async _readFile(file, { flags, ...options } = {}) {
|
||||
// contrary to createReadStream, readFile expects the singular `flag` option
|
||||
if (flags !== undefined) {
|
||||
options.flag = flags
|
||||
}
|
||||
|
||||
async _readFile(file, options) {
|
||||
const filePath = this.getFilePath(file)
|
||||
return await this.#addSyncStackTrace(retry, () => fs.readFile(filePath, options), this.#retriesOnEagain)
|
||||
}
|
||||
|
||||
@@ -1,10 +0,0 @@
### make a remote immutable

Launch the `xo-immutable-remote` command. The configuration is stored in the config file.
This script must be kept running to make files immutable reliably.

### make file mutable

Launch the `xo-lift-remote-immutability` CLI. The configuration is stored in the config file.

If the config file has a `liftEvery` setting, this script will continue to run and regularly check whether there are files to update.
@@ -1 +0,0 @@
|
||||
../../scripts/npmignore
|
||||
@@ -1,41 +0,0 @@
|
||||
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->
|
||||
|
||||
# @xen-orchestra/immutable-backups
|
||||
|
||||
[](https://npmjs.org/package/@xen-orchestra/immutable-backups)  [](https://bundlephobia.com/result?p=@xen-orchestra/immutable-backups) [](https://npmjs.org/package/@xen-orchestra/immutable-backups)
|
||||
|
||||
## Install
|
||||
|
||||
Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/immutable-backups):
|
||||
|
||||
```sh
|
||||
npm install --save @xen-orchestra/immutable-backups
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
### make a remote immutable
|
||||
|
||||
Launch the `xo-immutable-remote` command. The configuration is stored in the config file.
|
||||
This script must be kept running to make files immutable reliably.
|
||||
|
||||
### make file mutable
|
||||
|
||||
Launch the `xo-lift-remote-immutability` CLI. The configuration is stored in the config file.
|
||||
|
||||
If the config file has a `liftEvery` setting, this script will continue to run and regularly check whether there are files to update.
|
||||
|
||||
## Contributions
|
||||
|
||||
Contributions are _very_ welcomed, either on the documentation or on
|
||||
the code.
|
||||
|
||||
You may:
|
||||
|
||||
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
|
||||
you've encountered;
|
||||
- fork and create a pull request.
|
||||
|
||||
## License
|
||||
|
||||
[AGPL-3.0-or-later](https://spdx.org/licenses/AGPL-3.0-or-later) © [Vates SAS](https://vates.fr)
|
||||
@@ -1,10 +0,0 @@
```js
import fs from 'node:fs/promises'
import { dirname, join } from 'node:path'
import isBackupMetadata from './isBackupMetadata.mjs'

export default async path => {
  if (isBackupMetadata(path)) {
    // remove the VM metadata cache to force XO to rebuild it
    await fs.unlink(join(dirname(path), 'cache.json.gz'))
  }
}
```
@@ -1,4 +0,0 @@
```js
import { dirname } from 'node:path'

// check if we are handling a file directly under a vhd directory (bat, header, footer, …)
export default path => dirname(path).endsWith('.vhd')
```
@@ -1,46 +0,0 @@
|
||||
import { load } from 'app-conf'
|
||||
import { homedir } from 'os'
|
||||
import { join } from 'node:path'
|
||||
import ms from 'ms'
|
||||
|
||||
const APP_NAME = 'xo-immutable-backups'
|
||||
const APP_DIR = new URL('.', import.meta.url).pathname
|
||||
|
||||
export default async function loadConfig() {
|
||||
const config = await load(APP_NAME, {
|
||||
appDir: APP_DIR,
|
||||
ignoreUnknownFormats: true,
|
||||
})
|
||||
if (config.remotes === undefined || config.remotes?.length < 1) {
|
||||
throw new Error(
|
||||
'No remotes are configured in the config file, please add at least one [remotes.<remoteid>] with a root property pointing to the absolute path of the remote to watch'
|
||||
)
|
||||
}
|
||||
if (config.liftEvery) {
|
||||
config.liftEvery = ms(config.liftEvery)
|
||||
}
|
||||
for (const [remoteId, { indexPath, immutabilityDuration, root }] of Object.entries(config.remotes)) {
|
||||
if (!root) {
|
||||
throw new Error(
|
||||
`Remote ${remoteId} doesn't have a root property containing the absolute path to the root of a backup repository`
|
||||
)
|
||||
}
|
||||
if (!immutabilityDuration) {
|
||||
throw new Error(
|
||||
`Remote ${remoteId} doesn't have an immutabilityDuration property indicating the minimal duration the backups should be protected by immutability`
|
||||
)
|
||||
}
|
||||
if (ms(immutabilityDuration) < ms('1d')) {
|
||||
throw new Error(
|
||||
`Remote ${remoteId} immutability duration is smaller than the minimum allowed (1d), current : ${immutabilityDuration}`
|
||||
)
|
||||
}
|
||||
if (!indexPath) {
|
||||
const basePath = indexPath ?? process.env.XDG_DATA_HOME ?? join(homedir(), '.local', 'share')
|
||||
const immutabilityIndexPath = join(basePath, APP_NAME, remoteId)
|
||||
config.remotes[remoteId].indexPath = immutabilityIndexPath
|
||||
}
|
||||
config.remotes[remoteId].immutabilityDuration = ms(immutabilityDuration)
|
||||
}
|
||||
return config
|
||||
}
|
||||
@@ -1,14 +0,0 @@
```toml
# how often the immutability-lifting script runs to check whether
# some files need to be made mutable again
liftEvery = 1h

# you can add as many remotes as you want by changing the id (here: remote1)
#[remotes.remote1]
#root = "/mnt/ssd/vhdblock/" # the absolute path of the root of the backup repository
#immutabilityDuration = 7d # mandatory
# optional, defaults to false; when true, the index is scanned and updated on start (can be expensive)
#rebuildIndexOnStart = true

# the index path is optional; it defaults to XDG_DATA_HOME or, if that is not set, to ~/.local/share
#indexPath = "/var/lib/" # the application name (immutable-backup) is appended automatically
```
@@ -1,33 +0,0 @@
|
||||
import { describe, it } from 'node:test'
|
||||
import assert from 'node:assert/strict'
|
||||
import fs from 'node:fs/promises'
|
||||
import path from 'node:path'
|
||||
import { tmpdir } from 'node:os'
|
||||
import * as Directory from './directory.mjs'
|
||||
import { rimraf } from 'rimraf'
|
||||
|
||||
describe('immutable-backups/file', async () => {
|
||||
it('really lock a directory', async () => {
|
||||
const dir = await fs.mkdtemp(path.join(tmpdir(), 'immutable-backups-tests'))
|
||||
const dataDir = path.join(dir, 'data')
|
||||
await fs.mkdir(dataDir)
|
||||
const immutDir = path.join(dir, '.immutable')
|
||||
const filePath = path.join(dataDir, 'test')
|
||||
await fs.writeFile(filePath, 'data')
|
||||
await Directory.makeImmutable(dataDir, immutDir)
|
||||
assert.strictEqual(await Directory.isImmutable(dataDir), true)
|
||||
await assert.rejects(() => fs.writeFile(filePath, 'data'))
|
||||
await assert.rejects(() => fs.appendFile(filePath, 'data'))
|
||||
await assert.rejects(() => fs.unlink(filePath))
|
||||
await assert.rejects(() => fs.rename(filePath, filePath + 'copy'))
|
||||
await assert.rejects(() => fs.writeFile(path.join(dataDir, 'test2'), 'data'))
|
||||
await assert.rejects(() => fs.rename(dataDir, dataDir + 'copy'))
|
||||
await Directory.liftImmutability(dataDir, immutDir)
|
||||
assert.strictEqual(await Directory.isImmutable(dataDir), false)
|
||||
await fs.writeFile(filePath, 'data')
|
||||
await fs.appendFile(filePath, 'data')
|
||||
await fs.unlink(filePath)
|
||||
await fs.rename(dataDir, dataDir + 'copy')
|
||||
await rimraf(dir)
|
||||
})
|
||||
})
|
||||
@@ -1,21 +0,0 @@
|
||||
import execa from 'execa'
|
||||
import { unindexFile, indexFile } from './fileIndex.mjs'
|
||||
|
||||
export async function makeImmutable(dirPath, immutabilityCachePath) {
|
||||
if (immutabilityCachePath) {
|
||||
await indexFile(dirPath, immutabilityCachePath)
|
||||
}
|
||||
await execa('chattr', ['+i', '-R', dirPath])
|
||||
}
|
||||
|
||||
export async function liftImmutability(dirPath, immutabilityCachePath) {
|
||||
if (immutabilityCachePath) {
|
||||
await unindexFile(dirPath, immutabilityCachePath)
|
||||
}
|
||||
await execa('chattr', ['-i', '-R', dirPath])
|
||||
}
|
||||
|
||||
export async function isImmutable(path) {
|
||||
const { stdout } = await execa('lsattr', ['-d', path])
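    // note: `lsattr -d` prints a flag string followed by the path (e.g. `----i---------e------- /path`); the immutable flag is assumed here to be the fifth character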
|
||||
return stdout[4] === 'i'
|
||||
}
|
||||
@@ -1,114 +0,0 @@
# Immutability

The goal is to make a remote that XO can write to, but not modify, during the immutability duration set on the remote. That way, it's not possible for XO to delete or encrypt any backup during this period. It protects your backups against ransomware, at least as long as the attacker does not have root access to the remote server.

We target the `governance` type of immutability: **the local root account of the remote server will be able to lift immutability**.

We use the file system capabilities; they are tested when the protection process starts.

It is compatible with encryption at rest made by XO.

## Prerequisites

The commands must be run as root on the remote, or by a user with the `CAP_LINUX_IMMUTABLE` capability. On start, the protection process writes its status and the immutability duration into the `immutability.json` file on the remote.

`chattr` and `lsattr` should be installed on the system.

## Configuring

This package uses app-conf to store its config. The application name is `xo-immutable-backup`. A sample config file is provided in this package.

## Making a file immutable

Marking a file or a folder immutable creates an alias file in `<indexPath>/<DayOfFileCreation>/<sha256(fullpath)>`.

`indexPath` can be defined in the config file; otherwise `XDG_HOME` is used, and if that is not available it falls back to `~/.local/share`.

This index is used when lifting the immutability of the remote: it only looks at the `<indexPath>/<DayOfFileCreation>/` folders that are old enough.
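To make the index layout concrete, here is a small sketch of how such an alias path can be derived; it mirrors `computeIndexFilePath` from `fileIndex.mjs` further down in this diff (day taken from the file's birth time, SHA-256 of the full path as the file name).

```js
import { createHash } from 'node:crypto'
import fs from 'node:fs/promises'
import { join } from 'node:path'

async function indexEntryPath(protectedPath, indexPath) {
  const { birthtimeMs } = await fs.stat(protectedPath)
  const day = new Date(birthtimeMs).toISOString().split('T')[0] // e.g. '2024-01-15'
  const hash = createHash('sha256').update(protectedPath).digest('hex')
  // <indexPath>/<DayOfFileCreation>/<sha256(fullpath)>
  return join(indexPath, day, hash)
}
```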
## Real time protecting

On start, the watcher will create the index if it does not exist.
It will also run a checkup to ensure immutability can work on this remote and handle the easiest issues.

The watching process depends on the backup type, since we don't want to make temporary files and caches immutable.

It won't protect files during upload, only once the files have been completely written to disk. Real time, in this case, means "protecting critical files as soon as possible after they are uploaded".

This can be alleviated by:

- Coupling immutability with encryption to ensure the file is not modified
- Running health checks to ensure the data is exactly the same as the snapshot data

List of protected files:

```js
const PATHS = [
  // xo configuration backups
  'xo-config-backups/*/*/data',
  'xo-config-backups/*/*/data.json',
  'xo-config-backups/*/*/metadata.json',
  // pool backups
  'xo-pool-metadata-backups/*/metadata.json',
  'xo-pool-metadata-backups/*/data',
  // vm backups, xo-vm-backups/<vmuuid>/
  'xo-vm-backups/*/*.json',
  'xo-vm-backups/*/*.xva',
  'xo-vm-backups/*/*.xva.checksum',
  // xo-vm-backups/<vmuuid>/vdis/<jobid>/<vdiUuid>
  'xo-vm-backups/*/vdis/*/*/*.vhd', // can be an alias or a vhd file
  // for vhd directory:
  'xo-vm-backups/*/vdis/*/*/data/*.vhd/bat',
  'xo-vm-backups/*/vdis/*/*/data/*.vhd/header',
  'xo-vm-backups/*/vdis/*/*/data/*.vhd/footer',
]
```
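Following the list of protected paths above, here is a minimal sketch of the watch setup it feeds, mirroring the `chokidar` options used by `protectRemotes.mjs` later in this diff; the root path is a placeholder and the `add` handler is left as a stub.

```js
import chokidar from 'chokidar'

const watcher = chokidar.watch(PATHS, {
  cwd: '/path/to/remote/root', // placeholder remote root
  ignoreInitial: true, // set to false to (re)index files already present on start
  awaitWriteFinish: true, // only report files once they are fully written
  depth: 7,
  ignored: [/(^|[/\\])\../, /\.lock$/], // dotfiles and lock files
})

watcher.on('add', path => {
  // make the new file (or its parent .vhd directory) immutable and index it here
})
```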
## Releasing protection on old enough files on a remote

The watcher will periodically check whether some files must be unlocked.

## Troubleshooting

### some files are still locked

Add the `rebuildIndexOnStart` option to the config file.

### make the remote fully mutable again

- Update the immutability setting with a 0 duration
- Launch the `liftProtection` CLI
- Remove the `protectRemotes` service

### increasing the immutability duration

This will prolong files that are currently immutable, but won't protect files that are already out of immutability.

### reducing the immutability duration

Change the setting, and launch the `liftProtection` CLI, or wait for the next planned execution.

### why are my incremental backups not marked as protected in XO?

For incremental backups to be marked as protected in XO, the entire chain must be under protection. To ensure at least 7 days of backups are protected, you need to set the immutability duration and retention to 14 days, and the full backup interval to 7 days.

That means that if the last backup chain is complete (7 backups) it is completely under protection, and if not, the preceding chain is also under protection. K are key backups, d are deltas.

```
Kd Kdddddd Kdddddd K # 8 backups protected, 2 chains
K Kdddddd Kdddddd Kd # 9 backups protected, 2 chains
Kdddddd Kdddddd Kdd # 10 backups protected, 2 chains
Kddddd Kdddddd Kddd # 11 backups protected, 2 chains
Kdddd Kdddddd Kdddd # 12 backups protected, 2 chains
Kddd Kdddddd Kddddd # 13 backups protected, 2 chains
Kdd Kdddddd Kdddddd # 7 backups protected, 1 chain since the preceding full is now mutable
Kd Kdddddd Kdddddd K # 8 backups protected, 2 chains
```

### Why doesn't the protect process start?

- It should be run as root or by a user with the `CAP_LINUX_IMMUTABLE` capability
- The underlying file system should support immutability, especially the `chattr` and `lsattr` commands
- Logs are in journalctl
@@ -1,29 +0,0 @@
|
||||
import { describe, it } from 'node:test'
|
||||
import assert from 'node:assert/strict'
|
||||
import fs from 'node:fs/promises'
|
||||
import path from 'node:path'
|
||||
import * as File from './file.mjs'
|
||||
import { tmpdir } from 'node:os'
|
||||
import { rimraf } from 'rimraf'
|
||||
|
||||
describe('immutable-backups/file', async () => {
|
||||
it('really lock a file', async () => {
|
||||
const dir = await fs.mkdtemp(path.join(tmpdir(), 'immutable-backups-tests'))
|
||||
const immutDir = path.join(dir, '.immutable')
|
||||
const filePath = path.join(dir, 'test.ext')
|
||||
await fs.writeFile(filePath, 'data')
|
||||
assert.strictEqual(await File.isImmutable(filePath), false)
|
||||
await File.makeImmutable(filePath, immutDir)
|
||||
assert.strictEqual(await File.isImmutable(filePath), true)
|
||||
await assert.rejects(() => fs.writeFile(filePath, 'data'))
|
||||
await assert.rejects(() => fs.appendFile(filePath, 'data'))
|
||||
await assert.rejects(() => fs.unlink(filePath))
|
||||
await assert.rejects(() => fs.rename(filePath, filePath + 'copy'))
|
||||
await File.liftImmutability(filePath, immutDir)
|
||||
assert.strictEqual(await File.isImmutable(filePath), false)
|
||||
await fs.writeFile(filePath, 'data')
|
||||
await fs.appendFile(filePath, 'data')
|
||||
await fs.unlink(filePath)
|
||||
await rimraf(dir)
|
||||
})
|
||||
})
|
||||
@@ -1,24 +0,0 @@
|
||||
import execa from 'execa'
|
||||
import { unindexFile, indexFile } from './fileIndex.mjs'
|
||||
|
||||
// this works only on Linux-like systems
|
||||
// this could work on windows : https://4sysops.com/archives/set-and-remove-the-read-only-file-attribute-with-powershell/
|
||||
|
||||
export async function makeImmutable(path, immutabilityCachePath) {
|
||||
if (immutabilityCachePath) {
|
||||
await indexFile(path, immutabilityCachePath)
|
||||
}
|
||||
await execa('chattr', ['+i', path])
|
||||
}
|
||||
|
||||
export async function liftImmutability(filePath, immutabilityCachePath) {
|
||||
if (immutabilityCachePath) {
|
||||
await unindexFile(filePath, immutabilityCachePath)
|
||||
}
|
||||
await execa('chattr', ['-i', filePath])
|
||||
}
|
||||
|
||||
export async function isImmutable(path) {
|
||||
const { stdout } = await execa('lsattr', ['-d', path])
|
||||
return stdout[4] === 'i'
|
||||
}
|
||||
@@ -1,81 +0,0 @@
|
||||
import { describe, it } from 'node:test'
|
||||
import assert from 'node:assert/strict'
|
||||
import fs from 'node:fs/promises'
|
||||
import path from 'node:path'
|
||||
import * as FileIndex from './fileIndex.mjs'
|
||||
import * as Directory from './directory.mjs'
|
||||
import { tmpdir } from 'node:os'
|
||||
import { rimraf } from 'rimraf'
|
||||
|
||||
describe('immutable-backups/fileIndex', async () => {
|
||||
it('index File changes', async () => {
|
||||
const dir = await fs.mkdtemp(path.join(tmpdir(), 'immutable-backups-tests'))
|
||||
const immutDir = path.join(dir, '.immutable')
|
||||
const filePath = path.join(dir, 'test.ext')
|
||||
|
||||
await fs.writeFile(filePath, 'data')
|
||||
await FileIndex.indexFile(filePath, immutDir)
|
||||
await fs.mkdir(path.join(immutDir, 'NOTADATE'))
|
||||
await fs.writeFile(path.join(immutDir, 'NOTADATE.file'), 'content')
|
||||
let nb = 0
|
||||
let index, target
|
||||
for await ({ index, target } of FileIndex.listOlderTargets(immutDir, 0)) {
|
||||
assert.strictEqual(true, false, 'Nothing should be eligible for deletion')
|
||||
}
|
||||
nb = 0
|
||||
for await ({ index, target } of FileIndex.listOlderTargets(immutDir, -24 * 60 * 60 * 1000)) {
|
||||
assert.strictEqual(target, filePath)
|
||||
await fs.unlink(index)
|
||||
nb++
|
||||
}
|
||||
assert.strictEqual(nb, 1)
|
||||
await fs.rmdir(path.join(immutDir, 'NOTADATE'))
|
||||
await fs.rm(path.join(immutDir, 'NOTADATE.file'))
|
||||
for await ({ index, target } of FileIndex.listOlderTargets(immutDir, -24 * 60 * 60 * 1000)) {
|
||||
// should remove the empty dir
|
||||
assert.strictEqual(true, false, 'Nothing should have stayed here')
|
||||
}
|
||||
assert.strictEqual((await fs.readdir(immutDir)).length, 0)
|
||||
await rimraf(dir)
|
||||
})
|
||||
|
||||
it('fails correctly', async () => {
|
||||
const dir = await fs.mkdtemp(path.join(tmpdir(), 'immutable-backups-tests'))
|
||||
const immutDir = path.join(dir, '.immutable')
|
||||
await fs.mkdir(immutDir)
|
||||
const placeholderFile = path.join(dir, 'test.ext')
|
||||
await fs.writeFile(placeholderFile, 'data')
|
||||
await FileIndex.indexFile(placeholderFile, immutDir)
|
||||
|
||||
const filePath = path.join(dir, 'test2.ext')
|
||||
await fs.writeFile(filePath, 'data')
|
||||
await FileIndex.indexFile(filePath, immutDir)
|
||||
await assert.rejects(() => FileIndex.indexFile(filePath, immutDir), { code: 'EEXIST' })
|
||||
|
||||
await Directory.makeImmutable(immutDir)
|
||||
await assert.rejects(() => FileIndex.unindexFile(filePath, immutDir), { code: 'EPERM' })
|
||||
await Directory.liftImmutability(immutDir)
|
||||
await rimraf(dir)
|
||||
})
|
||||
|
||||
it('handles bomb index files', async () => {
|
||||
const dir = await fs.mkdtemp(path.join(tmpdir(), 'immutable-backups-tests'))
|
||||
const immutDir = path.join(dir, '.immutable')
|
||||
await fs.mkdir(immutDir)
|
||||
const placeholderFile = path.join(dir, 'test.ext')
|
||||
await fs.writeFile(placeholderFile, 'data')
|
||||
await FileIndex.indexFile(placeholderFile, immutDir)
|
||||
|
||||
const indexDayDir = path.join(immutDir, '1980,11-28')
|
||||
await fs.mkdir(indexDayDir)
|
||||
await fs.writeFile(path.join(indexDayDir, 'big'), Buffer.alloc(2 * 1024 * 1024))
|
||||
assert.rejects(async () => {
|
||||
let index, target
|
||||
for await ({ index, target } of FileIndex.listOlderTargets(immutDir, 0)) {
|
||||
// should remove the empty dir
|
||||
assert.strictEqual(true, false, `Nothing should have stayed here, got ${index} ${target}`)
|
||||
}
|
||||
})
|
||||
await rimraf(dir)
|
||||
})
|
||||
})
|
||||
@@ -1,88 +0,0 @@
|
||||
import { join } from 'node:path'
|
||||
import { createHash } from 'node:crypto'
|
||||
import fs from 'node:fs/promises'
|
||||
import { dirname } from 'path'
|
||||
const MAX_INDEX_FILE_SIZE = 1024 * 1024
|
||||
function sha256(content) {
|
||||
return createHash('sha256').update(content).digest('hex')
|
||||
}
|
||||
|
||||
function formatDate(date) {
|
||||
return date.toISOString().split('T')[0]
|
||||
}
|
||||
|
||||
async function computeIndexFilePath(path, immutabilityIndexPath) {
|
||||
const stat = await fs.stat(path)
|
||||
const date = new Date(stat.birthtimeMs)
|
||||
const day = formatDate(date)
|
||||
const hash = sha256(path)
|
||||
return join(immutabilityIndexPath, day, hash)
|
||||
}
|
||||
|
||||
export async function indexFile(path, immutabilityIndexPath) {
|
||||
const indexFilePath = await computeIndexFilePath(path, immutabilityIndexPath)
|
||||
try {
|
||||
await fs.writeFile(indexFilePath, path, { flag: 'wx' })
|
||||
} catch (err) {
|
||||
// missing dir: make it
|
||||
if (err.code === 'ENOENT') {
|
||||
await fs.mkdir(dirname(indexFilePath), { recursive: true })
|
||||
await fs.writeFile(indexFilePath, path)
|
||||
} else {
|
||||
throw err
|
||||
}
|
||||
}
|
||||
return indexFilePath
|
||||
}
|
||||
|
||||
export async function unindexFile(path, immutabilityIndexPath) {
|
||||
try {
|
||||
const cacheFileName = await computeIndexFilePath(path, immutabilityIndexPath)
|
||||
await fs.unlink(cacheFileName)
|
||||
} catch (err) {
|
||||
if (err.code !== 'ENOENT') {
|
||||
throw err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export async function* listOlderTargets(immutabilityCachePath, immutabilityDuration) {
|
||||
// walk all dir by day until the limit day
|
||||
const limitDate = new Date(Date.now() - immutabilityDuration)
|
||||
|
||||
const limitDay = formatDate(limitDate)
|
||||
const dir = await fs.opendir(immutabilityCachePath)
|
||||
for await (const dirent of dir) {
|
||||
if (dirent.isFile()) {
|
||||
continue
|
||||
}
|
||||
// ensure we have a valid date
|
||||
if (isNaN(new Date(dirent.name))) {
|
||||
continue
|
||||
}
|
||||
// recent enough to be kept
|
||||
if (dirent.name >= limitDay) {
|
||||
continue
|
||||
}
|
||||
const subDirPath = join(immutabilityCachePath, dirent.name)
|
||||
const subdir = await fs.opendir(subDirPath)
|
||||
let nb = 0
|
||||
for await (const hashFileEntry of subdir) {
|
||||
const entryFullPath = join(subDirPath, hashFileEntry.name)
|
||||
const { size } = await fs.stat(entryFullPath)
|
||||
if (size > MAX_INDEX_FILE_SIZE) {
|
||||
throw new Error(`Index file at ${entryFullPath} is too big, ${size} bytes `)
|
||||
}
|
||||
const targetPath = await fs.readFile(entryFullPath, { encoding: 'utf8' })
|
||||
yield {
|
||||
index: entryFullPath,
|
||||
target: targetPath,
|
||||
}
|
||||
nb++
|
||||
}
|
||||
// cleanup older folder
|
||||
if (nb === 0) {
|
||||
await fs.rmdir(subDirPath)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1 +0,0 @@
|
||||
export default path => path.match(/xo-vm-backups\/[^/]+\/[^/]+\.json$/)
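// e.g. matches 'xo-vm-backups/<vmUuid>/<backupTimestamp>.json' (a VM backup metadata file)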
|
||||
@@ -1,37 +0,0 @@
|
||||
#!/usr/bin/env node
|
||||
|
||||
import fs from 'node:fs/promises'
|
||||
import * as Directory from './directory.mjs'
|
||||
import { createLogger } from '@xen-orchestra/log'
|
||||
import { listOlderTargets } from './fileIndex.mjs'
|
||||
import cleanXoCache from './_cleanXoCache.mjs'
|
||||
import loadConfig from './_loadConfig.mjs'
|
||||
|
||||
const { info, warn } = createLogger('xen-orchestra:immutable-backups:liftProtection')
|
||||
|
||||
async function liftRemoteImmutability(immutabilityCachePath, immutabilityDuration) {
|
||||
for await (const { index, target } of listOlderTargets(immutabilityCachePath, immutabilityDuration)) {
|
||||
await Directory.liftImmutability(target, immutabilityCachePath)
|
||||
await fs.unlink(index)
|
||||
await cleanXoCache(target)
|
||||
}
|
||||
}
|
||||
|
||||
async function liftImmutability(remotes) {
|
||||
for (const [remoteId, { indexPath, immutabilityDuration }] of Object.entries(remotes)) {
|
||||
liftRemoteImmutability(indexPath, immutabilityDuration).catch(err =>
|
||||
warn('error during watchRemote', { err, remoteId, indexPath, immutabilityDuration })
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
const { liftEvery, remotes } = await loadConfig()
|
||||
|
||||
if (liftEvery > 0) {
|
||||
info('setup watcher for immutability lifting')
|
||||
setInterval(async () => {
|
||||
liftImmutability(remotes)
|
||||
}, liftEvery)
|
||||
} else {
|
||||
liftImmutability(remotes)
|
||||
}
|
||||
@@ -1,42 +0,0 @@
|
||||
{
|
||||
"private": false,
|
||||
"name": "@xen-orchestra/immutable-backups",
|
||||
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/immutable-backups",
|
||||
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
|
||||
"repository": {
|
||||
"directory": "@xen-orchestra/immutable-backups",
|
||||
"type": "git",
|
||||
"url": "https://github.com/vatesfr/xen-orchestra.git"
|
||||
},
|
||||
"author": {
|
||||
"name": "Vates SAS",
|
||||
"url": "https://vates.fr"
|
||||
},
|
||||
"bin": {
|
||||
"xo-immutable-remote": "./protectRemotes.mjs",
|
||||
"xo-lift-remote-immutability": "./liftProtection.mjs"
|
||||
},
|
||||
"license": "AGPL-3.0-or-later",
|
||||
"version": "1.0.1",
|
||||
"engines": {
|
||||
"node": ">=14.0.0"
|
||||
},
|
||||
"dependencies": {
|
||||
"@vates/async-each": "^1.0.0",
|
||||
"@xen-orchestra/backups": "^0.44.6",
|
||||
"@xen-orchestra/log": "^0.6.0",
|
||||
"app-conf": "^2.3.0",
|
||||
"chokidar": "^3.5.3",
|
||||
"execa": "^5.0.0",
|
||||
"ms": "^2.1.3",
|
||||
"vhd-lib": "^4.7.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"rimraf": "^5.0.5",
|
||||
"tap": "^18.6.1"
|
||||
},
|
||||
"scripts": {
|
||||
"postversion": "npm publish --access public",
|
||||
"test-integration": "tap *.integ.mjs"
|
||||
}
|
||||
}
|
||||
@@ -1,191 +0,0 @@
|
||||
#!/usr/bin/env node
|
||||
|
||||
import fs from 'node:fs/promises'
|
||||
import * as File from './file.mjs'
|
||||
import * as Directory from './directory.mjs'
|
||||
import assert from 'node:assert'
|
||||
import { dirname, join, sep } from 'node:path'
|
||||
import { createLogger } from '@xen-orchestra/log'
|
||||
import chokidar from 'chokidar'
|
||||
import { indexFile } from './fileIndex.mjs'
|
||||
import cleanXoCache from './_cleanXoCache.mjs'
|
||||
import loadConfig from './_loadConfig.mjs'
|
||||
import isInVhdDirectory from './_isInVhdDirectory.mjs'
|
||||
const { debug, info, warn } = createLogger('xen-orchestra:immutable-backups:remote')
|
||||
|
||||
async function test(remotePath, indexPath) {
|
||||
await fs.readdir(remotePath)
|
||||
|
||||
const testPath = join(remotePath, '.test-immut')
|
||||
// cleanup
|
||||
try {
|
||||
await File.liftImmutability(testPath, indexPath)
|
||||
await fs.unlink(testPath)
|
||||
} catch (err) {}
|
||||
// can create , modify and delete a file
|
||||
await fs.writeFile(testPath, `test immut ${new Date()}`)
|
||||
await fs.writeFile(testPath, `test immut change 1 ${new Date()}`)
|
||||
await fs.unlink(testPath)
|
||||
|
||||
// cannot modify or delete an immutable file
|
||||
await fs.writeFile(testPath, `test immut ${new Date()}`)
|
||||
await File.makeImmutable(testPath, indexPath)
|
||||
await assert.rejects(fs.writeFile(testPath, `test immut change 2 ${new Date()}`), { code: 'EPERM' })
|
||||
await assert.rejects(fs.unlink(testPath), { code: 'EPERM' })
|
||||
// can modify and delete a file after lifting immutability
|
||||
await File.liftImmutability(testPath, indexPath)
|
||||
|
||||
await fs.writeFile(testPath, `test immut change 3 ${new Date()}`)
|
||||
await fs.unlink(testPath)
|
||||
}
|
||||
async function handleExistingFile(root, indexPath, path) {
|
||||
try {
|
||||
// a vhd block directory is completely immutable
|
||||
if (isInVhdDirectory(path)) {
|
||||
// this will trigger 3 times per vhd blocks
|
||||
const dir = join(root, dirname(path))
|
||||
if (Directory.isImmutable(dir)) {
|
||||
await indexFile(dir, indexPath)
|
||||
}
|
||||
} else {
|
||||
// other files are made immutable on a per-file basis
|
||||
const fullPath = join(root, path)
|
||||
if (File.isImmutable(fullPath)) {
|
||||
await indexFile(fullPath, indexPath)
|
||||
}
|
||||
}
|
||||
} catch (err) {
|
||||
if (err.code !== 'EEXIST') {
|
||||
// there can be a symbolic link in the tree
|
||||
warn('handleExistingFile', err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async function handleNewFile(root, indexPath, pendingVhds, path) {
|
||||
// with awaitWriteFinish we have complete files here
|
||||
// we can make them immutable
|
||||
|
||||
if (isInVhdDirectory(path)) {
|
||||
// watching a vhd block
|
||||
// wait for header/footer and BAT before making this immutable recursively
|
||||
const splitted = path.split(sep)
|
||||
const vmUuid = splitted[1]
|
||||
const vdiUuid = splitted[4]
|
||||
const uniqPath = `${vmUuid}/${vdiUuid}`
|
||||
const { existing } = pendingVhds.get(uniqPath) ?? {}
|
||||
if (existing === undefined) {
|
||||
pendingVhds.set(uniqPath, { existing: 1, lastModified: Date.now() })
|
||||
} else {
|
||||
// we already have two of the key files, and we just got the last one
|
||||
if (existing === 2) {
|
||||
await Directory.makeImmutable(join(root, dirname(path)), indexPath)
|
||||
pendingVhds.delete(uniqPath)
|
||||
} else {
|
||||
// wait for the other
|
||||
pendingVhds.set(uniqPath, { existing: existing + 1, lastModified: Date.now() })
|
||||
}
|
||||
}
|
||||
} else {
|
||||
const fullFilePath = join(root, path)
|
||||
await File.makeImmutable(fullFilePath, indexPath)
|
||||
await cleanXoCache(fullFilePath)
|
||||
}
|
||||
}
|
||||
export async function watchRemote(remoteId, { root, immutabilityDuration, rebuildIndexOnStart = false, indexPath }) {
|
||||
// create index directory
|
||||
await fs.mkdir(indexPath, { recursive: true })
|
||||
|
||||
// test if fs and index directories are well configured
|
||||
await test(root, indexPath)
|
||||
|
||||
// add duration and watch status in the metadata.json of the remote
|
||||
const settingPath = join(root, 'immutability.json')
|
||||
try {
|
||||
// this file won't be made mutable by the immutability-lifting process
|
||||
await File.liftImmutability(settingPath)
|
||||
} catch (error) {
|
||||
// the file may not exist, which is not really a problem
|
||||
info('lifting immutability on current settings', error)
|
||||
}
|
||||
await fs.writeFile(
|
||||
settingPath,
|
||||
JSON.stringify({
|
||||
since: Date.now(),
|
||||
immutable: true,
|
||||
duration: immutabilityDuration,
|
||||
})
|
||||
)
|
||||
// no index path in makeImmutable(): the immutability won't be lifted
|
||||
File.makeImmutable(settingPath)
|
||||
|
||||
// we wait for footer/header AND BAT to be written before locking a vhd directory
|
||||
// this map allows us to track VHDs with partial metadata
|
||||
const pendingVhds = new Map()
|
||||
// cleanup pending vhd map periodically
|
||||
setInterval(
|
||||
() => {
|
||||
pendingVhds.forEach(({ lastModified, existing }, path) => {
|
||||
if (Date.now() - lastModified > 60 * 60 * 1000) {
|
||||
pendingVhds.delete(path)
|
||||
warn(`vhd at ${path} is incomplete since ${lastModified}`, { existing, lastModified, path })
|
||||
}
|
||||
})
|
||||
},
|
||||
60 * 60 * 1000
|
||||
)
|
||||
|
||||
// watch the remote for any new VM metadata json file
|
||||
const PATHS = [
|
||||
'xo-config-backups/*/*/data',
|
||||
'xo-config-backups/*/*/data.json',
|
||||
'xo-config-backups/*/*/metadata.json',
|
||||
'xo-pool-metadata-backups/*/metadata.json',
|
||||
'xo-pool-metadata-backups/*/data',
|
||||
// xo-vm-backups/<vmuuid>/
|
||||
'xo-vm-backups/*/*.json',
|
||||
'xo-vm-backups/*/*.xva',
|
||||
'xo-vm-backups/*/*.xva.checksum',
|
||||
// xo-vm-backups/<vmuuid>/vdis/<jobid>/<vdiUuid>
|
||||
'xo-vm-backups/*/vdis/*/*/*.vhd', // can be an alias or a vhd file
|
||||
// for vhd directory :
|
||||
'xo-vm-backups/*/vdis/*/*/data/*.vhd/bat',
|
||||
'xo-vm-backups/*/vdis/*/*/data/*.vhd/header',
|
||||
'xo-vm-backups/*/vdis/*/*/data/*.vhd/footer',
|
||||
]
|
||||
|
||||
let ready = false
|
||||
const watcher = chokidar.watch(PATHS, {
|
||||
ignored: [
|
||||
/(^|[/\\])\../, // ignore dotfiles
|
||||
/\.lock$/,
|
||||
],
|
||||
cwd: root,
|
||||
recursive: false, // vhd directories can generate a lot of folders, don't let chokidar choke on this
|
||||
ignoreInitial: !rebuildIndexOnStart,
|
||||
depth: 7,
|
||||
awaitWriteFinish: true,
|
||||
})
|
||||
|
||||
// Add event listeners.
|
||||
watcher
|
||||
.on('add', async path => {
|
||||
debug(`File ${path} has been added ${path.split('/').length}`)
|
||||
if (ready) {
|
||||
await handleNewFile(root, indexPath, pendingVhds, path)
|
||||
} else {
|
||||
await handleExistingFile(root, indexPath, path)
|
||||
}
|
||||
})
|
||||
.on('error', error => warn(`Watcher error: ${error}`))
|
||||
.on('ready', () => {
|
||||
ready = true
|
||||
info('Ready for changes')
|
||||
})
|
||||
}
|
||||
|
||||
const { remotes } = await loadConfig()
|
||||
|
||||
for (const [remoteId, remote] of Object.entries(remotes)) {
|
||||
watchRemote(remoteId, remote).catch(err => warn('error during watchRemote', { err, remoteId, remote }))
|
||||
}
|
||||
@@ -2,10 +2,10 @@
|
||||
"files": [],
|
||||
"references": [
|
||||
{
|
||||
"path": "./tsconfig.node.json",
|
||||
"path": "./tsconfig.node.json"
|
||||
},
|
||||
{
|
||||
"path": "./tsconfig.app.json",
|
||||
},
|
||||
],
|
||||
"path": "./tsconfig.app.json"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
{
|
||||
"private": true,
|
||||
"name": "@xen-orchestra/proxy",
|
||||
"version": "0.26.45",
|
||||
"version": "0.26.42",
|
||||
"license": "AGPL-3.0-or-later",
|
||||
"description": "XO Proxy used to remotely execute backup jobs",
|
||||
"keywords": [
|
||||
@@ -29,16 +29,16 @@
|
||||
"@koa/router": "^12.0.0",
|
||||
"@vates/cached-dns.lookup": "^1.0.0",
|
||||
"@vates/compose": "^2.1.0",
|
||||
"@vates/decorate-with": "^2.1.0",
|
||||
"@vates/decorate-with": "^2.0.0",
|
||||
"@vates/disposable": "^0.1.5",
|
||||
"@xen-orchestra/async-map": "^0.1.2",
|
||||
"@xen-orchestra/backups": "^0.44.6",
|
||||
"@xen-orchestra/fs": "^4.1.4",
|
||||
"@xen-orchestra/backups": "^0.44.3",
|
||||
"@xen-orchestra/fs": "^4.1.3",
|
||||
"@xen-orchestra/log": "^0.6.0",
|
||||
"@xen-orchestra/mixin": "^0.1.0",
|
||||
"@xen-orchestra/mixins": "^0.14.0",
|
||||
"@xen-orchestra/self-signed": "^0.2.0",
|
||||
"@xen-orchestra/xapi": "^4.2.0",
|
||||
"@xen-orchestra/self-signed": "^0.1.3",
|
||||
"@xen-orchestra/xapi": "^4.1.0",
|
||||
"ajv": "^8.0.3",
|
||||
"app-conf": "^2.3.0",
|
||||
"async-iterator-to-stream": "^1.1.0",
|
||||
@@ -60,7 +60,7 @@
|
||||
"source-map-support": "^0.5.16",
|
||||
"stoppable": "^1.0.6",
|
||||
"xdg-basedir": "^5.1.0",
|
||||
"xen-api": "^2.0.1",
|
||||
"xen-api": "^2.0.0",
|
||||
"xo-common": "^0.8.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
|
||||
@@ -9,7 +9,7 @@
|
||||
"type": "git",
|
||||
"url": "https://github.com/vatesfr/xen-orchestra.git"
|
||||
},
|
||||
"version": "0.2.0",
|
||||
"version": "0.1.3",
|
||||
"engines": {
|
||||
"node": ">=15.6"
|
||||
},
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
import { readChunkStrict, skipStrict } from '@vates/read-chunk'
|
||||
import _computeGeometryForSize from 'vhd-lib/_computeGeometryForSize.js'
|
||||
import { createFooter, createHeader } from 'vhd-lib/_createFooterHeader.js'
|
||||
import { DISK_TYPES, FOOTER_SIZE } from 'vhd-lib/_constants.js'
|
||||
@@ -69,10 +68,6 @@ export default class VhdEsxiSeSparse extends VhdAbstract {
|
||||
#grainTableOffsetBytes
|
||||
#grainOffsetBytes
|
||||
|
||||
#reading = false
|
||||
#stream
|
||||
#streamOffset = 0
|
||||
|
||||
static async open(esxi, datastore, path, parentVhd, opts) {
|
||||
const vhd = new VhdEsxiSeSparse(esxi, datastore, path, parentVhd, opts)
|
||||
await vhd.readHeaderAndFooter()
|
||||
@@ -107,58 +102,12 @@ export default class VhdEsxiSeSparse extends VhdAbstract {
|
||||
)
|
||||
}
|
||||
|
||||
// since most of the data is written sequentially, we always open a stream from `start` to the end of the file
// if we have to rewind, we destroy the stream and recreate it with the right `start`
// we also recreate the stream if there is too much distance between the current position and the wanted position
|
||||
|
||||
async #read(start, length) {
|
||||
if (!this.#footer) {
|
||||
// before the footer is loaded, we still need to be able to read the header and footer
|
||||
return (await this.#esxi.download(this.#datastore, this.#path, `${start}-${start + length - 1}`)).buffer()
|
||||
}
|
||||
if (this.#reading) {
|
||||
throw new Error('reading must be done sequentially')
|
||||
}
|
||||
try {
|
||||
const MAX_SKIPPABLE_LENGTH = 2 * 1024 * 1024
|
||||
this.#reading = true
|
||||
if (this.#stream !== undefined) {
|
||||
// stream is already ahead or to far behind
|
||||
if (this.#streamOffset > start || this.#streamOffset + MAX_SKIPPABLE_LENGTH < start) {
|
||||
this.#stream.destroy()
|
||||
this.#stream = undefined
|
||||
this.#streamOffset = 0
|
||||
}
|
||||
}
|
||||
// no stream
|
||||
if (this.#stream === undefined) {
|
||||
const end = this.footer.currentSize - 1
|
||||
const res = await this.#esxi.download(this.#datastore, this.#path, `${start}-${end}`)
|
||||
this.#stream = res.body
|
||||
this.#streamOffset = start
|
||||
}
|
||||
|
||||
// stream a little behind
|
||||
if (this.#streamOffset < start) {
|
||||
await skipStrict(this.#stream, start - this.#streamOffset)
|
||||
this.#streamOffset = start
|
||||
}
|
||||
|
||||
// really read data
|
||||
this.#streamOffset += length
|
||||
const data = await readChunkStrict(this.#stream, length)
|
||||
return data
|
||||
} catch (error) {
|
||||
error.start = start
|
||||
error.length = length
|
||||
error.streamLength = this.footer.currentSize
|
||||
this.#stream?.destroy()
|
||||
this.#stream = undefined
|
||||
this.#streamOffset = 0
|
||||
throw error
|
||||
} finally {
|
||||
this.#reading = false
|
||||
}
|
||||
const buffer = await (
|
||||
await this.#esxi.download(this.#datastore, this.#path, `${start}-${start + length - 1}`)
|
||||
).buffer()
|
||||
strictEqual(buffer.length, length)
|
||||
return buffer
|
||||
}
|
||||
|
||||
async readHeaderAndFooter() {
|
||||
@@ -250,28 +199,15 @@ export default class VhdEsxiSeSparse extends VhdAbstract {
|
||||
|
||||
async readBlock(blockId) {
|
||||
let changed = false
|
||||
const parentBlock = await this.#parentVhd.readBlock(blockId)
|
||||
const parentBuffer = parentBlock.buffer
|
||||
const grainOffsets = this.#grainIndex.get(blockId) // may be undefined if the child contains block and lookMissingBlockInParent=true
|
||||
|
||||
// a negative value indicates that it's not an offset
|
||||
// SE_SPARSE_GRAIN_NON_ALLOCATED means we have to look into the parent data
|
||||
const isLocallyFull = !grainOffsets.some(value => value === -SE_SPARSE_GRAIN_NON_ALLOCATED)
|
||||
|
||||
let parentBuffer, parentBlock
|
||||
// don't read from the parent if the current block is already completely described
|
||||
if (isLocallyFull) {
|
||||
parentBuffer = Buffer.alloc(512 /* bitmap */ + 2 * 1024 * 1024 /* data */, 0)
|
||||
parentBuffer.fill(255, 0, 512) // bitmap is full of bit 1
|
||||
} else {
|
||||
parentBlock = await this.#parentVhd.readBlock(blockId)
|
||||
parentBuffer = parentBlock.buffer
|
||||
}
|
||||
const EMPTY_GRAIN = Buffer.alloc(GRAIN_SIZE_BYTES, 0)
|
||||
for (const index in grainOffsets) {
|
||||
const value = grainOffsets[index]
|
||||
let data
|
||||
if (value > 0) {
|
||||
// it's the offset in byte of a grain type SE_SPARSE_GRAIN_ALLOCATED
|
||||
// @todo this part can be quite slow when grains are not sorted
|
||||
data = await this.#read(value, GRAIN_SIZE_BYTES)
|
||||
} else {
|
||||
// back to the real grain type
|
||||
@@ -294,7 +230,7 @@ export default class VhdEsxiSeSparse extends VhdAbstract {
|
||||
}
|
||||
}
|
||||
// no need to copy if all the data comes from the parent
|
||||
return changed || !parentBlock
|
||||
return changed
|
||||
? {
|
||||
id: blockId,
|
||||
bitmap: parentBuffer.slice(0, 512),
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
{
|
||||
"license": "ISC",
|
||||
"private": false,
|
||||
"version": "0.4.0",
|
||||
"version": "0.3.1",
|
||||
"name": "@xen-orchestra/vmware-explorer",
|
||||
"dependencies": {
|
||||
"@vates/node-vsphere-soap": "^2.0.0",
|
||||
|
||||
@@ -2,12 +2,12 @@
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="UTF-8" />
|
||||
<link rel="icon" href="./favicon.svg" type="image/svg+xml" />
|
||||
<link rel="icon" href="/favicon.svg" type="image/svg+xml" />
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
|
||||
<title>Xen Orchestra</title>
|
||||
</head>
|
||||
<body>
|
||||
<div id="app"></div>
|
||||
<script type="module" src="./src/main.ts"></script>
|
||||
<script type="module" src="/src/main.ts"></script>
|
||||
</body>
|
||||
</html>
|
||||
|
||||
@@ -18,7 +18,6 @@
|
||||
"npm-run-all2": "^6.1.1",
|
||||
"pinia": "^2.1.7",
|
||||
"typescript": "~5.3.3",
|
||||
"unplugin-vue-router": "^0.7.0",
|
||||
"vite": "^5.0.11",
|
||||
"vue": "^3.4.13",
|
||||
"vue-tsc": "^1.8.27"
|
||||
|
||||
@@ -1,3 +1,3 @@
|
||||
<template>
|
||||
<RouterView />
|
||||
<div>Xen Orchestra 6</div>
|
||||
</template>
|
||||
|
||||
@@ -1,17 +1,10 @@
|
||||
import { createApp } from 'vue'
|
||||
import { createPinia } from 'pinia'
|
||||
import App from './App.vue'
|
||||
// eslint-disable-next-line import/no-unresolved -- https://github.com/posva/unplugin-vue-router/issues/232
|
||||
import { createRouter, createWebHashHistory } from 'vue-router/auto'
|
||||
import '@xen-orchestra/web-core/assets/css/base.pcss'
|
||||
|
||||
const app = createApp(App)
|
||||
|
||||
const router = createRouter({
|
||||
history: createWebHashHistory(),
|
||||
})
|
||||
|
||||
app.use(createPinia())
|
||||
app.use(router)
|
||||
|
||||
app.mount('#app')
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
<template>Welcome to Xen Orchestra 6</template>
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"extends": "@vue/tsconfig/tsconfig.dom.json",
|
||||
"include": ["env.d.ts", "typed-router.d.ts", "src/**/*", "src/**/*.vue"],
|
||||
"include": ["env.d.ts", "src/**/*", "src/**/*.vue"],
|
||||
"exclude": ["src/**/__tests__/*"],
|
||||
"compilerOptions": {
|
||||
"composite": true,
|
||||
|
||||
@@ -2,10 +2,10 @@
|
||||
"files": [],
|
||||
"references": [
|
||||
{
|
||||
"path": "./tsconfig.node.json",
|
||||
"path": "./tsconfig.node.json"
|
||||
},
|
||||
{
|
||||
"path": "./tsconfig.app.json",
|
||||
},
|
||||
],
|
||||
"path": "./tsconfig.app.json"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
144 @xen-orchestra/web/typed-router.d.ts vendored
@@ -1,144 +0,0 @@
|
||||
/* eslint-disable */
|
||||
/* prettier-ignore */
|
||||
// @ts-nocheck
|
||||
// Generated by unplugin-vue-router. ‼️ DO NOT MODIFY THIS FILE ‼️
|
||||
// It's recommended to commit this file.
|
||||
// Make sure to add this file to your tsconfig.json file as an "includes" or "files" entry.
|
||||
|
||||
/// <reference types="unplugin-vue-router/client" />
|
||||
|
||||
import type {
|
||||
// type safe route locations
|
||||
RouteLocationTypedList,
|
||||
RouteLocationResolvedTypedList,
|
||||
RouteLocationNormalizedTypedList,
|
||||
RouteLocationNormalizedLoadedTypedList,
|
||||
RouteLocationAsString,
|
||||
RouteLocationAsRelativeTypedList,
|
||||
RouteLocationAsPathTypedList,
|
||||
|
||||
// helper types
|
||||
// route definitions
|
||||
RouteRecordInfo,
|
||||
ParamValue,
|
||||
ParamValueOneOrMore,
|
||||
ParamValueZeroOrMore,
|
||||
ParamValueZeroOrOne,
|
||||
|
||||
// vue-router extensions
|
||||
_RouterTyped,
|
||||
RouterLinkTyped,
|
||||
RouterLinkPropsTyped,
|
||||
NavigationGuard,
|
||||
UseLinkFnTyped,
|
||||
|
||||
// data fetching
|
||||
_DataLoader,
|
||||
_DefineLoaderOptions,
|
||||
} from 'unplugin-vue-router/types'
|
||||
|
||||
declare module 'vue-router/auto/routes' {
|
||||
export interface RouteNamedMap {
|
||||
'/': RouteRecordInfo<'/', '/', Record<never, never>, Record<never, never>>
|
||||
}
|
||||
}
|
||||
|
||||
declare module 'vue-router/auto' {
|
||||
import type { RouteNamedMap } from 'vue-router/auto/routes'
|
||||
|
||||
export type RouterTyped = _RouterTyped<RouteNamedMap>
|
||||
|
||||
/**
|
||||
* Type safe version of `RouteLocationNormalized` (the type of `to` and `from` in navigation guards).
|
||||
* Allows passing the name of the route to be passed as a generic.
|
||||
*/
|
||||
export type RouteLocationNormalized<Name extends keyof RouteNamedMap = keyof RouteNamedMap> =
|
||||
RouteLocationNormalizedTypedList<RouteNamedMap>[Name]
|
||||
|
||||
/**
|
||||
* Type safe version of `RouteLocationNormalizedLoaded` (the return type of `useRoute()`).
|
||||
* Allows passing the name of the route to be passed as a generic.
|
||||
*/
|
||||
export type RouteLocationNormalizedLoaded<Name extends keyof RouteNamedMap = keyof RouteNamedMap> =
|
||||
RouteLocationNormalizedLoadedTypedList<RouteNamedMap>[Name]
|
||||
|
||||
/**
|
||||
* Type safe version of `RouteLocationResolved` (the returned route of `router.resolve()`).
|
||||
* Allows passing the name of the route to be passed as a generic.
|
||||
*/
|
||||
export type RouteLocationResolved<Name extends keyof RouteNamedMap = keyof RouteNamedMap> =
|
||||
RouteLocationResolvedTypedList<RouteNamedMap>[Name]
|
||||
|
||||
/**
|
||||
* Type safe version of `RouteLocation` . Allows passing the name of the route to be passed as a generic.
|
||||
*/
|
||||
export type RouteLocation<Name extends keyof RouteNamedMap = keyof RouteNamedMap> =
|
||||
RouteLocationTypedList<RouteNamedMap>[Name]
|
||||
|
||||
/**
|
||||
* Type safe version of `RouteLocationRaw` . Allows passing the name of the route to be passed as a generic.
|
||||
*/
|
||||
export type RouteLocationRaw<Name extends keyof RouteNamedMap = keyof RouteNamedMap> =
|
||||
| RouteLocationAsString<RouteNamedMap>
|
||||
| RouteLocationAsRelativeTypedList<RouteNamedMap>[Name]
|
||||
| RouteLocationAsPathTypedList<RouteNamedMap>[Name]
|
||||
|
||||
/**
|
||||
* Generate a type safe params for a route location. Requires the name of the route to be passed as a generic.
|
||||
*/
|
||||
export type RouteParams<Name extends keyof RouteNamedMap> = RouteNamedMap[Name]['params']
|
||||
/**
|
||||
* Generate a type safe raw params for a route location. Requires the name of the route to be passed as a generic.
|
||||
*/
|
||||
export type RouteParamsRaw<Name extends keyof RouteNamedMap> = RouteNamedMap[Name]['paramsRaw']
|
||||
|
||||
export function useRouter(): RouterTyped
|
||||
export function useRoute<Name extends keyof RouteNamedMap = keyof RouteNamedMap>(
|
||||
name?: Name
|
||||
): RouteLocationNormalizedLoadedTypedList<RouteNamedMap>[Name]
|
||||
|
||||
export const useLink: UseLinkFnTyped<RouteNamedMap>
|
||||
|
||||
export function onBeforeRouteLeave(guard: NavigationGuard<RouteNamedMap>): void
|
||||
export function onBeforeRouteUpdate(guard: NavigationGuard<RouteNamedMap>): void
|
||||
|
||||
export const RouterLink: RouterLinkTyped<RouteNamedMap>
|
||||
export const RouterLinkProps: RouterLinkPropsTyped<RouteNamedMap>
|
||||
|
||||
// Experimental Data Fetching
|
||||
|
||||
export function defineLoader<
|
||||
P extends Promise<any>,
|
||||
Name extends keyof RouteNamedMap = keyof RouteNamedMap,
|
||||
isLazy extends boolean = false,
|
||||
>(
|
||||
name: Name,
|
||||
loader: (route: RouteLocationNormalizedLoaded<Name>) => P,
|
||||
options?: _DefineLoaderOptions<isLazy>
|
||||
): _DataLoader<Awaited<P>, isLazy>
|
||||
export function defineLoader<P extends Promise<any>, isLazy extends boolean = false>(
|
||||
loader: (route: RouteLocationNormalizedLoaded) => P,
|
||||
options?: _DefineLoaderOptions<isLazy>
|
||||
): _DataLoader<Awaited<P>, isLazy>
|
||||
|
||||
export {
|
||||
_definePage as definePage,
|
||||
_HasDataLoaderMeta as HasDataLoaderMeta,
|
||||
_setupDataFetchingGuard as setupDataFetchingGuard,
|
||||
_stopDataFetchingScope as stopDataFetchingScope,
|
||||
} from 'unplugin-vue-router/runtime'
|
||||
}
|
||||
|
||||
declare module 'vue-router' {
|
||||
import type { RouteNamedMap } from 'vue-router/auto/routes'
|
||||
|
||||
export interface TypesConfig {
|
||||
beforeRouteUpdate: NavigationGuard<RouteNamedMap>
|
||||
beforeRouteLeave: NavigationGuard<RouteNamedMap>
|
||||
|
||||
$route: RouteLocationNormalizedLoadedTypedList<RouteNamedMap>[keyof RouteNamedMap]
|
||||
$router: _RouterTyped<RouteNamedMap>
|
||||
|
||||
RouterLink: RouterLinkTyped<RouteNamedMap>
|
||||
}
|
||||
}
|
||||
@@ -2,12 +2,10 @@ import { fileURLToPath, URL } from 'node:url'
|
||||
|
||||
import { defineConfig } from 'vite'
|
||||
import vue from '@vitejs/plugin-vue'
|
||||
import vueRouter from 'unplugin-vue-router/vite'
|
||||
|
||||
// https://vitejs.dev/config/
|
||||
export default defineConfig({
|
||||
base: './',
|
||||
plugins: [vueRouter(), vue()],
|
||||
plugins: [vue()],
|
||||
resolve: {
|
||||
alias: {
|
||||
'@': fileURLToPath(new URL('./src', import.meta.url)),
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@xen-orchestra/xapi",
|
||||
"version": "4.2.0",
|
||||
"version": "4.1.0",
|
||||
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/xapi",
|
||||
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
|
||||
"repository": {
|
||||
@@ -16,7 +16,7 @@
|
||||
},
|
||||
"main": "./index.mjs",
|
||||
"peerDependencies": {
|
||||
"xen-api": "^2.0.1"
|
||||
"xen-api": "^2.0.0"
|
||||
},
|
||||
"scripts": {
|
||||
"postversion": "npm publish --access public",
|
||||
@@ -24,7 +24,7 @@
|
||||
},
|
||||
"dependencies": {
|
||||
"@vates/async-each": "^1.0.0",
|
||||
"@vates/decorate-with": "^2.1.0",
|
||||
"@vates/decorate-with": "^2.0.0",
|
||||
"@vates/nbd-client": "^3.0.0",
|
||||
"@vates/task": "^0.2.0",
|
||||
"@xen-orchestra/async-map": "^0.1.2",
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
../../scripts/npmignore
|
||||
@@ -1,3 +0,0 @@
|
||||
const formatCounter = counter => String(counter).padStart(8, '0')
|
||||
|
||||
export const formatBlockPath = (basePath, counter) => `${basePath}/${formatCounter(counter)}`
|
||||
@@ -1,5 +0,0 @@
|
||||
export function isNotEmptyRef(val) {
|
||||
const EMPTY = 'OpaqueRef:NULL'
|
||||
const PREFIX = 'OpaqueRef:'
|
||||
return val !== EMPTY && typeof val === 'string' && val.startsWith(PREFIX)
|
||||
}
|
||||
@@ -1,42 +0,0 @@
|
||||
// from package xml-escape
|
||||
function escape(string) {
|
||||
if (string === null || string === undefined) return
|
||||
if (typeof string === 'number') {
|
||||
return string
|
||||
}
|
||||
const map = {
|
||||
'>': '>',
|
||||
'<': '<',
|
||||
"'": ''',
|
||||
'"': '"',
|
||||
'&': '&',
|
||||
}
|
||||
|
||||
const pattern = '([&"<>\'])'
|
||||
return string.replace(new RegExp(pattern, 'g'), function (str, item) {
|
||||
return map[item]
|
||||
})
|
||||
}
|
||||
|
||||
function formatDate(d) {
|
||||
return d.toISOString().replaceAll('-', '').replace('.000Z', 'Z')
|
||||
}
|
||||
|
||||
export default function toOvaXml(obj) {
|
||||
if (Array.isArray(obj)) {
|
||||
return `<value><array><data>${obj.map(val => toOvaXml(val)).join('')}</data></array></value>`
|
||||
}
|
||||
|
||||
if (typeof obj === 'object') {
|
||||
if (obj instanceof Date) {
|
||||
return `<value><dateTime.iso8601>${escape(formatDate(obj))}</dateTime.iso8601></value>`
|
||||
}
|
||||
return `<value><struct>${Object.entries(obj)
|
||||
.map(([key, value]) => `<member><name>${escape(key)}</name>${toOvaXml(value)}</member>`)
|
||||
.join('')}</struct></value>`
|
||||
}
|
||||
if (typeof obj === 'boolean') {
|
||||
return `<value><boolean>${obj ? 1 : 0}</boolean></value>`
|
||||
}
|
||||
return `<value>${escape(obj)}</value>`
|
||||
}
|
||||
@@ -1,57 +0,0 @@
|
||||
import { formatBlockPath } from './_formatBlockPath.mjs'
|
||||
import { fromCallback } from 'promise-toolbox'
|
||||
import { readChunkStrict } from '@vates/read-chunk'
|
||||
import { xxhash64 } from 'hash-wasm'
|
||||
|
||||
export const XVA_DISK_CHUNK_LENGTH = 1024 * 1024
|
||||
|
||||
async function addEntry(pack, name, buffer) {
|
||||
await fromCallback.call(pack, pack.entry, { name }, buffer)
|
||||
}
|
||||
|
||||
async function writeBlock(pack, data, name) {
|
||||
if (data.length < XVA_DISK_CHUNK_LENGTH) {
|
||||
data = Buffer.concat([data, Buffer.alloc(XVA_DISK_CHUNK_LENGTH - data.length, 0)])
|
||||
}
|
||||
await addEntry(pack, name, data)
|
||||
// weirdly, ocaml and xxhash return the bytes in reverse order to each other
|
||||
const hash = (await xxhash64(data)).toString('hex').toUpperCase()
|
||||
await addEntry(pack, `${name}.xxhash`, Buffer.from(hash, 'utf8'))
|
||||
}
|
||||
|
||||
export default async function addDisk(pack, vhd, basePath) {
|
||||
let counter = 0
|
||||
let written
|
||||
let lastBlockWrittenAt = Date.now()
|
||||
const MAX_INTERVAL_BETWEEN_BLOCKS = 60 * 1000
|
||||
const empty = Buffer.alloc(XVA_DISK_CHUNK_LENGTH, 0)
|
||||
const stream = await vhd.rawContent()
|
||||
let lastBlockLength
|
||||
const diskSize = vhd.footer.currentSize
|
||||
let remaining = diskSize
|
||||
while (remaining > 0) {
|
||||
lastBlockLength = Math.min(XVA_DISK_CHUNK_LENGTH, remaining)
|
||||
const data = await readChunkStrict(stream, lastBlockLength)
|
||||
remaining -= lastBlockLength
|
||||
if (
|
||||
// write first block
|
||||
counter === 0 ||
|
||||
// write all non empty blocks
|
||||
!data.equals(empty) ||
|
||||
// write one block from time to time to ensure there is no timeout
|
||||
// occurring while passing empty blocks
|
||||
Date.now() - lastBlockWrittenAt > MAX_INTERVAL_BETWEEN_BLOCKS
|
||||
) {
|
||||
written = true
|
||||
await writeBlock(pack, data, formatBlockPath(basePath, counter))
|
||||
lastBlockWrittenAt = Date.now()
|
||||
} else {
|
||||
written = false
|
||||
}
|
||||
counter++
|
||||
}
|
||||
if (!written) {
|
||||
// last block must be present
|
||||
await writeBlock(pack, empty, formatBlockPath(basePath, counter - 1))
|
||||
}
|
||||
}
|
||||
@@ -1,156 +0,0 @@
|
||||
import assert from 'node:assert'
|
||||
|
||||
import { fromCallback } from 'promise-toolbox'
|
||||
import { v4 as uuid } from 'uuid'
|
||||
import defaultsDeep from 'lodash.defaultsdeep'
|
||||
|
||||
import { DEFAULT_VBD } from './templates/vbd.mjs'
|
||||
import { DEFAULT_VDI } from './templates/vdi.mjs'
|
||||
import { DEFAULT_VIF } from './templates/vif.mjs'
|
||||
import { DEFAULT_VM } from './templates/vm.mjs'
|
||||
import toOvaXml from './_toOvaXml.mjs'
|
||||
import { XVA_DISK_CHUNK_LENGTH } from './_writeDisk.mjs'
|
||||
|
||||
export default async function writeOvaXml(
|
||||
pack,
|
||||
{ memory, networks, nCpus, firmware, vdis, vhds, ...vmSnapshot },
|
||||
{ sr, network }
|
||||
) {
|
||||
let refId = 0
|
||||
function nextRef() {
|
||||
return 'Ref:' + String(refId++).padStart(3, '0')
|
||||
}
|
||||
const data = {
|
||||
version: {
|
||||
hostname: 'localhost',
|
||||
date: '2022-01-01',
|
||||
product_version: '8.2.1',
|
||||
product_brand: 'XCP-ng',
|
||||
build_number: 'release/yangtze/master/58',
|
||||
xapi_major: 1,
|
||||
xapi_minor: 20,
|
||||
export_vsn: 2,
|
||||
},
|
||||
objects: [],
|
||||
}
|
||||
const vm = defaultsDeep(
|
||||
{
|
||||
id: nextRef(),
|
||||
// you can pass a full snapshot and nothing more to do
|
||||
snapshot: vmSnapshot,
|
||||
},
|
||||
{
|
||||
// some data need a little more work to be usable
|
||||
// if they are not already in vm
|
||||
snapshot: {
|
||||
HVM_boot_params: {
|
||||
firmware,
|
||||
},
|
||||
memory_static_max: memory,
|
||||
memory_static_min: memory,
|
||||
memory_dynamic_max: memory,
|
||||
memory_dynamic_min: memory,
|
||||
other_config: {
|
||||
mac_seed: uuid(),
|
||||
},
|
||||
uuid: uuid(),
|
||||
VCPUs_at_startup: nCpus,
|
||||
VCPUs_max: nCpus,
|
||||
},
|
||||
},
|
||||
DEFAULT_VM
|
||||
)
|
||||
|
||||
data.objects.push(vm)
|
||||
const srObj = defaultsDeep(
|
||||
{
|
||||
class: 'SR',
|
||||
id: nextRef(),
|
||||
snapshot: sr,
|
||||
},
|
||||
{
|
||||
snapshot: {
|
||||
VDIs: [],
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
data.objects.push(srObj)
|
||||
assert.strictEqual(vhds.length, vdis.length)
|
||||
for (let index = 0; index < vhds.length; index++) {
|
||||
const userdevice = index + 1
|
||||
const vhd = vhds[index]
|
||||
const alignedSize = Math.ceil(vdis[index].virtual_size / XVA_DISK_CHUNK_LENGTH) * XVA_DISK_CHUNK_LENGTH
|
||||
const vdi = defaultsDeep(
|
||||
{
|
||||
id: nextRef(),
|
||||
// overwrite SR from an opaque ref to a ref:
|
||||
snapshot: { ...vdis[index], SR: srObj.id, virtual_size: alignedSize },
|
||||
},
|
||||
{
|
||||
snapshot: {
|
||||
uuid: uuid(),
|
||||
},
|
||||
},
|
||||
DEFAULT_VDI
|
||||
)
|
||||
|
||||
data.objects.push(vdi)
|
||||
srObj.snapshot.VDIs.push(vdi.id)
|
||||
vhd.ref = vdi.id
|
||||
|
||||
const vbd = defaultsDeep(
|
||||
{
|
||||
id: nextRef(),
|
||||
snapshot: {
|
||||
device: `xvd${String.fromCharCode('a'.charCodeAt(0) + index)}`,
|
||||
uuid: uuid(),
|
||||
userdevice,
|
||||
VM: vm.id,
|
||||
VDI: vdi.id,
|
||||
},
|
||||
},
|
||||
DEFAULT_VBD
|
||||
)
|
||||
data.objects.push(vbd)
|
||||
vdi.snapshot.vbds.push(vbd.id)
|
||||
vm.snapshot.VBDs.push(vbd.id)
|
||||
}
|
||||
|
||||
if (network && networks?.length) {
|
||||
const networkObj = defaultsDeep(
|
||||
{
|
||||
class: 'network',
|
||||
id: nextRef(),
|
||||
snapshot: network,
|
||||
},
|
||||
{
|
||||
snapshot: {
|
||||
vifs: [],
|
||||
},
|
||||
}
|
||||
)
|
||||
data.objects.push(networkObj)
|
||||
let vifIndex = 0
|
||||
for (const sourceNetwork of networks) {
|
||||
const vif = defaultsDeep(
|
||||
{
|
||||
id: nextRef(),
|
||||
snapshot: {
|
||||
device: ++vifIndex,
|
||||
MAC: sourceNetwork.macAddress,
|
||||
MAC_autogenerated: sourceNetwork.isGenerated,
|
||||
uuid: uuid(),
|
||||
VM: vm.id,
|
||||
network: networkObj.id,
|
||||
},
|
||||
},
|
||||
DEFAULT_VIF
|
||||
)
|
||||
data.objects.push(vif)
|
||||
networkObj.snapshot.vifs.push(vif.id)
|
||||
}
|
||||
}
|
||||
const xml = toOvaXml(data)
|
||||
await fromCallback.call(pack, pack.entry, { name: `ova.xml` }, xml)
|
||||
}
|
||||
@@ -1,32 +0,0 @@
|
||||
import { isNotEmptyRef } from './_isNotEmptyRef.mjs'
|
||||
import { importVm } from './importVm.mjs'
|
||||
|
||||
export async function importVdi(vdi, vhd, xapi, sr) {
|
||||
// create a fake VM
|
||||
const vmRef = await importVm(
|
||||
{
|
||||
name_label: `[xva-disp-import]${vdi.name_label}`,
|
||||
memory: 1024 * 1024 * 32,
|
||||
nCpus: 1,
|
||||
firmware: 'bios',
|
||||
vdis: [vdi],
|
||||
vhds: [vhd],
|
||||
},
|
||||
xapi,
|
||||
sr
|
||||
)
|
||||
// wait for the VM to be loaded if necessary
|
||||
xapi.getObject(vmRef, undefined) ?? (await xapi.waitObject(vmRef))
|
||||
|
||||
const vbdRefs = await xapi.getField('VM', vmRef, 'VBDs')
|
||||
// get the disk
|
||||
const disks = { __proto__: null }
|
||||
;(await xapi.getRecords('VBD', vbdRefs)).forEach(vbd => {
|
||||
if (vbd.type === 'Disk' && isNotEmptyRef(vbd.VDI)) {
|
||||
disks[vbd.VDI] = true
|
||||
}
|
||||
})
|
||||
// destroy the VM and VBD
|
||||
await xapi.call('VM.destroy', vmRef)
|
||||
return await xapi.getRecord('VDI', Object.keys(disks)[0])
|
||||
}
|
||||
@@ -1,32 +0,0 @@
|
||||
import tar from 'tar-stream'
|
||||
|
||||
import writeOvaXml from './_writeOvaXml.mjs'
|
||||
import writeDisk from './_writeDisk.mjs'
|
||||
|
||||
export async function importVm(vm, xapi, sr, network) {
|
||||
const pack = tar.pack()
|
||||
const taskRef = await xapi.task_create('VM import')
|
||||
const query = {
|
||||
sr_id: sr.$ref,
|
||||
}
|
||||
|
||||
const promise = xapi
|
||||
.putResource(pack, '/import/', {
|
||||
query,
|
||||
task: taskRef,
|
||||
})
|
||||
.catch(err => console.error(err))
|
||||
|
||||
await writeOvaXml(pack, vm, { sr, network })
|
||||
for (const vhd of vm.vhds) {
|
||||
await writeDisk(pack, vhd, vhd.ref)
|
||||
}
|
||||
pack.finalize()
|
||||
const str = await promise
|
||||
const matches = /OpaqueRef:[0-9a-z-]+/.exec(str)
|
||||
if (!matches) {
|
||||
const error = new Error(`no opaque ref found in ${str}`)
|
||||
throw error
|
||||
}
|
||||
return matches[0]
|
||||
}
|
||||
@@ -1,29 +0,0 @@
|
||||
{
|
||||
"name": "@xen-orchestra/xva",
|
||||
"version": "1.0.2",
|
||||
"main": "index.js",
|
||||
"author": "",
|
||||
"license": "ISC",
|
||||
"private": false,
|
||||
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/xva",
|
||||
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
|
||||
"repository": {
|
||||
"directory": "@xen-orchestra/xva",
|
||||
"type": "git",
|
||||
"url": "https://github.com/vatesfr/xen-orchestra.git"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=14.0"
|
||||
},
|
||||
"dependencies": {
|
||||
"@vates/read-chunk": "^1.2.0",
|
||||
"hash-wasm": "^4.11.0",
|
||||
"lodash.defaultsdeep": "^4.6.1",
|
||||
"promise-toolbox": "^0.21.0",
|
||||
"tar-stream": "^3.1.6",
|
||||
"uuid": "^9.0.0"
|
||||
},
|
||||
"scripts": {
|
||||
"postversion": "npm publish --access public"
|
||||
}
|
||||
}
|
||||
@@ -1,22 +0,0 @@
|
||||
export const DEFAULT_VBD = {
|
||||
class: 'VBD',
|
||||
snapshot: {
|
||||
allowed_operations: [],
|
||||
bootable: true, // @todo : fix it
|
||||
current_operations: {},
|
||||
currently_attached: false,
|
||||
empty: false,
|
||||
metrics: 'OpaqueRef:NULL',
|
||||
mode: 'RW',
|
||||
other_config: {},
|
||||
qos_algorithm_params: {},
|
||||
qos_algorithm_type: '',
|
||||
qos_supported_algorithms: [],
|
||||
runtime_properties: {},
|
||||
status_code: 0,
|
||||
status_detail: '',
|
||||
storage_lock: false,
|
||||
type: 'Disk',
|
||||
unpluggable: false,
|
||||
},
|
||||
}
|
||||
@@ -1,29 +0,0 @@
|
||||
export const DEFAULT_VDI = {
|
||||
class: 'VDI',
|
||||
snapshot: {
|
||||
allow_caching: false,
|
||||
cbt_enabled: false,
|
||||
descriptionLabel: 'description',
|
||||
is_a_snapshot: false,
|
||||
managed: true,
|
||||
metrics: 'OpaqueRef:NULL',
|
||||
missing: false,
|
||||
name_label: 'name_label',
|
||||
on_boot: 'persist',
|
||||
other_config: {},
|
||||
parent: 'OpaqueRef:NULL',
|
||||
physical_utilisation: 1024 * 1024,
|
||||
read_only: false,
|
||||
sharable: false,
|
||||
snapshot_of: 'OpaqueRef:NULL',
|
||||
snapshots: [],
|
||||
SR: 'OpaqueRef:NULL',
|
||||
storage_lock: false,
|
||||
tags: [],
|
||||
type: 'user',
|
||||
uuid: '',
|
||||
vbds: [],
|
||||
virtual_size: 0,
|
||||
xenstore_data: {},
|
||||
},
|
||||
}
|
||||
@@ -1,26 +0,0 @@
|
||||
export const DEFAULT_VIF = {
|
||||
class: 'VIF',
|
||||
snapshot: {
|
||||
allowed_operations: [],
|
||||
currently_attached: false,
|
||||
current_operations: {},
|
||||
ipv4_addresses: [],
|
||||
ipv4_allowed: [],
|
||||
ipv4_configuration_mode: 'None',
|
||||
ipv4_gateway: '',
|
||||
ipv6_addresses: [],
|
||||
ipv6_allowed: [],
|
||||
ipv6_configuration_mode: 'None',
|
||||
ipv6_gateway: '',
|
||||
locking_mode: 'network_default',
|
||||
MTU: 1500,
|
||||
metrics: 'OpaqueRef:NULL',
|
||||
other_config: {},
|
||||
qos_algorithm_params: {},
|
||||
qos_algorithm_type: '',
|
||||
qos_supported_algorithms: [],
|
||||
runtime_properties: {},
|
||||
status_code: 0,
|
||||
status_detail: '',
|
||||
},
|
||||
}
|
||||
@@ -1,106 +0,0 @@
|
||||
export const DEFAULT_VM = {
|
||||
class: 'VM',
|
||||
id: null,
|
||||
snapshot: {
|
||||
actions_after_crash: 'restart',
|
||||
actions_after_reboot: 'restart',
|
||||
actions_after_shutdown: 'destroy',
|
||||
affinity: 'Ref:53',
|
||||
allowed_operations: [],
|
||||
// appliance:'OpaqueRef:NULL',
|
||||
attached_PCIs: [],
|
||||
blobs: {},
|
||||
blocked_operations: {},
|
||||
children: [],
|
||||
consoles: [],
|
||||
crash_dumps: [],
|
||||
current_operations: {},
|
||||
domain_type: 'hvm',
|
||||
domarch: '',
|
||||
domid: -1,
|
||||
generation_id: '',
|
||||
guest_metrics: 'Ref:53',
|
||||
ha_always_run: false,
|
||||
ha_restart_priority: '',
|
||||
hardware_platform_version: 0,
|
||||
has_vendor_device: false,
|
||||
HVM_boot_params: {
|
||||
firmware: 'bios',
|
||||
order: 'dcn',
|
||||
},
|
||||
HVM_boot_policy: 'BIOS order',
|
||||
HVM_shadow_multiplier: 1,
|
||||
is_a_template: false,
|
||||
is_control_domain: false,
|
||||
is_default_template: false,
|
||||
is_snapshot_from_vmpp: false,
|
||||
is_vmss_snapshot: false,
|
||||
last_booted_record: '',
|
||||
memory_dynamic_max: 1,
|
||||
memory_dynamic_min: 1,
|
||||
memory_overhead: 11534336,
|
||||
memory_static_max: 1,
|
||||
memory_static_min: 1,
|
||||
memory_target: 0,
|
||||
metrics: 'OpaqueRef:NULL',
|
||||
name_label: 'from xva',
|
||||
NVRAM: {},
|
||||
name_description: ' from xva',
|
||||
order: 0,
|
||||
other_config: {
|
||||
base_template_name: 'Other install media',
|
||||
// mac_seed,
|
||||
'install-methods': 'cdrom',
|
||||
},
|
||||
parent: 'OpaqueRef:NULL',
|
||||
PCI_bus: '',
|
||||
platform: {
|
||||
timeoffset: 1,
|
||||
'device-model': 'qemu-upstream-compat',
|
||||
secureboot: 'false',
|
||||
hpet: 'true',
|
||||
nx: 'true',
|
||||
pae: 'true',
|
||||
apic: 'true',
|
||||
viridian: 'true',
|
||||
acpi: 1,
|
||||
},
|
||||
power_state: 'halted',
|
||||
// protection_policy:'OpaqueRef:NULL',
|
||||
PV_args: '',
|
||||
PV_bootloader_args: '',
|
||||
PV_bootloader: '',
|
||||
PV_kernel: '',
|
||||
PV_legacy_args: '',
|
||||
PV_ramdisk: '',
|
||||
recommendations: '',
|
||||
reference_label: 'other-install-media',
|
||||
requires_reboot: false,
|
||||
resident_on: 'Ref:53',
|
||||
// scheduled_to_be_resident_on:'OpaqueRef:NULL',
|
||||
shutdown_delay: 0,
|
||||
// snapshot_schedule: 'OpaqueRef:NULL',
|
||||
snapshot_info: {},
|
||||
snapshot_metadata: '',
|
||||
snapshot_of: 'OpaqueRef:NULL',
|
||||
snapshot_time: new Date(0),
|
||||
snapshots: [],
|
||||
start_delay: 0,
|
||||
// suspend_VDI:'OpaqueRef:NULL',
|
||||
// suspend_SR:'OpaqueRef:NULL',
|
||||
tags: [],
|
||||
transportable_snapshot_id: '',
|
||||
// uuid,
|
||||
user_version: 1,
|
||||
VBDs: [],
|
||||
VCPUs_at_startup: 1,
|
||||
VCPUs_max: 1,
|
||||
VCPUs_params: {},
|
||||
version: 0,
|
||||
VGPUs: [],
|
||||
VIFs: [],
|
||||
VTPMs: [],
|
||||
VUSBs: [],
|
||||
xenstore_data: {},
|
||||
},
|
||||
}
|
||||
87 CHANGELOG.md
@@ -1,96 +1,17 @@
|
||||
# ChangeLog
|
||||
|
||||
## **5.91.1** (2024-02-06)
|
||||
|
||||
### Bug fixes
|
||||
|
||||
- [Import/VMWare] Fix `Error: task has been destroyed before completion` with XVA import [Forum#70513](https://xcp-ng.org/forum/post/70513)
|
||||
- [Import/VM] Fix `UUID_INVALID(VM, OpaqueRef:...)` error when importing from URL
|
||||
- [Proxies] Fix `xapi.getOrWaitObject is not a function` is not a function during deployment
|
||||
- [REST API] Fix empty object's tasks list
|
||||
- [REST API] Fix incorrect `href` in `/:collection/:object/tasks`
|
||||
|
||||
### Released packages
|
||||
|
||||
- @xen-orchestra/immutable-backups 1.0.1
|
||||
- @xen-orchestra/xva 1.0.2
|
||||
- xo-server 5.135.1
|
||||
|
||||
## **5.91.0** (2024-01-31)
|
||||
|
||||
<img id="latest" src="https://badgen.net/badge/channel/latest/yellow" alt="Channel: latest" />
|
||||
|
||||
### Highlights
|
||||
|
||||
- [Import/VMWare] Speed up import and make all imports thin [#7323](https://github.com/vatesfr/xen-orchestra/issues/7323)
|
||||
- [Backup] Implement Backup Repository immutability (PR [#6928](https://github.com/vatesfr/xen-orchestra/pull/6928))
|
||||
- [REST API] New pool action: `emergency_shutdown`, it suspends all the VMs and then shuts down all the host [#7277](https://github.com/vatesfr/xen-orchestra/issues/7277) (PR [#7279](https://github.com/vatesfr/xen-orchestra/pull/7279))
|
||||
- [REST API] New pool action: `create_vm` [#6749](https://github.com/vatesfr/xen-orchestra/issues/6749)
|
||||
- [Pool/Advanced] Ability to do a `Rolling Pool Reboot` (Enterprise plans) [#6885](https://github.com/vatesfr/xen-orchestra/issues/6885)
|
||||
- [Tags] Admins can create colored tags (PR [#7262](https://github.com/vatesfr/xen-orchestra/pull/7262))
|
||||
- [VM] Custom notes [#5792](https://github.com/vatesfr/xen-orchestra/issues/5792) (PR [#7322](https://github.com/vatesfr/xen-orchestra/pull/7322))(PRs [#7243](https://github.com/vatesfr/xen-orchestra/pull/7243), [#7242](https://github.com/vatesfr/xen-orchestra/pull/7242))
|
||||
- [Plugin/load-balancer] Limit concurrent VM migrations to 2 (configurable) to avoid long paused VMs [#7084](https://github.com/vatesfr/xen-orchestra/issues/7084) (PR [#7297](https://github.com/vatesfr/xen-orchestra/pull/7297))
|
||||
- [SR] Possibility to create SMB shared SR [#991](https://github.com/vatesfr/xen-orchestra/issues/991) (PR [#7330](https://github.com/vatesfr/xen-orchestra/pull/7330))
|
||||
- [Tags] Add tooltips on `xo:no-bak` and `xo:notify-on-snapshot` tags (PR [#7335](https://github.com/vatesfr/xen-orchestra/pull/7335))
|
||||
- [Host/Reboot] Confirmation modal to reboot an updated slave host if the master is not [#7059](https://github.com/vatesfr/xen-orchestra/issues/7059) (PR [#7293](https://github.com/vatesfr/xen-orchestra/pull/7293))
|
||||
- [Pool/Host] Add a warning if hosts do not have the same version within a pool [#7059](https://github.com/vatesfr/xen-orchestra/issues/7059) (PR [#7280](https://github.com/vatesfr/xen-orchestra/pull/7280))
|
||||
- [VM/Advanced] Admins can change VM creator [Forum#7313](https://xcp-ng.org/forum/topic/7313/change-created-by-and-date-information) (PR [#7276](https://github.com/vatesfr/xen-orchestra/pull/7276))
|
||||
- [XOSTOR] Allow user to ignore file systems at storage creation (PR[#7338](https://github.com/vatesfr/xen-orchestra/pull/7338))
|
||||
- [Settings/Logs] Transform objects UUIDs and OpaqueRefs into clickable links, leading to the corresponding object page (PR [#7300](https://github.com/vatesfr/xen-orchestra/pull/7300))
|
||||
- [Pool/patches] Disable Rolling Pool Update button if host is alone in its pool [#6415](https://github.com/vatesfr/xen-orchestra/issues/6415) (PR [#7286](https://github.com/vatesfr/xen-orchestra/pull/7286))
|
||||
## **next**
|
||||
|
||||
### Enhancements
|
||||
|
||||
- [xo-cli] Supports NDJSON response for the `rest get` command (it also respects the `--json` flag) [Forum#69326](https://xcp-ng.org/forum/post/69326)
|
||||
- [Settings/Logs] Use GitHub issue form with pre-filled fields when reporting a bug [#7142](https://github.com/vatesfr/xen-orchestra/issues/7142) (PR [#7274](https://github.com/vatesfr/xen-orchestra/pull/7274))
|
||||
- [Tasks] Hide `/rrd_updates` tasks by default
|
||||
- [Sign in] Support _Remember me_ feature with external providers (PR [#7298](https://github.com/vatesfr/xen-orchestra/pull/7298))
|
||||
- [Plugins] Loading, or unloading, will respectively enable, or disable, _Auto-load at server start_, this should lead to least surprising behaviors (PR [#7317](https://github.com/vatesfr/xen-orchestra/pull/7317))
|
||||
- [Backup/Restore] Show whether the memory was backed up (PR [#7315](https://github.com/vatesfr/xen-orchestra/pull/7315))
|
||||
|
||||
### Bug fixes
|
||||
|
||||
- [Proxies] Fix `this.getObject` is not a function during deployment
|
||||
- [Settings/Logs] Fix `sr.getAllUnhealthyVdiChainsLength: not enough permissions` error with non-admin users (PR [#7265](https://github.com/vatesfr/xen-orchestra/pull/7265))
|
||||
- [Settings/Logs] Fix `proxy.getAll: not enough permissions` error with non-admin users (PR [#7249](https://github.com/vatesfr/xen-orchestra/pull/7249))
|
||||
- [Replication/Health Check] Fix `healthCheckVm.add_tag is not a function` error [Forum#69156](https://xcp-ng.org/forum/post/69156)
|
||||
- [Plugin/load-balancer] Prevent unwanted migrations to hosts with low free memory (PR [#7288](https://github.com/vatesfr/xen-orchestra/pull/7288))
|
||||
- Avoid unnecessary `pool.add_to_other_config: Duplicate key` error in XAPI log [Forum#68761](https://xcp-ng.org/forum/post/68761)
|
||||
- [Jobs] Reset parameters when editing method to avoid invalid parameters on execution [Forum#69299](https://xcp-ng.org/forum/post/69299)
|
||||
- [Metadata Backup] Fix `ENOENT` error when restoring an _XO Config_ backup [Forum#68999](https://xcp-ng.org/forum/post/68999)
|
||||
- [REST API] Fix `/backup/log/<id>` which was broken by the `/backups` to `/backup` renaming [Forum#69426](https://xcp-ng.org/forum/post/69426)
|
||||
- [Backup/Restore] Fix unnecessary pool selector in XO config backup restore modal [Forum#8130](https://xcp-ng.org/forum/topic/8130/xo-configbackup-restore) (PR [#7287](https://github.com/vatesfr/xen-orchestra/pull/7287))
|
||||
- [File restore] Fix potential race condition in partition mount/unmount (PR [#7312](https://github.com/vatesfr/xen-orchestra/pull/7312))
|
||||
- [Modal] Fix opened modal not closing when navigating to another route/URL (PR [#7301](https://github.com/vatesfr/xen-orchestra/pull/7301))
|
||||
- [Backup/Restore] Don't count memory as a key (i.e. complete) disk [Forum#8212](https://xcp-ng.org/forum/post/69591) (PR [#7315](https://github.com/vatesfr/xen-orchestra/pull/7315))
|
||||
- [PIF] Fix IPv4 reconfiguration only worked when the IPv4 mode was updated (PR [#7324](https://github.com/vatesfr/xen-orchestra/pull/7324))
|
||||
- [Backup/Smart mode] Make preview correctly ignoring `xo:no-bak` tags [Forum#69797](https://xcp-ng.org/forum/post/69797) (PR [#7331](https://github.com/vatesfr/xen-orchestra/pull/7331))
|
||||
- [Pool/Advanced] Show pool backup/migration network even if they no longer exist (PR [#7303](https://github.com/vatesfr/xen-orchestra/pull/7303))
|
||||
- [Import/disk] Couldn't update 'name' field when importing from a URL [#7326](https://github.com/vatesfr/xen-orchestra/issues/7326) (PR [#7332](https://github.com/vatesfr/xen-orchestra/pull/7332))
|
||||
- [Pool/patches] Disable Rolling Pool Update button if some powered up VMs are using a non-shared storage [#6415](https://github.com/vatesfr/xen-orchestra/issues/6415) (PR [#7294](https://github.com/vatesfr/xen-orchestra/pull/7294))
|
||||
|
||||
## Released packages
|
||||
|
||||
- @vates/decorate-with 2.1.0
|
||||
- @vates/fuse-vhd 2.1.0
|
||||
- xen-api 2.0.1
|
||||
- @xen-orchestra/xapi 4.2.0
|
||||
- @xen-orchestra/self-signed 0.2.0
|
||||
- xo-server-load-balancer 0.8.0
|
||||
- @xen-orchestra/vmware-explorer 0.4.0
|
||||
- @xen-orchestra/xva 1.0.0
|
||||
- @xen-orchestra/fs 4.1.4
|
||||
- @xen-orchestra/backups 0.44.6
|
||||
- @xen-orchestra/immutable-backups 1.0.0
|
||||
- @xen-orchestra/proxy 0.26.45
|
||||
- xo-cli 0.26.0
|
||||
- xo-server 5.135.0
|
||||
- xo-web 5.136.0
|
||||
- xo-cli 0.24.0
|
||||
|
||||
## **5.90.0** (2023-12-29)
|
||||
|
||||
<img id="stable" src="https://badgen.net/badge/channel/stable/green" alt="Channel: stable" />
|
||||
|
||||
### Highlights
|
||||
|
||||
- [VDI] Create XAPI task during NBD export (PR [#7228](https://github.com/vatesfr/xen-orchestra/pull/7228))
|
||||
@@ -141,6 +62,8 @@
|
||||
|
||||
## **5.89.0** (2023-11-30)
|
||||
|
||||
<img id="latest" src="https://badgen.net/badge/channel/latest/yellow" alt="Channel: latest" />
|
||||
|
||||
### Highlights
|
||||
|
||||
- [Restore] Show source remote and restoration time on a restored VM (PR [#7186](https://github.com/vatesfr/xen-orchestra/pull/7186))
|
||||
@@ -191,6 +114,8 @@
|
||||
|
||||
## **5.88.2** (2023-11-13)
|
||||
|
||||
<img id="stable" src="https://badgen.net/badge/channel/stable/green" alt="Channel: stable" />
|
||||
|
||||
### Enhancement
|
||||
|
||||
- [REST API] Add `users` collection
|
||||
|
||||
@@ -7,13 +7,38 @@
|
||||
|
||||
> Users must be able to say: “Nice enhancement, I'm eager to test it”
|
||||
|
||||
- [Settings/Logs] Use GitHub issue form with pre-filled fields when reporting a bug [#7142](https://github.com/vatesfr/xen-orchestra/issues/7142) (PR [#7274](https://github.com/vatesfr/xen-orchestra/pull/7274))
|
||||
- [REST API] New pool action: `emergency_shutdown`, it suspends all the VMs and then shuts down all the host [#7277](https://github.com/vatesfr/xen-orchestra/issues/7277) (PR [#7279](https://github.com/vatesfr/xen-orchestra/pull/7279))
|
||||
- [Tasks] Hide `/rrd_updates` tasks by default
|
||||
- [Sign in] Support _Remember me_ feature with external providers (PR [#7298](https://github.com/vatesfr/xen-orchestra/pull/7298))
|
||||
- [Pool/Host] Add a warning if hosts do not have the same version within a pool [#7059](https://github.com/vatesfr/xen-orchestra/issues/7059) (PR [#7280](https://github.com/vatesfr/xen-orchestra/pull/7280))
|
||||
- [Plugins] Loading, or unloading, will respectively enable, or disable, _Auto-load at server start_, this should lead to least surprising behaviors (PR [#7317](https://github.com/vatesfr/xen-orchestra/pull/7317))
|
||||
- [VM/Advanced] Admin can change VM creator [Forum#7313](https://xcp-ng.org/forum/topic/7313/change-created-by-and-date-information) (PR [#7276](https://github.com/vatesfr/xen-orchestra/pull/7276))
|
||||
- [Host/Reboot] Confirmation modal to reboot an updated slave host if the master is not [#7059](https://github.com/vatesfr/xen-orchestra/issues/7059) (PR [#7293](https://github.com/vatesfr/xen-orchestra/pull/7293))
|
||||
- [Backup/Restore] Show whether the memory was backed up (PR [#7315](https://github.com/vatesfr/xen-orchestra/pull/7315))
|
||||
- [Plugin/load-balancer] Limit concurrent VM migrations to 2 (configurable) to avoid long paused VMs [#7084](https://github.com/vatesfr/xen-orchestra/issues/7084) (PR [#7297](https://github.com/vatesfr/xen-orchestra/pull/7297))
|
||||
- [Plugin/load-balancer] A parameter was added in performance mode to balance VMs on hosts depending on their number of vCPU, when it does not cause performance issues. [#5389](https://github.com/vatesfr/xen-orchestra/issues/5389) (PR [#7333](https://github.com/vatesfr/xen-orchestra/pull/7333))
|
||||
- [Tags] Admin can create colored tags (PR [#7262](https://github.com/vatesfr/xen-orchestra/pull/7262))
|
||||
|
||||
### Bug fixes
|
||||
|
||||
> Users must be able to say: “I had this issue, happy to know it's fixed”
|
||||
|
||||
- [Import/VMWare] Fix `(Failure \"Expected string, got 'I(0)'\")` (PR [#7361](https://github.com/vatesfr/xen-orchestra/issues/7361))
|
||||
- [Plugin/load-balancer] Fixing `TypeError: Cannot read properties of undefined (reading 'high')` happening when trying to optimize a host with performance plan [#7359](https://github.com/vatesfr/xen-orchestra/issues/7359) (PR [#7362](https://github.com/vatesfr/xen-orchestra/pull/7362))
|
||||
- Changing the number of displayed items per page should send back to the first page [#7350](https://github.com/vatesfr/xen-orchestra/issues/7350)
|
||||
- [Proxies] Fix `this.getObject` is not a function during deployment
|
||||
- [Settings/Logs] Fix `sr.getAllUnhealthyVdiChainsLength: not enough permissions` error with non-admin users (PR [#7265](https://github.com/vatesfr/xen-orchestra/pull/7265))
|
||||
- [Settings/Logs] Fix `proxy.getAll: not enough permissions` error with non-admin users (PR [#7249](https://github.com/vatesfr/xen-orchestra/pull/7249))
|
||||
- [Replication/Health Check] Fix `healthCheckVm.add_tag is not a function` error [Forum#69156](https://xcp-ng.org/forum/post/69156)
|
||||
- [Plugin/load-balancer] Prevent unwanted migrations to hosts with low free memory (PR [#7288](https://github.com/vatesfr/xen-orchestra/pull/7288))
|
||||
- Avoid unnecessary `pool.add_to_other_config: Duplicate key` error in XAPI log [Forum#68761](https://xcp-ng.org/forum/post/68761)
|
||||
- [Jobs] Reset parameters when editing method to avoid invalid parameters on execution [Forum#69299](https://xcp-ng.org/forum/post/69299)
|
||||
- [Metadata Backup] Fix `ENOENT` error when restoring an _XO Config_ backup [Forum#68999](https://xcp-ng.org/forum/post/68999)
|
||||
- [REST API] Fix `/backup/log/<id>` which was broken by the `/backups` to `/backup` renaming [Forum#69426](https://xcp-ng.org/forum/post/69426)
|
||||
- [Backup/Restore] Fix unnecessary pool selector in XO config backup restore modal [Forum#8130](https://xcp-ng.org/forum/topic/8130/xo-configbackup-restore) (PR [#7287](https://github.com/vatesfr/xen-orchestra/pull/7287))
|
||||
- [File restore] Fix potential race condition in partition mount/unmount (PR [#7312](https://github.com/vatesfr/xen-orchestra/pull/7312))
|
||||
- [Modal] Fix opened modal not closing when navigating to another route/URL (PR [#7301](https://github.com/vatesfr/xen-orchestra/pull/7301))
|
||||
- [Backup/Restore] Don't count memory as a key (i.e. complete) disk [Forum#8212](https://xcp-ng.org/forum/post/69591) (PR [#7315](https://github.com/vatesfr/xen-orchestra/pull/7315))
|
||||
- [Pool/patches] Disable Rolling Pool Update button if host is alone in its pool [#6415](https://github.com/vatesfr/xen-orchestra/issues/6415) (PR [#7286](https://github.com/vatesfr/xen-orchestra/pull/7286))
|
||||
- [PIF] Fix IPv4 reconfiguration only worked when the IPv4 mode was updated (PR [#7324](https://github.com/vatesfr/xen-orchestra/pull/7324))
|
||||
|
||||
### Packages to release
|
||||
|
||||
@@ -31,8 +56,14 @@
|
||||
|
||||
<!--packages-start-->
|
||||
|
||||
- xo-server patch
|
||||
- xo-server-load-balancer patch
|
||||
- xo-web patch
|
||||
- @vates/decorate-with minor
|
||||
- @vates/fuse-vhd patch
|
||||
- @xen-orchestra/backups patch
|
||||
- @xen-orchestra/self-signed minor
|
||||
- @xen-orchestra/xapi minor
|
||||
- xen-api patch
|
||||
- xo-server minor
|
||||
- xo-server-load-balancer minor
|
||||
- xo-web minor
|
||||
|
||||
<!--packages-end-->
|
||||
|
||||
@@ -279,45 +279,12 @@ The following query parameters are supported to customize the created VDI:

To see the actions available on a given object, get the collection at `/rest/v0/<type>/<uuid>/actions`.

The field `params` contains the [JSON schema](https://json-schema.org/) for the parameters. Use `fields=params` to see it when fetching the collection.

For example, to list all actions on a given VM:

```sh
curl \
  -b authenticationToken=KQxQdm2vMiv7jBIK0hgkmgxKzemd8wSJ7ugFGKFkTbs \
  'https://xo.example.org/rest/v0/vms/770aa52a-fd42-8faf-f167-8c5c4a237cac/actions?fields=params'
```

Example response:

```json
[
  {
    "href": "/rest/v0/vms/770aa52a-fd42-8faf-f167-8c5c4a237cac/actions/clean_reboot"
  },
  {
    "href": "/rest/v0/vms/770aa52a-fd42-8faf-f167-8c5c4a237cac/actions/clean_shutdown"
  },
  {
    "href": "/rest/v0/vms/770aa52a-fd42-8faf-f167-8c5c4a237cac/actions/hard_reboot"
  },
  {
    "href": "/rest/v0/vms/770aa52a-fd42-8faf-f167-8c5c4a237cac/actions/hard_shutdown"
  },
  {
    "params": {
      "name_label": {
        "type": "string",
        "optional": true
      }
    },
    "href": "/rest/v0/vms/770aa52a-fd42-8faf-f167-8c5c4a237cac/actions/snapshot"
  },
  {
    "href": "/rest/v0/vms/770aa52a-fd42-8faf-f167-8c5c4a237cac/actions/start"
  }
]
'https://xo.example.org/rest/v0/vms/770aa52a-fd42-8faf-f167-8c5c4a237cac/actions'
```

### Start an action

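For readers scripting against the REST API from Node.js instead of curl, the same listing request can be expressed with `fetch`. The endpoint, the `authenticationToken` cookie name and the `fields=params` query string are taken directly from the curl example above; the token value is of course a placeholder.

```js
// Node 18+: list the actions available on a VM, including their parameter schemas.
// Same URL and cookie as the curl example above; the token is illustrative only.
const token = 'KQxQdm2vMiv7jBIK0hgkmgxKzemd8wSJ7ugFGKFkTbs'
const url = 'https://xo.example.org/rest/v0/vms/770aa52a-fd42-8faf-f167-8c5c4a237cac/actions?fields=params'

const response = await fetch(url, { headers: { cookie: `authenticationToken=${token}` } })
const actions = await response.json()
console.log(actions)
```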
@@ -96,6 +96,7 @@
|
||||
"scripts": {
|
||||
"build": "turbo run build --scope xo-server --scope xo-server-'*' --scope xo-web",
|
||||
"build:xo-lite": "turbo run build --scope @xen-orchestra/lite",
|
||||
"ci": "yarn && yarn build && yarn test-lint && yarn test-integration",
|
||||
"clean": "scripts/run-script.js --parallel clean",
|
||||
"dev": "scripts/run-script.js --parallel --concurrency 0 --verbose dev",
|
||||
"dev-test": "jest --bail --watch \"^(?!.*\\.integ\\.spec\\.js$)\"",
|
||||
|
||||
@@ -23,7 +23,7 @@
|
||||
"node": ">=10"
|
||||
},
|
||||
"dependencies": {
|
||||
"@xen-orchestra/fs": "^4.1.4",
|
||||
"@xen-orchestra/fs": "^4.1.3",
|
||||
"cli-progress": "^3.1.0",
|
||||
"exec-promise": "^0.7.0",
|
||||
"getopts": "^2.2.3",
|
||||
|
||||
@@ -20,7 +20,7 @@
|
||||
"@vates/read-chunk": "^1.2.0",
|
||||
"@vates/stream-reader": "^0.1.0",
|
||||
"@xen-orchestra/async-map": "^0.1.2",
|
||||
"@xen-orchestra/fs": "^4.1.4",
|
||||
"@xen-orchestra/fs": "^4.1.3",
|
||||
"@xen-orchestra/log": "^0.6.0",
|
||||
"async-iterator-to-stream": "^1.0.2",
|
||||
"decorator-synchronized": "^0.6.0",
|
||||
@@ -33,7 +33,7 @@
|
||||
"uuid": "^9.0.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@xen-orchestra/fs": "^4.1.4",
|
||||
"@xen-orchestra/fs": "^4.1.3",
|
||||
"execa": "^5.0.0",
|
||||
"get-stream": "^6.0.0",
|
||||
"rimraf": "^5.0.1",
|
||||
|
||||
@@ -40,7 +40,7 @@
|
||||
"human-format": "^1.0.0",
|
||||
"lodash": "^4.17.4",
|
||||
"pw": "^0.0.4",
|
||||
"xen-api": "^2.0.1"
|
||||
"xen-api": "^2.0.0"
|
||||
},
|
||||
"scripts": {
|
||||
"postversion": "npm publish"
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
{
|
||||
"private": false,
|
||||
"name": "xen-api",
|
||||
"version": "2.0.1",
|
||||
"version": "2.0.0",
|
||||
"license": "ISC",
|
||||
"description": "Connector to the Xen API",
|
||||
"keywords": [
|
||||
@@ -32,7 +32,7 @@
|
||||
},
|
||||
"dependencies": {
|
||||
"@vates/coalesce-calls": "^0.1.0",
|
||||
"@vates/decorate-with": "^2.1.0",
|
||||
"@vates/decorate-with": "^2.0.0",
|
||||
"bind-property-descriptor": "^2.0.0",
|
||||
"blocked": "^1.2.1",
|
||||
"debug": "^4.0.1",
|
||||
@@ -52,7 +52,7 @@
|
||||
"xo-collection": "^0.5.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"tap": "^18.7.0"
|
||||
"tap": "^16.1.0"
|
||||
},
|
||||
"scripts": {
|
||||
"plot": "gnuplot -p memory-test.gnu",
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
{
|
||||
"private": false,
|
||||
"name": "xo-cli",
|
||||
"version": "0.26.0",
|
||||
"version": "0.24.0",
|
||||
"license": "AGPL-3.0-or-later",
|
||||
"description": "Basic CLI for Xen-Orchestra",
|
||||
"keywords": [
|
||||
|
||||
@@ -8,7 +8,6 @@ import { readChunk } from '@vates/read-chunk'
import getopts from 'getopts'
import hrp from 'http-request-plus'
import merge from 'lodash/merge.js'
import set from 'lodash/set.js'
import split2 from 'split2'

import * as config from './config.mjs'
@@ -30,10 +29,10 @@ function parseParams(args) {
for (const arg of args) {
const i = arg.indexOf('=')
if (i === -1) {
set(params, arg, '')
params[arg] = ''
} else {
const value = arg.slice(i + 1)
set(params, arg.slice(0, i), value.startsWith('json:') ? JSON.parse(value.slice(5)) : value)
params[arg.slice(0, i)] = value.startsWith('json:') ? JSON.parse(value.slice(5)) : value
}
}
return params
@@ -167,7 +166,7 @@ export async function rest(args) {

return COMMANDS[command].call(
{
async exec(path, { query = {}, ...opts } = {}) {
exec(path, { query = {}, ...opts } = {}) {
const url = new URL(baseUrl)

const i = path.indexOf('?')
@@ -186,17 +185,7 @@ export async function rest(args) {
}
}

try {
return await hrp(url, merge({}, baseOpts, opts))
} catch (error) {
const { response } = error
if (response === undefined) {
throw error
}

console.error(response.statusCode, response.statusMessage)
throw await response.text()
}
return hrp(url, merge({}, baseOpts, opts))
},
json,
},

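The `parseParams` change above swaps plain property assignment for lodash's `set`, so dotted argument names now build nested objects while the `json:` prefix keeps working. A quick illustration of the resulting behaviour, with hypothetical CLI arguments (not taken from the xo-cli documentation):

```js
// What the new parseParams produces, shown with plain lodash.set calls.
// Hypothetical invocation: xo-cli rest post <collection> vdi.name_label=disk1 tags=json:["prod","sql"]
import set from 'lodash/set.js'

const params = {}
set(params, 'vdi.name_label', 'disk1')            // -> { vdi: { name_label: 'disk1' } }
set(params, 'tags', JSON.parse('["prod","sql"]')) // -> { vdi: {...}, tags: ['prod', 'sql'] }
console.log(params)
```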
@@ -8,7 +8,7 @@
|
||||
"removeComments": true,
|
||||
"sourceMap": true,
|
||||
"strictNullChecks": true,
|
||||
"target": "es2015",
|
||||
"target": "es2015"
|
||||
},
|
||||
"includes": "src/**/*",
|
||||
"includes": "src/**/*"
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "xo-server-load-balancer",
|
||||
"version": "0.8.0",
|
||||
"version": "0.7.3",
|
||||
"license": "AGPL-3.0-or-later",
|
||||
"description": "Load balancer for XO-Server",
|
||||
"keywords": [
|
||||
@@ -24,7 +24,7 @@
|
||||
"preferGlobal": false,
|
||||
"main": "dist/",
|
||||
"engines": {
|
||||
"node": ">=7"
|
||||
"node": ">=12.9"
|
||||
},
|
||||
"dependencies": {
|
||||
"@xen-orchestra/cron": "^1.0.6",
|
||||
|
||||
@@ -88,9 +88,20 @@ export const configurationSchema = {
$type: 'Tag',
},
},

balanceVcpus: {
type: 'boolean',
title: 'Balance vCPUs',
description: 'pre-position VMs on hosts to balance vCPU/CPU ratio (performance plan only)',
},
},

required: ['name', 'mode', 'pools'],
// when the UI allows it, remove this anyOf and hide the balanceVcpus option outside of performance mode
anyOf: [
{ properties: { mode: { const: 'Performance mode' } } },
{ properties: { balanceVcpus: { const: false } } },
],
},

minItems: 1,

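As a rough illustration of how the new flag surfaces in a plan definition, here is a hypothetical plan object matching the schema above; the plan name and pool ID are made up, and the exact nesting inside the plugin configuration is an assumption, not taken from the plugin documentation.

```js
// Hypothetical xo-server-load-balancer plan matching the schema above.
// balanceVcpus only makes sense with the performance mode, which is what the anyOf constraint enforces.
const plan = {
  name: 'prod-performance', // illustrative
  mode: 'Performance mode',
  pools: ['<pool uuid>'], // illustrative
  balanceVcpus: true, // the new option added by this change
}
```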
@@ -45,25 +45,27 @@ export default class PerformancePlan extends Plan {
|
||||
toOptimizeOnly: true,
|
||||
})
|
||||
|
||||
if (!results) {
|
||||
return
|
||||
if (results) {
|
||||
const { averages, toOptimize } = results
|
||||
toOptimize.sort((a, b) => -this._sortHosts(a, b))
|
||||
for (const exceededHost of toOptimize) {
|
||||
const { id } = exceededHost
|
||||
|
||||
debug(`Try to optimize Host (${exceededHost.id}).`)
|
||||
const availableHosts = filter(hosts, host => host.id !== id)
|
||||
debug(`Available destinations: ${availableHosts.map(host => host.id)}.`)
|
||||
|
||||
// Search bests combinations for the worst host.
|
||||
await this._optimize({
|
||||
exceededHost,
|
||||
hosts: availableHosts,
|
||||
hostsAverages: averages,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
const { averages, toOptimize } = results
|
||||
toOptimize.sort((a, b) => -this._sortHosts(a, b))
|
||||
for (const exceededHost of toOptimize) {
|
||||
const { id } = exceededHost
|
||||
|
||||
debug(`Try to optimize Host (${exceededHost.id}).`)
|
||||
const availableHosts = filter(hosts, host => host.id !== id)
|
||||
debug(`Available destinations: ${availableHosts.map(host => host.id)}.`)
|
||||
|
||||
// Search bests combinations for the worst host.
|
||||
await this._optimize({
|
||||
exceededHost,
|
||||
hosts: availableHosts,
|
||||
hostsAverages: averages,
|
||||
})
|
||||
if (this._balanceVcpus) {
|
||||
await this._processVcpuPrepositionning()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -178,7 +180,7 @@ export default class PerformancePlan extends Plan {
|
||||
const state = this._getThresholdState(exceededAverages)
|
||||
if (
|
||||
destinationAverages.cpu + vmAverages.cpu >= this._thresholds.cpu.low ||
|
||||
destinationAverages.memoryFree - vmAverages.memory <= this._thresholds.memoryFree.high ||
|
||||
destinationAverages.memoryFree - vmAverages.memory <= this._thresholds.memory.high ||
|
||||
(!state.cpu &&
|
||||
!state.memory &&
|
||||
(exceededAverages.cpu - vmAverages.cpu < destinationAverages.cpu + vmAverages.cpu ||
|
||||
|
||||
@@ -18,9 +18,12 @@ const LOW_THRESHOLD_FACTOR = 0.65
const HIGH_THRESHOLD_MEMORY_FREE_FACTOR = 1.2
const LOW_THRESHOLD_MEMORY_FREE_FACTOR = 1.5

const THRESHOLD_VCPU_RATIO = 0.9

const numberOrDefault = (value, def) => (value >= 0 ? value : def)

export const debugAffinity = str => debug(`anti-affinity: ${str}`)
export const debugVcpuBalancing = str => debug(`vCPU balancing: ${str}`)

// ===================================================================
// Averages.
@@ -96,12 +99,18 @@ function setRealCpuAverageOfVms(vms, vmsAverages, nCpus) {

// ===================================================================

function vcpuPerCpuRatio(host) {
return host.vcpuCount / host.cpuCount
}

// ===================================================================

export default class Plan {
constructor(
xo,
name,
poolIds,
{ excludedHosts, thresholds, antiAffinityTags = [] },
{ excludedHosts, thresholds, balanceVcpus, antiAffinityTags = [] },
globalOptions,
concurrentMigrationLimiter
) {
@@ -119,6 +128,7 @@ export default class Plan {
},
}
this._antiAffinityTags = antiAffinityTags
this._balanceVcpus = balanceVcpus
this._globalOptions = globalOptions
this._concurrentMigrationLimiter = concurrentMigrationLimiter

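To make the `vcpuPerCpuRatio` helper and `THRESHOLD_VCPU_RATIO` constant above concrete, here is a small worked example with made-up host numbers showing when the vCPU pre-positioning pass (added in the hunk below) decides the hosts are unbalanced enough to act.

```js
// Made-up hosts: vcpuCount = vCPUs of running VMs, cpuCount = physical cores.
const hosts = [
  { id: 'host1', vcpuCount: 45, cpuCount: 32 }, // ratio ~= 1.41
  { id: 'host2', vcpuCount: 30, cpuCount: 32 }, // ratio ~= 0.94
]
const vcpuPerCpuRatio = host => host.vcpuCount / host.cpuCount

// Same comparison as in the pre-positioning pass: lowest ratio divided by highest ratio.
const ratios = hosts.map(vcpuPerCpuRatio)
const spread = Math.min(...ratios) / Math.max(...ratios) // ~= 0.67

// 0.67 is not greater than THRESHOLD_VCPU_RATIO (0.9), so the hosts are considered
// unbalanced and the plugin will try to move vCPUs from host1 towards host2.
console.log(spread)
```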
@@ -276,6 +286,191 @@ export default class Plan {
|
||||
return vmsAverages
|
||||
}
|
||||
|
||||
// ===================================================================
|
||||
// vCPU pre-positionning helpers
|
||||
// ===================================================================
|
||||
|
||||
async _processVcpuPrepositionning() {
|
||||
const promises = []
|
||||
|
||||
const allHosts = await this._getHosts()
|
||||
if (allHosts.length <= 1) {
|
||||
return
|
||||
}
|
||||
const idToHost = keyBy(allHosts, 'id')
|
||||
const allVms = filter(this._getAllRunningVms(), vm => vm.$container in idToHost)
|
||||
const hostList = this._getVCPUHosts(allHosts, allVms)
|
||||
const idealVcpuPerCpuRatio =
|
||||
hostList.reduce((sum, host) => sum + host.vcpuCount, 0) / hostList.reduce((sum, host) => sum + host.cpuCount, 0)
|
||||
|
||||
debugVcpuBalancing('Try to apply vCPU prepositionning.')
|
||||
debugVcpuBalancing(`vCPU count per host: ${inspect(hostList, { depth: null })}`)
|
||||
debugVcpuBalancing(`Average vCPUs per CPU: ${idealVcpuPerCpuRatio}`)
|
||||
|
||||
// execute prepositionning only if vCPU/CPU ratios are different enough, to prevent executing too often
|
||||
const ratio = vcpuPerCpuRatio(minBy(hostList, vcpuPerCpuRatio)) / vcpuPerCpuRatio(maxBy(hostList, vcpuPerCpuRatio))
|
||||
if (ratio > THRESHOLD_VCPU_RATIO) {
|
||||
debugVcpuBalancing(`vCPU ratios not different enough : ${ratio}`)
|
||||
return
|
||||
}
|
||||
|
||||
const vmsAverages = await this._getVmsAverages(allVms, idToHost)
|
||||
const { averages: hostsAverages } = await this._getHostStatsAverages({ hosts: allHosts })
|
||||
|
||||
// 1. Find source host from which to migrate.
|
||||
const sources = sortBy(
|
||||
filter(hostList, host => (host.vcpuCount - 1) / host.cpuCount >= idealVcpuPerCpuRatio),
|
||||
[
|
||||
host => -vcpuPerCpuRatio(host),
|
||||
// Find host with the most memory used
|
||||
host => hostsAverages[host.id].memoryFree,
|
||||
]
|
||||
)
|
||||
debugVcpuBalancing(`Sources: ${inspect(sources, { depth: null })}`)
|
||||
|
||||
for (const sourceHost of sources) {
|
||||
let deltaSource = sourceHost.vcpuCount - sourceHost.cpuCount * idealVcpuPerCpuRatio
|
||||
// deltaSource = 0 has no guaranatee to be reachable, its value can be non-integer
|
||||
if (deltaSource < 1) {
|
||||
continue
|
||||
}
|
||||
|
||||
// 2. Find destination host.
|
||||
const destinations = sortBy(
|
||||
filter(hostList, host => host.id !== sourceHost.id && host.vcpuCount < host.cpuCount * idealVcpuPerCpuRatio),
|
||||
[
|
||||
// trying to avoid migrations between pools
|
||||
host => host.poolId === sourceHost.poolId,
|
||||
vcpuPerCpuRatio,
|
||||
host => -hostsAverages[host.id].memoryFree,
|
||||
]
|
||||
)
|
||||
debugVcpuBalancing(`Destinations : ${inspect(destinations, { depth: null })}`)
|
||||
|
||||
if (!destinations.length) {
|
||||
continue // Cannot find a valid destination.
|
||||
}
|
||||
|
||||
// Build VM list to migrate.
|
||||
const sourceVms = Object.values(sourceHost.vms)
|
||||
|
||||
// eslint-disable-next-line no-labels
|
||||
destinationLoop: for (const destinationHost of destinations) {
|
||||
debugVcpuBalancing(`Host candidate: ${sourceHost.id} -> ${destinationHost.id}`)
|
||||
|
||||
// calculating how many vCPUs source should give and how many destination should accept
|
||||
let deltaDestination = destinationHost.vcpuCount - destinationHost.cpuCount * idealVcpuPerCpuRatio
|
||||
|
||||
if (
|
||||
deltaDestination >= 0 ||
|
||||
hostsAverages[destinationHost.id].cpu >= this._thresholds.cpu.low ||
|
||||
hostsAverages[destinationHost.id].memoryFree <= this._thresholds.memoryFree.low
|
||||
) {
|
||||
continue
|
||||
}
|
||||
|
||||
// avoiding to migrate too much vCPUs for source or destination
|
||||
// deltaSource is positive, deltaDestination is negative, we check which one has greater absolute value
|
||||
let delta = deltaSource > -deltaDestination ? Math.ceil(-deltaDestination) : Math.ceil(deltaSource)
const vms = sortBy(
filter(
sourceVms,
vm => hostsAverages[destinationHost.id].memoryFree >= vmsAverages[vm.id].memory && vm.CPUs.number <= delta
),
[vm => -vm.CPUs.number]
)

for (const vm of vms) {
// migrate only if destination is vCPU-underloaded and if this does not cause performance issues
if (
vm.CPUs.number <= delta &&
hostsAverages[destinationHost.id].cpu + vmsAverages[vm.id].cpu < this._thresholds.cpu.low &&
hostsAverages[destinationHost.id].memoryFree - vmsAverages[vm.id].memory > this._thresholds.memoryFree.low
) {
const source = idToHost[sourceHost.id]
const destination = idToHost[destinationHost.id]
debugVcpuBalancing(
`Migrate VM (${vm.id} "${vm.name_label}") with ${vm.CPUs.number} vCPU to Host (${destinationHost.id} "${destination.name_label}") from Host (${sourceHost.id} "${source.name_label}").`
)
// 3. Update tags and averages.
// This update can change the source host for the next migration.
sourceHost.vcpuCount -= vm.CPUs.number
destinationHost.vcpuCount += vm.CPUs.number

const destinationAverages = hostsAverages[destinationHost.id]
const vmAverages = vmsAverages[vm.id]

destinationAverages.cpu += vmAverages.cpu
destinationAverages.memoryFree -= vmAverages.memory

delete sourceHost.vms[vm.id]

// 4. Migrate.
const sourceXapi = this.xo.getXapi(source)
promises.push(
this._concurrentMigrationLimiter.call(
sourceXapi,
'migrateVm',
vm._xapiId,
this.xo.getXapi(destination),
destination._xapiId
)
)
debugVcpuBalancing(`vCPU count per host: ${inspect(hostList, { depth: null })}`)

// 5. Check if source host is still overloaded and if destination host is still underloaded
deltaSource = sourceHost.vcpuCount - sourceHost.cpuCount * idealVcpuPerCpuRatio
if (deltaSource < 1) {
// eslint-disable-next-line no-labels
break destinationLoop
}
deltaDestination = destinationHost.vcpuCount - destinationHost.cpuCount * idealVcpuPerCpuRatio
if (deltaDestination >= 0) {
break
}
delta = deltaSource > -deltaDestination ? Math.ceil(-deltaDestination) : Math.ceil(deltaSource)
}
}
}
}
return Promise.allSettled(promises)
}

_getVCPUHosts(hosts, vms) {
const idToHost = {}
for (const host of hosts) {
const taggedHost = (idToHost[host.id] = {
id: host.id,
poolId: host.$poolId,
cpuCount: parseInt(host.CPUs.cpu_count),
vcpuCount: 0,
vms: {},
})

// Hide properties when util.inspect is used.
Object.defineProperties(taggedHost, {
poolId: { enumerable: false },
vms: { enumerable: false },
})
}

for (const vm of vms) {
const hostId = vm.$container
if (!(hostId in idToHost)) {
continue
}

const host = idToHost[hostId]
host.vcpuCount += vm.CPUs.number

if (vm.xenTools && vm.tags.every(tag => !this._antiAffinityTags.includes(tag))) {
host.vms[vm.id] = vm
}
}

return Object.values(idToHost)
}

// ===================================================================
// Anti-affinity helpers
// ===================================================================
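As an aside, the vCPU pressure arithmetic used by the loop above can be shown in a small standalone sketch. The ratio and host figures below are hypothetical and only illustrate how `deltaSource`, `deltaDestination` and `delta` relate; this is not code from the plugin.

```js
// Hypothetical figures, not taken from the plugin above.
const idealVcpuPerCpuRatio = 2 // assumed target of 2 vCPUs per physical CPU

const sourceHost = { cpuCount: 16, vcpuCount: 40 } // overloaded: 40 vCPUs on 16 CPUs
const destinationHost = { cpuCount: 16, vcpuCount: 20 } // underloaded: 20 vCPUs on 16 CPUs

// positive delta = vCPUs above the ideal load, negative delta = spare capacity
const deltaSource = sourceHost.vcpuCount - sourceHost.cpuCount * idealVcpuPerCpuRatio // 40 - 32 = 8
const deltaDestination = destinationHost.vcpuCount - destinationHost.cpuCount * idealVcpuPerCpuRatio // 20 - 32 = -12

// at most `delta` vCPUs are moved: the smaller of what the source should shed (8)
// and what the destination can absorb (12)
const delta = deltaSource > -deltaDestination ? Math.ceil(-deltaDestination) : Math.ceil(deltaSource)
console.log({ deltaSource, deltaDestination, delta }) // { deltaSource: 8, deltaDestination: -12, delta: 8 }
```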

@@ -26,7 +26,7 @@
"@babel/plugin-proposal-decorators": "^7.4.0",
"@babel/preset-env": "^7.1.6",
"@iarna/toml": "^2.2.1",
"@vates/decorate-with": "^2.1.0",
"@vates/decorate-with": "^2.0.0",
"@vates/parse-duration": "^0.1.1",
"app-conf": "^2.3.0",
"golike-defer": "^0.5.1",

@@ -285,45 +285,12 @@ The following query parameters are supported to customize the created VDI:

To see the actions available on a given object, get the collection at `/rest/v0/<type>/<uuid>/actions`.

The field `params` contains the [JSON schema](https://json-schema.org/) for the parameters. Use `fields=params` to see it when fetching the collection.

For example, to list all actions on a given VM:

```sh
curl \
-b authenticationToken=KQxQdm2vMiv7jBIK0hgkmgxKzemd8wSJ7ugFGKFkTbs \
'https://xo.company.lan/rest/v0/vms/770aa52a-fd42-8faf-f167-8c5c4a237cac/actions?fields=params'
```

Example response:

```json
[
{
"href": "/rest/v0/vms/770aa52a-fd42-8faf-f167-8c5c4a237cac/actions/clean_reboot"
},
{
"href": "/rest/v0/vms/770aa52a-fd42-8faf-f167-8c5c4a237cac/actions/clean_shutdown"
},
{
"href": "/rest/v0/vms/770aa52a-fd42-8faf-f167-8c5c4a237cac/actions/hard_reboot"
},
{
"href": "/rest/v0/vms/770aa52a-fd42-8faf-f167-8c5c4a237cac/actions/hard_shutdown"
},
{
"params": {
"name_label": {
"type": "string",
"optional": true
}
},
"href": "/rest/v0/vms/770aa52a-fd42-8faf-f167-8c5c4a237cac/actions/snapshot"
},
{
"href": "/rest/v0/vms/770aa52a-fd42-8faf-f167-8c5c4a237cac/actions/start"
}
]
'https://xo.company.lan/rest/v0/vms/770aa52a-fd42-8faf-f167-8c5c4a237cac/actions'
```

### Start an action

@@ -1,7 +1,7 @@
{
"private": true,
"name": "xo-server",
"version": "5.135.1",
"version": "5.132.0",
"license": "AGPL-3.0-or-later",
"description": "Server part of Xen-Orchestra",
"keywords": [
@@ -32,7 +32,7 @@
"@vates/async-each": "^1.0.0",
"@vates/cached-dns.lookup": "^1.0.0",
"@vates/compose": "^2.1.0",
"@vates/decorate-with": "^2.1.0",
"@vates/decorate-with": "^2.0.0",
"@vates/disposable": "^0.1.5",
"@vates/event-listeners-manager": "^1.0.1",
"@vates/multi-key-map": "^0.2.0",
@@ -41,19 +41,18 @@
"@vates/predicates": "^1.1.0",
"@vates/read-chunk": "^1.2.0",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/backups": "^0.44.6",
"@xen-orchestra/backups": "^0.44.3",
"@xen-orchestra/cron": "^1.0.6",
"@xen-orchestra/defined": "^0.0.1",
"@xen-orchestra/emit-async": "^1.0.0",
"@xen-orchestra/fs": "^4.1.4",
"@xen-orchestra/fs": "^4.1.3",
"@xen-orchestra/log": "^0.6.0",
"@xen-orchestra/mixin": "^0.1.0",
"@xen-orchestra/mixins": "^0.14.0",
"@xen-orchestra/self-signed": "^0.2.0",
"@xen-orchestra/self-signed": "^0.1.3",
"@xen-orchestra/template": "^0.1.0",
"@xen-orchestra/vmware-explorer": "^0.4.0",
"@xen-orchestra/xapi": "^4.2.0",
"@xen-orchestra/xva": "^1.0.2",
"@xen-orchestra/vmware-explorer": "^0.3.1",
"@xen-orchestra/xapi": "^4.1.0",
"ajv": "^8.0.3",
"app-conf": "^2.3.0",
"async-iterator-to-stream": "^1.0.1",
@@ -132,7 +131,7 @@
"vhd-lib": "^4.9.0",
"ws": "^8.2.3",
"xdg-basedir": "^5.1.0",
"xen-api": "^2.0.1",
"xen-api": "^2.0.0",
"xo-acl-resolver": "^0.4.1",
"xo-collection": "^0.5.0",
"xo-common": "^0.8.0",
@@ -151,7 +150,7 @@
"babel-plugin-transform-dev": "^2.0.1",
"cross-env": "^7.0.2",
"index-modules": "^0.4.3",
"test": "^3.3.0"
"tap": "^16.0.1"
},
"scripts": {
"_build": "index-modules --index-file index.mjs src/api src/xapi/mixins src/xo-mixins && babel --delete-dir-on-start --keep-file-extension --source-maps --out-dir=dist/ src/",
@@ -160,7 +159,7 @@
"prepublishOnly": "yarn run build",
"start": "node dist/cli.mjs",
"pretest": "yarn run build",
"test": "node--test dist"
"test": "tap 'dist/**/*.spec.mjs'"
},
"author": {
"name": "Vates SAS",

@@ -1,9 +1,9 @@
import assert from 'assert/strict'
import test from 'test'
import tap from 'tap'

import ensureArray from './_ensureArray.mjs'

const { describe, it } = test
const { describe, it } = tap.mocha

describe('ensureArray()', function () {
it('wrap the value in an array', function () {
@@ -1,10 +1,9 @@
import assert from 'node:assert'
import test from 'test'
import tap from 'tap'

import normalizeVmNetworks from './_normalizeVmNetworks.mjs'

test('normalizeVmNetworks', async t => {
assert.deepStrictEqual(
tap.test('normalizeVmNetworks', async t => {
t.strictSame(
normalizeVmNetworks({
// legacy protocol
'0/ip': '127.0.0.1 127.0.0.2',
@@ -1,8 +1,8 @@
import assert from 'assert/strict'
import test from 'test'
import tap from 'tap'
import { debounceWithKey, REMOVE_CACHE_ENTRY } from './_pDebounceWithKey.mjs'

const { describe, it } = test
const { describe, it } = tap.mocha

describe('REMOVE_CACHE_ENTRY', () => {
it('clears the cache', async () => {
@@ -200,31 +200,6 @@ rollingUpdate.resolve = {

// -------------------------------------------------------------------

export async function rollingReboot({ bypassBackupCheck, pool }) {
const poolId = pool.id
if (bypassBackupCheck) {
log.warn('pool.rollingReboot update with argument "bypassBackupCheck" set to true', { poolId })
} else {
await backupGuard.call(this, poolId)
}

await this.rollingPoolReboot(pool)
}

rollingReboot.params = {
bypassBackupCheck: {
default: false,
type: 'boolean',
},
pool: { type: 'string' },
}

rollingReboot.resolve = {
pool: ['pool', 'pool', 'administrate'],
}

// -------------------------------------------------------------------

export async function getPatchesDifference({ source, target }) {
return this.getPatchesDifference(target.id, source.id)
}

@@ -277,50 +277,6 @@ createNfs.resolve = {
host: ['host', 'host', 'administrate'],
}

export async function createSmb({ host, nameLabel, nameDescription, server, user, password, srUuid }) {
const xapi = this.getXapi(host)

const deviceConfig = {
server,
user,
password,
}

if (srUuid !== undefined) {
return xapi.reattachSr({
uuid: srUuid,
nameLabel,
nameDescription,
type: 'smb',
deviceConfig,
})
}

const srRef = await xapi.SR_create({
device_config: deviceConfig,
host: host._xapiRef,
name_description: nameDescription,
name_label: nameLabel,
shared: true,
type: 'smb',
})

return xapi.getField('SR', srRef, 'uuid')
}

createSmb.params = {
host: { type: 'string' },
nameLabel: { type: 'string' },
nameDescription: { type: 'string', minLength: 0, default: '' },
server: { type: 'string' },
srUuid: { type: 'string', optional: true },
user: { type: 'string', optional: true },
password: { type: 'string', optional: true },
}

createSmb.resolve = {
host: ['host', 'host', 'administrate'],
}
// -------------------------------------------------------------------
// HBA SR

@@ -694,8 +694,6 @@ set.params = {

name_description: { type: 'string', minLength: 0, optional: true },

notes: { type: ['string', 'null'], maxLength: 2048, optional: true },

high_availability: {
optional: true,
enum: getHaValues(),
@@ -1326,7 +1324,7 @@ async function import_({ data, sr, type = 'xva', url }) {
}

const ref = await xapi.VM_import(await hrp(url), sr._xapiRef)
return xapi.call('VM.get_uuid', ref)
return xapi.call('VM.get_by_uuid', ref)
}

return {
@@ -1382,9 +1380,19 @@ import_.resolve = {

export { import_ as import }

export async function importFromEsxi({ host, network, password, sr, sslVerify = true, stopSource = false, user, vm }) {
export async function importFromEsxi({
host,
network,
password,
sr,
sslVerify = true,
stopSource = false,
thin = false,
user,
vm,
}) {
const task = await this.tasks.create({ name: `importing vm ${vm}` })
return task.run(() => this.migrationfromEsxi({ host, user, password, sslVerify, vm, sr, network, stopSource }))
return task.run(() => this.migrationfromEsxi({ host, user, password, sslVerify, thin, vm, sr, network, stopSource }))
}

importFromEsxi.params = {
@@ -1394,6 +1402,7 @@ importFromEsxi.params = {
sr: { type: 'string' },
sslVerify: { type: 'boolean', optional: true },
stopSource: { type: 'boolean', optional: true },
thin: { type: 'boolean', optional: true },
user: { type: 'string' },
vm: { type: 'string' },
}

@@ -170,31 +170,13 @@ async function setUpPassport(express, xo, { authentication: authCfg, http: { coo

// Registers the sign in form.
const signInPage = compilePug(await fse.readFile(new URL('../signin.pug', import.meta.url)))
express.get('/signin', async (req, res, next) => {
try {
let errorMsg
const tokenId = req.query.token
if (tokenId !== undefined) {
try {
const token = await xo.getAuthenticationToken(tokenId)

req.session.isPersistent = req.query
return saveToken(token, req, res, next)
} catch (error) {
errorMsg = error.message
}
} else {
errorMsg = req.flash('error')[0]
}
res.send(
signInPage({
error: errorMsg,
strategies,
})
)
} catch (error) {
next(error)
}
express.get('/signin', (req, res, next) => {
res.send(
signInPage({
error: req.flash('error')[0],
strategies,
})
)
})

express.get('/signout', (req, res) => {
@@ -224,7 +206,7 @@ async function setUpPassport(express, xo, { authentication: authCfg, http: { coo
}

if (await verifyTotp(req.body.otp, { secret: user.preferences.otp })) {
createAndSaveToken(req, res, next)
setToken(req, res, next)
} else {
req.flash('error', 'Invalid code')
res.redirect(303, '/signin-otp')
@@ -234,7 +216,7 @@ async function setUpPassport(express, xo, { authentication: authCfg, http: { coo
const PERMANENT_VALIDITY = ifDef(authCfg.permanentCookieValidity, parseDuration)
const SESSION_VALIDITY = ifDef(authCfg.sessionCookieValidity, parseDuration)
const TEN_YEARS = 10 * 365 * 24 * 60 * 60 * 1e3
const createAndSaveToken = async (req, res, next) => {
const setToken = async (req, res, next) => {
let { clientId } = req.cookies
if (clientId === undefined) {
clientId = Math.random().toString(36).slice(2)
@@ -255,20 +237,17 @@ async function setUpPassport(express, xo, { authentication: authCfg, http: { coo
expiresIn: isPersistent ? PERMANENT_VALIDITY : SESSION_VALIDITY,
userId: user.id,
})
delete req.session.user

return saveToken(token, req, res, next)
}
const saveToken = async (token, req, res, next) => {
res.cookie('token', token.id, {
...cookieCfg,

// a session (non-permanent) cookie must not have an expiration date
// because it must not survive browser restart
...(req.session.isPersistent ? { expires: new Date(token.expiration) } : undefined),
...(isPersistent ? { expires: new Date(token.expiration) } : undefined),
})

delete req.session.isPersistent
delete req.session.user
res.redirect(303, req.flash('return-url')[0] || '/')
}

@@ -309,7 +288,7 @@ async function setUpPassport(express, xo, { authentication: authCfg, http: { coo
return res.redirect(303, '/signin-otp')
}

createAndSaveToken(req, res, next)
setToken(req, res, next)
})(req, res, next)
}
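The hunk above hinges on one detail: a persistent token cookie gets an explicit `expires` attribute, while a session cookie gets none and therefore dies with the browser. Here is a minimal Express sketch of that decision; the route, the `remember` flag and the token object are invented for illustration and this is not the xo-server implementation.

```js
import express from 'express'

const app = express()

app.get('/demo-signin', (req, res) => {
  const isPersistent = req.query.remember === 'true' // hypothetical "remember me" flag
  const token = { id: 'abc123', expiration: Date.now() + 30 * 24 * 60 * 60 * 1000 } // hypothetical token

  res.cookie('token', token.id, {
    httpOnly: true, // assumed base cookie configuration
    sameSite: 'lax',
    // without `expires` (or `maxAge`) this is a session cookie
    // and disappears when the browser is closed
    ...(isPersistent ? { expires: new Date(token.expiration) } : undefined),
  })
  res.redirect(303, '/')
})

app.listen(3000)
```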

@@ -48,18 +48,15 @@ const getLogs = (db, args) => {

const deleteLogs = (db, args) =>
new Promise(resolve => {
let nDeleted = 0
let nRunning = 1
let count = 1
const cb = () => {
if (--nRunning === 0) {
console.log(nDeleted.toLocaleString(), 'deleted entries')
if (--count === 0) {
resolve()
}
}

const deleteEntry = key => {
++nDeleted
++nRunning
++count
db.del(key, cb)
}
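For readers unfamiliar with the pattern in this hunk: the counter starts at 1 to represent the enumeration itself, is incremented for every pending delete, and the promise resolves once it drops back to 0. Below is a generic sketch of the same idea; the `keys` array and the level-style `db.del(key, cb)` callback signature are assumptions made for the example.

```js
function deleteAll(db, keys) {
  return new Promise((resolve, reject) => {
    let nDeleted = 0
    let nRunning = 1 // accounts for the enumeration still being in progress
    const cb = err => {
      if (err != null) {
        return reject(err)
      }
      if (--nRunning === 0) {
        console.log(nDeleted.toLocaleString(), 'deleted entries')
        resolve()
      }
    }

    for (const key of keys) {
      ++nDeleted
      ++nRunning
      db.del(key, cb) // callback-style delete, as in the code above
    }

    cb() // enumeration done: release the initial count
  })
}
```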

@@ -1,10 +1,10 @@
import assert from 'assert/strict'
import forEach from 'lodash/forEach.js'
import test from 'test'
import tap from 'tap'
import { thunkToArray } from './utils.mjs'
import { crossProduct, mergeObjects } from './math.mjs'

const { describe, it } = test
const { describe, it } = tap.mocha

describe('mergeObjects', function () {
forEach(
@@ -1,11 +1,11 @@
import assert from 'assert/strict'
import test from 'test'
import tap from 'tap'
import { createReadStream, readFile } from 'fs'
import { fromCallback } from 'promise-toolbox'

import streamToExistingBuffer from './stream-to-existing-buffer.mjs'

const { describe, it } = test
const { describe, it } = tap.mocha

describe('streamToExistingBuffer()', () => {
it('read the content of a stream in a buffer', async () => {
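Several spec hunks above switch between the `tap` runner (whose `tap.mocha` export provides `describe`/`it`) and the `test` package, a userland mirror of `node:test` that pairs with plain `node:assert`. Here is a minimal spec in the latter style, assuming those devDependencies are installed; the `double` helper is invented for the example.

```js
import assert from 'node:assert/strict'
import test from 'test' // userland mirror of node:test

const { describe, it } = test

// toy function under test, invented for the example
const double = n => n * 2

describe('double()', function () {
  it('multiplies by two', function () {
    assert.strictEqual(double(21), 42)
  })
})
```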
Some files were not shown because too many files have changed in this diff