Compare commits

..

1 Commit

Author SHA1 Message Date
Florent Beauchamp
4984d2c5dd fix(vhd-lib): don't throw error when a parent locator is missing 2021-12-02 09:07:15 +01:00
48 changed files with 154 additions and 555 deletions

View File

@@ -65,23 +65,6 @@ const f = compose(
)
```
Functions can receive extra parameters:
```js
const isIn = (value, min, max) => min <= value && value <= max
// Only compatible when `fns` is passed as an array!
const f = compose([
[add, 2],
[isIn, 3, 10],
])
console.log(f(1))
// → true
```
> Note: if the first function is defined with extra parameters, it will only receive the first value passed to the composed function, instead of all the parameters.
## Contributions
Contributions are _very_ welcomed, either on the documentation or on

View File

@@ -46,20 +46,3 @@ const f = compose(
[add2, mul3]
)
```
Functions can receive extra parameters:
```js
const isIn = (value, min, max) => min <= value && value <= max
// Only compatible when `fns` is passed as an array!
const f = compose([
[add, 2],
[isIn, 3, 10],
])
console.log(f(1))
// → true
```
> Note: if the first function is defined with extra parameters, it will only receive the first value passed to the composed function, instead of all the parameters.

View File

@@ -4,13 +4,11 @@ const defaultOpts = { async: false, right: false }
exports.compose = function compose(opts, fns) {
if (Array.isArray(opts)) {
fns = opts.slice() // don't mutate passed array
fns = opts
opts = defaultOpts
} else if (typeof opts === 'object') {
opts = Object.assign({}, defaultOpts, opts)
if (Array.isArray(fns)) {
fns = fns.slice() // don't mutate passed array
} else {
if (!Array.isArray(fns)) {
fns = Array.prototype.slice.call(arguments, 1)
}
} else {
@@ -22,24 +20,6 @@ exports.compose = function compose(opts, fns) {
if (n === 0) {
throw new TypeError('at least one function must be passed')
}
for (let i = 0; i < n; ++i) {
const entry = fns[i]
if (Array.isArray(entry)) {
const fn = entry[0]
const args = entry.slice()
args[0] = undefined
fns[i] = function composeWithArgs(value) {
args[0] = value
try {
return fn.apply(this, args)
} finally {
args[0] = undefined
}
}
}
}
if (n === 1) {
return fns[0]
}

View File

@@ -14,7 +14,7 @@
"url": "https://vates.fr"
},
"license": "ISC",
"version": "2.1.0",
"version": "2.0.0",
"engines": {
"node": ">=7.6"
},

View File

@@ -59,17 +59,6 @@ decorateMethodsWith(Foo, {
The decorated class is returned, so you can export it directly.
To apply multiple transforms to a method, you can either call `decorateMethodsWith` multiple times or use [`@vates/compose`](https://www.npmjs.com/package/@vates/compose):
```js
decorateMethodsWith(Foo, {
bar: compose([
[lodash.debounce, 150]
lodash.curry,
])
})
```
## Contributions
Contributions are _very_ welcomed, either on the documentation or on

View File

@@ -40,14 +40,3 @@ decorateMethodsWith(Foo, {
```
The decorated class is returned, so you can export it directly.
To apply multiple transforms to a method, you can either call `decorateMethodsWith` multiple times or use [`@vates/compose`](https://www.npmjs.com/package/@vates/compose):
```js
decorateMethodsWith(Foo, {
bar: compose([
[lodash.debounce, 150]
lodash.curry,
])
})
```

View File

@@ -7,8 +7,8 @@
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"dependencies": {
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/backups": "^0.16.2",
"@xen-orchestra/fs": "^0.19.2",
"@xen-orchestra/backups": "^0.16.0",
"@xen-orchestra/fs": "^0.19.1",
"filenamify": "^4.1.0",
"getopts": "^2.2.5",
"lodash": "^4.17.15",

View File

@@ -469,10 +469,10 @@ class RemoteAdapter {
async writeVhd(path, input, { checksum = true, validator = noop } = {}) {
const handler = this._handler
let dataPath = path
if (path.endsWith('.alias.vhd')) {
const dataPath = `${dirname(path)}/data/${uuidv4()}.vhd`
await createVhdDirectoryFromStream(handler, dataPath, input, {
await createVhdDirectoryFromStream(handler, `${dirname(path)}/data/${uuidv4()}.vhd`, input, {
concurrency: 16,
async validator() {
await input.task
@@ -481,7 +481,7 @@ class RemoteAdapter {
})
await VhdAbstract.createAlias(handler, path, dataPath)
} else {
await this.outputStream(path, input, { checksum, validator })
await this.outputStream(dataPath, input, { checksum, validator })
}
}

View File

@@ -36,11 +36,6 @@ const forkDeltaExport = deltaExport =>
exports.VmBackup = class VmBackup {
constructor({ config, getSnapshotNameLabel, job, remoteAdapters, remotes, schedule, settings, srs, vm }) {
if (vm.other_config['xo:backup:job'] === job.id) {
// otherwise replicated VMs would be matched and replicated again and again
throw new Error('cannot backup a VM created by this very job')
}
this.config = config
this.job = job
this.remoteAdapters = remoteAdapters
@@ -338,16 +333,13 @@ exports.VmBackup = class VmBackup {
const baseUuidToSrcVdi = new Map()
await asyncMap(await baseVm.$getDisks(), async baseRef => {
const [baseUuid, snapshotOf] = await Promise.all([
xapi.getField('VDI', baseRef, 'uuid'),
xapi.getField('VDI', baseRef, 'snapshot_of'),
])
const snapshotOf = await xapi.getField('VDI', baseRef, 'snapshot_of')
const srcVdi = srcVdis[snapshotOf]
if (srcVdi !== undefined) {
baseUuidToSrcVdi.set(baseUuid, srcVdi)
baseUuidToSrcVdi.set(await xapi.getField('VDI', baseRef, 'uuid'), srcVdi)
} else {
debug('ignore snapshot VDI because no longer present on VM', {
vdi: baseUuid,
debug('no base VDI found', {
vdi: srcVdi.uuid,
})
}
})
@@ -359,11 +351,6 @@ exports.VmBackup = class VmBackup {
false
)
if (presentBaseVdis.size === 0) {
debug('no base VM found')
return
}
const fullVdisRequired = new Set()
baseUuidToSrcVdi.forEach((srcVdi, baseUuid) => {
if (presentBaseVdis.has(baseUuid)) {

View File

@@ -256,12 +256,9 @@ exports.cleanVm = async function cleanVm(
const { vhds } = metadata
return Object.keys(vhds).map(key => resolve('/', vmDir, vhds[key]))
})()
const missingVhds = linkedVhds.filter(_ => !vhds.has(_))
// FIXME: find better approach by keeping as much of the backup as
// possible (existing disks) even if one disk is missing
if (missingVhds.length === 0) {
if (linkedVhds.every(_ => vhds.has(_))) {
linkedVhds.forEach(_ => unusedVhds.delete(_))
// checking the size of a vhd directory is costly
@@ -280,7 +277,7 @@ exports.cleanVm = async function cleanVm(
}
}
} else {
onLog(`Some VHDs linked to the metadata ${json} are missing`, { missingVhds })
onLog(`Some VHDs linked to the metadata ${json} are missing`)
if (remove) {
onLog(`deleting incomplete backup ${json}`)
await handler.unlink(json)

View File

@@ -8,7 +8,7 @@
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"version": "0.16.2",
"version": "0.16.0",
"engines": {
"node": ">=14.6"
},
@@ -16,11 +16,11 @@
"postversion": "npm publish --access public"
},
"dependencies": {
"@vates/compose": "^2.1.0",
"@vates/compose": "^2.0.0",
"@vates/disposable": "^0.1.1",
"@vates/parse-duration": "^0.1.1",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/fs": "^0.19.2",
"@xen-orchestra/fs": "^0.19.1",
"@xen-orchestra/log": "^0.3.0",
"@xen-orchestra/template": "^0.1.0",
"compare-versions": "^4.0.1",
@@ -36,7 +36,7 @@
"proper-lockfile": "^4.1.2",
"pump": "^3.0.0",
"uuid": "^8.3.2",
"vhd-lib": "^2.0.3",
"vhd-lib": "^2.0.1",
"yazl": "^2.5.1"
},
"peerDependencies": {

View File

@@ -21,18 +21,10 @@ exports.MixinBackupWriter = (BaseClass = Object) =>
this.#vmBackupDir = getVmBackupDir(this._backup.vm.uuid)
}
async _cleanVm(options) {
try {
return await this._adapter.cleanVm(this.#vmBackupDir, {
...options,
fixMetadata: true,
onLog: warn,
lock: false,
})
} catch (error) {
warn(error)
return {}
}
_cleanVm(options) {
return this._adapter
.cleanVm(this.#vmBackupDir, { ...options, fixMetadata: true, onLog: warn, lock: false })
.catch(warn)
}
async beforeBackup() {
@@ -51,13 +43,7 @@ exports.MixinBackupWriter = (BaseClass = Object) =>
// merge worker only compatible with local remotes
const { handler } = this._adapter
if (merge && !disableMergeWorker && typeof handler._getRealPath === 'function') {
const taskFile =
join(MergeWorker.CLEAN_VM_QUEUE, formatFilenameDate(new Date())) +
'-' +
// add a random suffix to avoid collision in case multiple tasks are created at the same second
Math.random().toString(36).slice(2)
await handler.outputFile(taskFile, this._backup.vm.uuid)
await handler.outputFile(join(MergeWorker.CLEAN_VM_QUEUE, formatFilenameDate(new Date())), this._backup.vm.uuid)
const remotePath = handler._getRealPath()
await MergeWorker.run(remotePath)
}

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "@xen-orchestra/fs",
"version": "0.19.2",
"version": "0.19.1",
"license": "AGPL-3.0-or-later",
"description": "The File System for Xen Orchestra backups.",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/fs",

View File

@@ -152,30 +152,16 @@ export default class S3Handler extends RemoteHandlerAbstract {
const splitPrefix = splitPath(prefix)
const result = await this._s3.listObjectsV2({
Bucket: this._bucket,
Prefix: splitPrefix.join('/') + '/', // need slash at the end with the use of delimiters
Delimiter: '/', // will only return path until delimiters
Prefix: splitPrefix.join('/'),
})
if (result.isTruncated) {
const error = new Error('more than 1000 objects, unsupported in this implementation')
error.dir = dir
throw error
}
const uniq = []
// sub directories
for (const entry of result.CommonPrefixes) {
const line = splitPath(entry.Prefix)
uniq.push(line[line.length - 1])
}
// files
const uniq = new Set()
for (const entry of result.Contents) {
const line = splitPath(entry.Key)
uniq.push(line[line.length - 1])
if (line.length > splitPrefix.length) {
uniq.add(line[splitPrefix.length])
}
}
return uniq
return [...uniq]
}
async _mkdir(path) {
@@ -236,9 +222,9 @@ export default class S3Handler extends RemoteHandlerAbstract {
// nothing to do, directories do not exist, they are part of the files' path
}
// reimplement _rmtree to efficiently handle paths with more than 1000 entries in trees
// reimplement _rmTree to efficiently handle paths with more than 1000 entries in trees
// @todo : use parallel processing for unlink
async _rmtree(path) {
async _rmTree(path) {
let NextContinuationToken
do {
const result = await this._s3.listObjectsV2({
@@ -247,13 +233,8 @@ export default class S3Handler extends RemoteHandlerAbstract {
ContinuationToken: NextContinuationToken,
})
NextContinuationToken = result.isTruncated ? null : result.NextContinuationToken
for (const { Key } of result.Contents) {
// _unlink will add the prefix, but Key contains everything
// also we don't need to check if we delete a directory, since the list only returns files
await this._s3.deleteObject({
Bucket: this._bucket,
Key,
})
for (const path of result.Contents) {
await this._unlink(path)
}
} while (NextContinuationToken !== null)
}

View File

@@ -1,7 +1,7 @@
{
"private": true,
"name": "@xen-orchestra/proxy",
"version": "0.15.5",
"version": "0.15.3",
"license": "AGPL-3.0-or-later",
"description": "XO Proxy used to remotely execute backup jobs",
"keywords": [
@@ -27,12 +27,12 @@
"dependencies": {
"@iarna/toml": "^2.2.0",
"@koa/router": "^10.0.0",
"@vates/compose": "^2.1.0",
"@vates/compose": "^2.0.0",
"@vates/decorate-with": "^0.1.0",
"@vates/disposable": "^0.1.1",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/backups": "^0.16.2",
"@xen-orchestra/fs": "^0.19.2",
"@xen-orchestra/backups": "^0.16.0",
"@xen-orchestra/fs": "^0.19.1",
"@xen-orchestra/log": "^0.3.0",
"@xen-orchestra/mixin": "^0.1.0",
"@xen-orchestra/mixins": "^0.1.1",

View File

@@ -1,7 +1,4 @@
const { execFile } = require('child_process')
const { promisify } = require('util')
const randomBytes = promisify(require('crypto').randomBytes)
const openssl = (cmd, args, { input, ...opts } = {}) =>
new Promise((resolve, reject) => {
@@ -13,35 +10,12 @@ const openssl = (cmd, args, { input, ...opts } = {}) =>
}
})
const req = (key, selfSigned, { days = 360 } = {}) => {
const args = ['-batch', '-new', '-key', '-', '-nodes']
if (selfSigned) {
args.push('-x509', '-days', String(days))
}
return openssl('req', args, { input: key })
}
exports.genSelfSignedCert = async opts => {
exports.genSelfSignedCert = async ({ days = 360 } = {}) => {
const key = await openssl('genrsa', ['2048'])
return {
cert: await req(key, true, opts),
key,
}
}
exports.genSignedCert = async (ca, { days = 360 } = {}) => {
const key = await openssl('genrsa', ['2048'])
const csr = await req(key, false)
const serial = '0x' + (await randomBytes(40)).toString('hex')
const input = [csr, ca.cert, ca.key].join('\n')
return {
cert: await openssl(
'x509',
['-req', '-in', '-', '-CA', '-', '-CAkey', '-', '-days', String(days), '-set_serial', serial],
{
input,
}
),
cert: await openssl('req', ['-batch', '-new', '-key', '-', '-x509', '-days', String(days), '-nodes'], {
input: key,
}),
key,
}
}

View File

@@ -45,7 +45,7 @@
"strip-indent": "^3.0.0",
"xdg-basedir": "^4.0.0",
"xo-lib": "^0.11.1",
"xo-vmdk-to-vhd": "^2.0.1"
"xo-vmdk-to-vhd": "^2.0.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",

View File

@@ -1,44 +1,9 @@
# ChangeLog
## **5.65.2** (2021-12-10)
## **5.65.0** (2021-11-30)
<img id="latest" src="https://badgen.net/badge/channel/latest/yellow" alt="Channel: latest" />
### Bug fixes
- [Backup] Fix `handler.rmTree` is not a function (Forum [5256](https://xcp-ng.org/forum/topic/5256/s3-backup-try-it/29) PR [#6041](https://github.com/vatesfr/xen-orchestra/pull/6041))
- [Backup] Fix `EEXIST` in logs when multiple merge tasks are created at the same time ([Forum #5301](https://xcp-ng.org/forum/topic/5301/warnings-errors-in-journalctl))
- [Backup] Fix missing backup on restore (Forum [5256](https://xcp-ng.org/forum/topic/5256/s3-backup-try-it/29)) (PR [#6048](https://github.com/vatesfr/xen-orchestra/pull/6048))
### Released packages
- @xen-orchestra/fs 0.19.2
- vhd-lib 2.0.3
- @xen-orchestra/backups 0.16.2
- xo-server 5.84.3
- @xen-orchestra/proxy 0.15.5
## **5.65.1** (2021-12-03)
### Bug fixes
- [Delta Backup Restoration] Fix assertion error [Forum #5257](https://xcp-ng.org/forum/topic/5257/problems-building-from-source/16)
- [Delta Backup Restoration] `TypeError: this disposable has already been disposed` [Forum #5257](https://xcp-ng.org/forum/topic/5257/problems-building-from-source/20)
- [Backups] Fix: `Error: Chaining alias is forbidden xo-vm-backups/..alias.vhd to xo-vm-backups/....alias.vhd` when backing up a file to S3 [Forum #5226](https://xcp-ng.org/forum/topic/5256/s3-backup-try-it)
- [Delta Backup Restoration] `VDI_IO_ERROR(Device I/O errors)` [Forum #5727](https://xcp-ng.org/forum/topic/5257/problems-building-from-source/4) (PR [#6031](https://github.com/vatesfr/xen-orchestra/pull/6031))
- [Delta Backup] Fix `Cannot read property 'uuid' of undefined` when a VDI has been removed from a backed up VM (PR [#6034](https://github.com/vatesfr/xen-orchestra/pull/6034))
### Released packages
- @vates/compose 2.1.0
- vhd-lib 2.0.2
- xo-vmdk-to-vhd 2.0.1
- @xen-orchestra/backups 0.16.1
- @xen-orchestra/proxy 0.15.4
- xo-server 5.84.2
## **5.65.0** (2021-11-30)
### Highlights
- [VM] Ability to export a snapshot's memory (PR [#6015](https://github.com/vatesfr/xen-orchestra/pull/6015))
@@ -54,7 +19,7 @@
- [Import/VM] Fix the import of OVA files (PR [#5976](https://github.com/vatesfr/xen-orchestra/pull/5976))
### Released packages
### Packages to release
- @vates/async-each 0.1.0
- xo-remote-parser 0.8.4

View File

@@ -7,17 +7,13 @@
> Users must be able to say: “Nice enhancement, I'm eager to test it”
- [About] Show commit instead of version numbers for source users (PR [#6045](https://github.com/vatesfr/xen-orchestra/pull/6045))
- [Health] Display default SRs that aren't shared [#5871](https://github.com/vatesfr/xen-orchestra/issues/5871) (PR [#6033](https://github.com/vatesfr/xen-orchestra/pull/6033))
- [Pool,VM/advanced] Ability to change the suspend SR [#4163](https://github.com/vatesfr/xen-orchestra/issues/4163) (PR [#6044](https://github.com/vatesfr/xen-orchestra/pull/6044))
### Bug fixes
> Users must be able to say: “I had this issue, happy to know it's fixed”
- [Tables/actions] Fix collapsed actions being clickable despite being disabled (PR [#6023](https://github.com/vatesfr/xen-orchestra/pull/6023))
- [Continuous Replication] Fix `could not find the base VM`
- [Backup/Smart mode] Always ignore replicated VMs created by the current job
- [Delta Backup Restoration] Fix assertion error [Forum #5257](https://xcp-ng.org/forum/topic/5257/problems-building-from-source/16)
- [Delta Backup Restoration] `TypeError: this disposable has already been disposed` [Forum #5257](https://xcp-ng.org/forum/topic/5257/problems-building-from-source/20)
- [Delta Backup Restoration] Fix error `ENOENT: no such file '/xo-vm-backups/../parentLocatorEntryN'` with a VHD having an empty parent locator
### Packages to release
@@ -36,7 +32,8 @@
>
> In case of conflict, the highest (lowest in previous list) `$version` wins.
- @xen-orchestra/backups minor
- @xen-orchestra/proxy minor
- xo-server minor
- xo-web minor
- xo-vmdk-to-vhd patch
- vhd-lib patch
- @xen-orchestra/backups patch
- @xen-orchestra/proxy patch
- xo-server patch

View File

@@ -341,16 +341,6 @@ XO will try to find the right prefix for each IP address. If it can't find a pre
- Generate a token:
- Go to Admin > Tokens > Add token
- Create a token with "Write enabled"
- The owner of the token must have at least the following permissions:
- View permissions on:
- extras > custom-fields
- ipam > prefixes
- All permissions on:
- ipam > ip-addresses
- virtualization > cluster-types
- virtualization > clusters
- virtualization > interfaces
- virtualization > virtual-machines
- Add a UUID custom field (for **Netbox 2.x**):
- Got to Admin > Custom fields > Add custom field
- Create a custom field called "uuid" (lower case!)

View File

@@ -103,9 +103,9 @@ In that case, you already set the password for `xoa` user. If you forgot it, see
### Manually deployed
If you connect via SSH or console for the first time without using our [web deploy form](https://xen-orchestra.com/#!/xoa), be aware **there is NO default password set for security reasons**. To set it, you need to connect to your host to find the XOA VM UUID (eg via `xe vm-list`).
If you connect via SSH or console for the first time without using our [web deploy form](https://xen-orchestra.com/#!/xoa), be aware **there's NO default password set for security reasons**. To set it, you need to connect to your host to find the XOA VM UUID (eg via `xe vm-list`).
Next, you can replace `<UUID>` with the UUID you found previously, and `<password>` with your password:
Then replace `<UUID>` with the previously found UUID, and `<password>` with your password:
```
xe vm-param-set uuid=<UUID> xenstore-data:vm-data/system-account-xoa-password=<password>
@@ -115,9 +115,7 @@ xe vm-param-set uuid=<UUID> xenstore-data:vm-data/system-account-xoa-password=<p
Don't forget to use quotes for your password, eg: `xenstore-data:vm-data/system-account-xoa-password='MyPassW0rd!'`
:::
Finally, you must reboot the VM to implement the changes.
You can now connect with the `xoa` username and password you defined in the previous command, eg with `ssh xoa@<XOA IP ADDRESS>`.
Then, you could connect with `xoa` username and the password you defined in the previous command, eg with `ssh xoa@<XOA IP ADDRESS>`.
### Using sudo

View File

@@ -24,12 +24,12 @@
"node": ">=8.10"
},
"dependencies": {
"@xen-orchestra/fs": "^0.19.2",
"@xen-orchestra/fs": "^0.19.1",
"cli-progress": "^3.1.0",
"exec-promise": "^0.7.0",
"getopts": "^2.2.3",
"human-format": "^0.11.0",
"vhd-lib": "^2.0.3"
"vhd-lib": "^2.0.1"
},
"devDependencies": {
"@babel/cli": "^7.0.0",

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "vhd-lib",
"version": "2.0.3",
"version": "2.0.1",
"license": "AGPL-3.0-or-later",
"description": "Primitives for VHD file handling",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/vhd-lib",
@@ -31,7 +31,7 @@
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"@xen-orchestra/fs": "^0.19.2",
"@xen-orchestra/fs": "^0.19.1",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^7.0.2",
"execa": "^5.0.0",

View File

@@ -9,7 +9,7 @@ import { Disposable, pFromCallback } from 'promise-toolbox'
import { openVhd } from '../index'
import { checkFile, createRandomFile, convertFromRawToVhd, createRandomVhdDirectory } from '../tests/utils'
import { VhdAbstract } from './VhdAbstract'
import { BLOCK_UNUSED, FOOTER_SIZE, HEADER_SIZE, PLATFORMS, SECTOR_SIZE } from '../_constants'
import { BLOCK_UNUSED, FOOTER_SIZE, HEADER_SIZE, SECTOR_SIZE } from '../_constants'
import { unpackHeader, unpackFooter } from './_utils'
let tempDir
@@ -163,22 +163,20 @@ test('it can create a vhd stream', async () => {
await createRandomFile(rawFileName, initalSize)
const vhdFileName = `${tempDir}/vhd.vhd`
await convertFromRawToVhd(rawFileName, vhdFileName)
const bat = Buffer.alloc(512)
await Disposable.use(async function* () {
const handler = yield getSyncedHandler({ url: 'file://' + tempDir })
const vhd = yield openVhd(handler, 'vhd.vhd')
await vhd.readBlockAllocationTable()
const parentLocatorBase = Buffer.from('a file path, not aligned', 'utf16le')
const aligned = Buffer.alloc(SECTOR_SIZE, 0)
parentLocatorBase.copy(aligned)
await vhd.writeParentLocator({
id: 0,
platformCode: PLATFORMS.W2KU,
data: parentLocatorBase,
})
await vhd.writeFooter()
// mark first block as unused
await handler.read('vhd.vhd', bat, vhd.header.tableOffset)
bat.writeUInt32BE(BLOCK_UNUSED, 0)
await handler.write('vhd.vhd', bat, vhd.header.tableOffset)
// read our modified bat
await vhd.readBlockAllocationTable()
const stream = vhd.stream()
// read all the stream into a buffer
@@ -196,21 +194,17 @@ test('it can create a vhd stream', async () => {
expect(() => unpackHeader(bufHeader, footer)).not.toThrow()
// 1 deleted block should be in output
let start = FOOTER_SIZE + HEADER_SIZE + vhd.batSize
const parentLocatorData = buffer.slice(start, start + SECTOR_SIZE)
expect(parentLocatorData.equals(aligned)).toEqual(true)
start += SECTOR_SIZE // parent locator
expect(length).toEqual(start + initialNbBlocks * vhd.fullBlockSize + FOOTER_SIZE)
const start = FOOTER_SIZE + HEADER_SIZE + vhd.batSize
expect(length).toEqual(start + (initialNbBlocks - 1) * vhd.fullBlockSize + FOOTER_SIZE)
expect(stream.length).toEqual(buffer.length)
// blocks
const blockBuf = Buffer.alloc(vhd.sectorsPerBlock * SECTOR_SIZE, 0)
for (let i = 0; i < initialNbBlocks; i++) {
const blockDataStart = start + i * vhd.fullBlockSize + 512 /* block bitmap */
for (let i = 1; i < initialNbBlocks; i++) {
const blockDataStart = start + (i - 1) * vhd.fullBlockSize + 512 /* block bitmap */
const blockDataEnd = blockDataStart + vhd.sectorsPerBlock * SECTOR_SIZE
const content = buffer.slice(blockDataStart, blockDataEnd)
await handler.read('randomfile', blockBuf, i * vhd.sectorsPerBlock * SECTOR_SIZE)
expect(content.equals(blockBuf)).toEqual(true)
expect(content).toEqual(blockBuf)
}
// footer
const endFooter = buffer.slice(length - FOOTER_SIZE)

View File

@@ -1,13 +1,5 @@
import { computeBatSize, computeSectorOfBitmap, computeSectorsPerBlock, sectorsToBytes } from './_utils'
import {
ALIAS_MAX_PATH_LENGTH,
PLATFORMS,
SECTOR_SIZE,
PARENT_LOCATOR_ENTRIES,
FOOTER_SIZE,
HEADER_SIZE,
BLOCK_UNUSED,
} from '../_constants'
import { PLATFORMS, SECTOR_SIZE, PARENT_LOCATOR_ENTRIES, FOOTER_SIZE, HEADER_SIZE, BLOCK_UNUSED } from '../_constants'
import assert from 'assert'
import path from 'path'
import asyncIteratorToStream from 'async-iterator-to-stream'
@@ -15,6 +7,9 @@ import { checksumStruct, fuFooter, fuHeader } from '../_structs'
import { isVhdAlias, resolveAlias } from '../_resolveAlias'
export class VhdAbstract {
#header
footer
get bitmapSize() {
return sectorsToBytes(this.sectorsOfBitmap)
}
@@ -23,6 +18,11 @@ export class VhdAbstract {
return sectorsToBytes(this.sectorsOfBitmap + this.sectorsPerBlock)
}
get header() {
assert.notStrictEqual(this.#header, undefined, `header must be read before it's used`)
return this.#header
}
get sectorsOfBitmap() {
return computeSectorOfBitmap(this.header.blockSize)
}
@@ -31,12 +31,8 @@ export class VhdAbstract {
return computeSectorsPerBlock(this.header.blockSize)
}
get header() {
throw new Error('get header is not implemented')
}
get footer() {
throw new Error('get footer not implemented')
set header(header) {
this.#header = header
}
/**
@@ -141,7 +137,7 @@ export class VhdAbstract {
const entry = this.header.parentLocatorEntry[id]
const dataSpaceSectors = Math.ceil(data.length / SECTOR_SIZE)
entry.platformCode = platformCode
entry.platformDataSpace = dataSpaceSectors
entry.platformDataSpace = dataSpaceSectors * SECTOR_SIZE
entry.platformDataLength = data.length
}
@@ -221,12 +217,6 @@ export class VhdAbstract {
const aliasDir = path.dirname(path.resolve('/', aliasPath))
// only store the relative path from alias to target
const relativePathToTarget = path.relative(aliasDir, path.resolve('/', targetPath))
if (relativePathToTarget.length > ALIAS_MAX_PATH_LENGTH) {
throw new Error(
`Alias relative path ${relativePathToTarget} is too long : ${relativePathToTarget.length} chars, max is ${ALIAS_MAX_PATH_LENGTH}`
)
}
await handler.writeFile(aliasPath, relativePathToTarget)
}
@@ -236,23 +226,23 @@ export class VhdAbstract {
const rawFooter = fuFooter.pack(footer)
checksumStruct(rawFooter, fuFooter)
// compute parent locator place and size
// update them in header
// update checksum in header
let offset = FOOTER_SIZE + HEADER_SIZE + batSize
for (let i = 0; i < PARENT_LOCATOR_ENTRIES; i++) {
const { ...entry } = header.parentLocatorEntry[i]
if (entry.platformDataSpace > 0) {
entry.platformDataOffset = offset
offset += entry.platformDataSpace
}
header.parentLocatorEntry[i] = entry
}
const rawHeader = fuHeader.pack(header)
checksumStruct(rawHeader, fuHeader)
// add parentlocator size
for (let i = 0; i < PARENT_LOCATOR_ENTRIES; i++) {
header.parentLocatorEntry[i] = {
...header.parentLocatorEntry[i],
platformDataOffset: offset,
}
offset += header.parentLocatorEntry[i].platformDataSpace * SECTOR_SIZE
}
assert.strictEqual(offset % SECTOR_SIZE, 0)
const bat = Buffer.allocUnsafe(batSize)
@@ -276,16 +266,12 @@ export class VhdAbstract {
yield rawHeader
yield bat
// yield parent locator
// yield parent locator entries
for (let i = 0; i < PARENT_LOCATOR_ENTRIES; i++) {
const space = header.parentLocatorEntry[i].platformDataSpace * SECTOR_SIZE
if (space > 0) {
const data = (await self.readParentLocator(i)).data
// align data to a sector
const buffer = Buffer.alloc(space, 0)
data.copy(buffer)
yield buffer
if (header.parentLocatorEntry[i].platformDataSpace > 0) {
const parentLocator = await self.readParentLocator(i)
// @ todo pad to platformDataSpace
yield parentLocator.data
}
}

View File

@@ -20,17 +20,14 @@ const { debug } = createLogger('vhd-lib:VhdDirectory')
export class VhdDirectory extends VhdAbstract {
#uncheckedBlockTable
#header
footer
set header(header) {
this.#header = header
super.header = header
this.#blockTable = Buffer.alloc(header.maxTableEntries)
}
get header() {
assert.notStrictEqual(this.#header, undefined, `header must be read before it's used`)
return this.#header
return super.header
}
get #blockTable() {
@@ -185,7 +182,14 @@ export class VhdDirectory extends VhdAbstract {
}
async _readParentLocatorData(id) {
return (await this._readChunk('parentLocatorEntry' + id)).buffer
try {
return (await this._readChunk('parentLocatorEntry' + id)).buffer
} catch (e) {
if (e.code === 'ENOENT') {
return Buffer.alloc(0)
}
throw e
}
}
async _writeParentLocatorData(id, data) {

View File

@@ -45,8 +45,6 @@ const { debug } = createLogger('vhd-lib:VhdFile')
export class VhdFile extends VhdAbstract {
#uncheckedBlockTable
#header
footer
get #blockTable() {
assert.notStrictEqual(this.#uncheckedBlockTable, undefined, 'Block table must be initialized before access')
@@ -62,7 +60,7 @@ export class VhdFile extends VhdAbstract {
}
set header(header) {
this.#header = header
super.header = header
const size = this.batSize
this.#blockTable = Buffer.alloc(size)
for (let i = 0; i < this.header.maxTableEntries; i++) {
@@ -70,7 +68,7 @@ export class VhdFile extends VhdAbstract {
}
}
get header() {
return this.#header
return super.header
}
static async open(handler, path, { flags, checkSecondFooter = true } = {}) {
@@ -416,7 +414,7 @@ export class VhdFile extends VhdAbstract {
async _readParentLocatorData(parentLocatorId) {
const { platformDataOffset, platformDataLength } = this.header.parentLocatorEntry[parentLocatorId]
if (platformDataLength > 0) {
return await this._read(platformDataOffset, platformDataLength)
return (await this._read(platformDataOffset, platformDataLength)).buffer
}
return Buffer.alloc(0)
}
@@ -428,8 +426,7 @@ export class VhdFile extends VhdAbstract {
// reset offset if data is empty
header.parentLocatorEntry[parentLocatorId].platformDataOffset = 0
} else {
const space = header.parentLocatorEntry[parentLocatorId].platformDataSpace * SECTOR_SIZE
if (data.length <= space) {
if (data.length <= header.parentLocatorEntry[parentLocatorId].platformDataSpace) {
// new parent locator length is smaller than available space : keep it in place
position = header.parentLocatorEntry[parentLocatorId].platformDataOffset
} else {
@@ -444,7 +441,7 @@ export class VhdFile extends VhdAbstract {
// move the first(s) block(s) at the end of the data
// move the parent locator to the precedent position of the first block
const { firstSector } = firstAndLastBlocks
await this._freeFirstBlockSpace(space)
await this._freeFirstBlockSpace(header.parentLocatorEntry[parentLocatorId].platformDataSpace)
position = sectorsToBytes(firstSector)
}
}

View File

@@ -1,5 +1,4 @@
import * as UUID from 'uuid'
import cloneDeep from 'lodash/cloneDeep.js'
import { asyncMap } from '@xen-orchestra/async-map'
import { VhdAbstract } from './VhdAbstract'
import { DISK_TYPES, FOOTER_SIZE, HEADER_SIZE } from '../_constants'
@@ -8,6 +7,9 @@ import assert from 'assert'
export class VhdSynthetic extends VhdAbstract {
#vhds = []
set header(_) {
throw new Error('Header is read only for VhdSynthetic')
}
get header() {
// this is the VHD we want to synthesize
@@ -20,7 +22,6 @@ export class VhdSynthetic extends VhdAbstract {
// TODO: set parentLocatorEntry-s in header
return {
...vhd.header,
parentLocatorEntry: cloneDeep(rootVhd.header.parentLocatorEntry),
tableOffset: FOOTER_SIZE + HEADER_SIZE,
parentTimestamp: rootVhd.header.parentTimestamp,
parentUnicodeName: rootVhd.header.parentUnicodeName,
@@ -28,6 +29,10 @@ export class VhdSynthetic extends VhdAbstract {
}
}
set footer(_) {
throw new Error('Footer is read only for VhdSynthetic')
}
get footer() {
// this is the root VHD
const rootVhd = this.#vhds[this.#vhds.length - 1]

View File

@@ -36,5 +36,3 @@ export const PLATFORMS = {
export const FILE_FORMAT_VERSION = 1 << 16
export const HEADER_VERSION = 1 << 16
export const ALIAS_MAX_PATH_LENGTH = 1024

View File

@@ -6,7 +6,6 @@ import { getSyncedHandler } from '@xen-orchestra/fs'
import { Disposable, pFromCallback } from 'promise-toolbox'
import { isVhdAlias, resolveAlias } from './_resolveAlias'
import { ALIAS_MAX_PATH_LENGTH } from './_constants'
let tempDir
@@ -33,32 +32,25 @@ test('resolve return the path in argument for a non alias file ', async () => {
test('resolve get the path of the target file for an alias', async () => {
await Disposable.use(async function* () {
// same directory
const handler = yield getSyncedHandler({ url: `file://${tempDir}` })
const alias = `alias.alias.vhd`
const handler = yield getSyncedHandler({ url: 'file:///' })
const tempDirFomRemoteUrl = tempDir.slice(1) // remove the / which is included in the remote url
const alias = `${tempDirFomRemoteUrl}/alias.alias.vhd`
await handler.writeFile(alias, 'target.vhd')
await expect(await resolveAlias(handler, alias)).toEqual(`target.vhd`)
expect(await resolveAlias(handler, alias)).toEqual(`${tempDirFomRemoteUrl}/target.vhd`)
// different directory
await handler.mkdir(`sub`)
await handler.mkdir(`${tempDirFomRemoteUrl}/sub/`)
await handler.writeFile(alias, 'sub/target.vhd', { flags: 'w' })
await expect(await resolveAlias(handler, alias)).toEqual(`sub/target.vhd`)
expect(await resolveAlias(handler, alias)).toEqual(`${tempDirFomRemoteUrl}/sub/target.vhd`)
})
})
test('resolve throws an error an alias to an alias', async () => {
await Disposable.use(async function* () {
const handler = yield getSyncedHandler({ url: `file://${tempDir}` })
const alias = `alias.alias.vhd`
const target = `target.alias.vhd`
const handler = yield getSyncedHandler({ url: 'file:///' })
const alias = `${tempDir}/alias.alias.vhd`
const target = `${tempDir}/target.alias.vhd`
await handler.writeFile(alias, target)
await expect(async () => await resolveAlias(handler, alias)).rejects.toThrow(Error)
})
})
test('resolve throws an error on a file too big ', async () => {
await Disposable.use(async function* () {
const handler = yield getSyncedHandler({ url: `file://${tempDir}` })
await handler.writeFile('toobig.alias.vhd', Buffer.alloc(ALIAS_MAX_PATH_LENGTH + 1, 0))
await expect(async () => await resolveAlias(handler, 'toobig.alias.vhd')).rejects.toThrow(Error)
expect(async () => await resolveAlias(handler, alias)).rejects.toThrow(Error)
})
})

View File

@@ -1,4 +1,3 @@
import { ALIAS_MAX_PATH_LENGTH } from './_constants'
import resolveRelativeFromFile from './_resolveRelativeFromFile'
export function isVhdAlias(filename) {
@@ -9,11 +8,6 @@ export async function resolveAlias(handler, filename) {
if (!isVhdAlias(filename)) {
return filename
}
const size = await handler.getSize(filename)
if (size > ALIAS_MAX_PATH_LENGTH) {
    // seems reasonable for a relative path
    throw new Error(`The alias file ${filename} is too big (${size} bytes)`)
}
const aliasContent = (await handler.readFile(filename)).toString().trim()
  // also handle circular references and unreasonably long chains
if (isVhdAlias(aliasContent)) {

View File

@@ -162,7 +162,7 @@ export async function createVhdDirectoryFromStream(handler, path, inputStream, {
}
} catch (error) {
// cleanup on error
await handler.rmtree(path)
await handler.rmTree(path)
throw error
}
}

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "xapi-explore-sr",
"version": "0.4.1",
"version": "0.4.0",
"license": "ISC",
"description": "Display the list of VDIs (unmanaged and snapshots included) of a SR",
"keywords": [
@@ -33,7 +33,6 @@
"node": ">=8"
},
"dependencies": {
"@xen-orchestra/defined": "^0.0.1",
"archy": "^1.0.0",
"chalk": "^4.1.0",
"exec-promise": "^0.7.0",

View File

@@ -8,6 +8,6 @@
"promise-toolbox": "^0.19.2",
"readable-stream": "^3.1.1",
"throttle": "^1.0.3",
"vhd-lib": "^2.0.3"
"vhd-lib": "^2.0.1"
}
}

View File

@@ -1,7 +1,7 @@
import asyncMapSettled from '@xen-orchestra/async-map/legacy'
import Handlebars from 'handlebars'
import humanFormat from 'human-format'
import { stringify } from 'csv-stringify'
import stringify from 'csv-stringify'
import { createLogger } from '@xen-orchestra/log'
import { createSchedule } from '@xen-orchestra/cron'
import { minify } from 'html-minifier'

View File

@@ -1,7 +1,7 @@
{
"private": true,
"name": "xo-server",
"version": "5.84.3",
"version": "5.84.1",
"license": "AGPL-3.0-or-later",
"description": "Server part of Xen-Orchestra",
"keywords": [
@@ -28,18 +28,18 @@
},
"dependencies": {
"@iarna/toml": "^2.2.1",
"@vates/compose": "^2.1.0",
"@vates/compose": "^2.0.0",
"@vates/decorate-with": "^0.1.0",
"@vates/disposable": "^0.1.1",
"@vates/multi-key-map": "^0.1.0",
"@vates/parse-duration": "^0.1.1",
"@vates/read-chunk": "^0.1.2",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/backups": "^0.16.2",
"@xen-orchestra/backups": "^0.16.0",
"@xen-orchestra/cron": "^1.0.6",
"@xen-orchestra/defined": "^0.0.1",
"@xen-orchestra/emit-async": "^0.1.0",
"@xen-orchestra/fs": "^0.19.2",
"@xen-orchestra/fs": "^0.19.1",
"@xen-orchestra/log": "^0.3.0",
"@xen-orchestra/mixin": "^0.1.0",
"@xen-orchestra/mixins": "^0.1.1",
@@ -123,7 +123,7 @@
"unzipper": "^0.10.5",
"uuid": "^8.3.1",
"value-matcher": "^0.2.0",
"vhd-lib": "^2.0.3",
"vhd-lib": "^2.0.1",
"ws": "^8.2.3",
"xdg-basedir": "^4.0.0",
"xen-api": "^0.35.1",
@@ -131,7 +131,7 @@
"xo-collection": "^0.5.0",
"xo-common": "^0.7.0",
"xo-remote-parser": "^0.8.0",
"xo-vmdk-to-vhd": "^2.0.1"
"xo-vmdk-to-vhd": "^2.0.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",

View File

@@ -117,7 +117,6 @@ const TRANSFORMS = {
cores: cpuInfo && +cpuInfo.cpu_count,
sockets: cpuInfo && +cpuInfo.socket_count,
},
suspendSr: link(obj, 'suspend_image_SR'),
zstdSupported: obj.restrictions.restrict_zstd_export === 'false',
// TODO
@@ -438,7 +437,6 @@ const TRANSFORMS = {
startDelay: +obj.start_delay,
startTime: metrics && toTimestamp(metrics.start_time),
secureBoot: obj.platform.secureboot === 'true',
suspendSr: link(obj, 'suspend_SR'),
tags: obj.tags,
VIFs: link(obj, 'VIFs'),
virtualizationMode: domainType,

View File

@@ -187,15 +187,6 @@ export default class BackupNg {
filter: createPredicate({
type: 'VM',
...vmsPattern,
// don't match VMs created by this very job
//
// otherwise replicated VMs would be matched and replicated again and again
other_config: {
__not: {
'xo:backup:job': job.id,
},
},
}),
})
)

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "xo-vmdk-to-vhd",
"version": "2.0.1",
"version": "2.0.0",
"license": "AGPL-3.0-or-later",
"description": "JS lib streaming a vmdk file to a vhd",
"keywords": [
@@ -25,7 +25,7 @@
"lodash": "^4.17.15",
"pako": "^2.0.4",
"promise-toolbox": "^0.20.0",
"vhd-lib": "^2.0.3",
"vhd-lib": "^2.0.1",
"xml2js": "^0.4.23"
},
"devDependencies": {

View File

@@ -138,12 +138,12 @@
"xo-common": "^0.7.0",
"xo-lib": "^0.11.1",
"xo-remote-parser": "^0.8.0",
"xo-vmdk-to-vhd": "^2.0.1"
"xo-vmdk-to-vhd": "^2.0.0"
},
"scripts": {
"build": "GIT_HEAD=$(git rev-parse HEAD) NODE_ENV=production gulp build",
"build": "NODE_ENV=production gulp build",
"clean": "gulp clean",
"dev": "GIT_HEAD=$(git rev-parse HEAD) NODE_ENV=development gulp build",
"dev": "NODE_ENV=development gulp build",
"prebuild": "yarn run clean && index-modules --auto src",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build"

View File

@@ -849,7 +849,6 @@ const messages = {
disconnectServer: 'Disconnect',
// ----- Host item ------
host: 'Host',
noMoreMaintained: 'This host version is no longer maintained',
// ----- Host actions ------
@@ -1267,7 +1266,6 @@ const messages = {
vmLimitsLabel: 'VM limits',
resourceSet: 'Resource set',
resourceSetNone: 'None',
suspendSr: 'Suspend SR',
vmCpuLimitsLabel: 'CPU limits',
vmCpuTopology: 'Topology',
vmChooseCoresPerSocket: 'Default behavior',
@@ -1381,9 +1379,6 @@ const messages = {
detachedBackups: 'Detached backups',
detachedVmSnapshots: 'Detached VM snapshots',
duplicatedMacAddresses: 'Duplicated MAC addresses',
localDefaultSrs: 'Local default SRs',
localDefaultSrsStatusTip:
"It is usually recommended for a pool's default SR to be shared to avoid unexpected behaviors",
missingJob: 'Missing job',
missingVm: 'Missing VM',
missingVmInJob: 'This VM does not belong to this job',
@@ -1397,7 +1392,6 @@ const messages = {
noOrphanedObject: 'No orphans',
tooManySnapshots: 'Too many snapshots',
tooManySnapshotsTip: 'VMs with more than the recommended amount of snapshots',
noLocalDefaultSrs: 'No local default SRs',
noTooManySnapshotsObject: 'No VMs with too many snapshots',
numberOfSnapshots: 'Number of snapshots',
guestToolStatus: 'Guest Tools status',

View File

@@ -1,50 +0,0 @@
import _ from 'intl'
import React from 'react'
import defined, { get } from '@xen-orchestra/defined'
import { injectState, provideState } from 'reaclette'
import decorate from './apply-decorators'
import Icon from './icon'
import renderXoItem from './render-xo-item'
import { connectStore } from './utils'
import { createGetObject } from './selectors'
import { editVm, editPool, isSrWritable } from './xo'
import { XoSelect } from './editable'
export const SelectSuspendSr = decorate([
connectStore({
suspendSr: createGetObject((_, { pool, vm }) => (vm || pool).suspendSr),
}),
provideState({
effects: {
onChange(_, value) {
const { isVm } = this.state
const method = isVm ? editVm : editPool
method(isVm ? this.props.vm : this.props.pool, {
suspendSr: defined(
get(() => value.id),
null
),
})
},
},
computed: {
isVm: (state, props) => props.vm !== undefined,
predicate: (state, props) => sr =>
isSrWritable(sr) && (state.isVm ? props.vm.$pool === sr.$pool : props.pool.id === sr.$pool),
},
}),
injectState,
({ effects: { onChange }, state: { predicate }, suspendSr }) => (
<span>
<XoSelect onChange={onChange} predicate={predicate} value={suspendSr} xoType='SR'>
{suspendSr !== undefined ? renderXoItem(suspendSr) : _('noValue')}
</XoSelect>{' '}
{suspendSr !== undefined && (
<a role='button' onClick={onChange}>
<Icon icon='remove' />
</a>
)}
</span>
),
])

View File

@@ -212,7 +212,7 @@ const CollapsedActions = decorate([
className={action.level !== undefined ? `text-${action.level}` : ''}
disabled={action.disabled}
key={key}
onClick={action.disabled ? undefined : () => effects.execute(action)}
onClick={() => effects.execute(action)}
>
<Icon icon={action.icon} /> {action.label}
</MenuItem>

View File

@@ -149,10 +149,6 @@
@extend .fa;
@extend .fa-keyboard-o;
}
&-git {
@extend .fa;
@extend .fa-git-square;
}
&-info {
@extend .fa;
@extend .fa-info-circle;

View File

@@ -12,8 +12,6 @@ import { connectStore, getXoaPlan } from 'utils'
import pkg from '../../../package'
const COMMIT_ID = process.env.GIT_HEAD
const HEADER = (
<Container>
<Row>
@@ -42,19 +40,8 @@ export default class About extends Component {
return (
<Page header={HEADER} title='aboutPage' formatTitle>
<Container className='text-xs-center'>
{isAdmin && [
process.env.XOA_PLAN > 4 && COMMIT_ID !== '' && (
<Row key='0'>
<Col>
<Icon icon='git' size={4} />
<h4>
Xen Orchestra, commit{' '}
<a href={'https://github.com/vatesfr/xen-orchestra/commit/' + COMMIT_ID}>{COMMIT_ID.slice(0, 5)}</a>
</h4>
</Col>
</Row>
),
<Row key='1'>
{isAdmin && (
<Row>
<Col mediumSize={6}>
<Icon icon='host' size={4} />
<Copiable tagName='h4' data={`xo-server ${this.state.serverVersion}`}>
@@ -69,8 +56,8 @@ export default class About extends Component {
</Copiable>
<p className='text-muted'>{_('xenOrchestraWeb')}</p>
</Col>
</Row>,
]}
</Row>
)}
{process.env.XOA_PLAN > 4 ? (
<div>
<Row>

View File

@@ -8,12 +8,12 @@ import NoObjects from 'no-objects'
import React from 'react'
import SortedTable from 'sorted-table'
import Tooltip from 'tooltip'
import { Host, Network, Pool, Sr, Vm } from 'render-xo-item'
import { Network, Sr, Vm } from 'render-xo-item'
import { SelectPool } from 'select-objects'
import { Container, Row, Col } from 'grid'
import { Card, CardHeader, CardBlock } from 'card'
import { FormattedRelative, FormattedTime } from 'react-intl'
import { countBy, filter, flatten, forEach, includes, isEmpty, map, pick } from 'lodash'
import { flatten, forEach, includes, isEmpty, map } from 'lodash'
import { connectStore, formatLogs, formatSize, noop, resolveIds } from 'utils'
import {
deleteMessage,
@@ -105,23 +105,6 @@ const DUPLICATED_MAC_ADDRESSES_FILTERS = {
filterOnlyRunningVms: 'nRunningVms:>1',
}
const LOCAL_DEFAULT_SRS_COLUMNS = [
{
name: _('pool'),
itemRenderer: pool => <Pool id={pool.id} link />,
sortCriteria: 'name_label',
},
{
name: _('sr'),
itemRenderer: pool => <Sr container={false} id={pool.default_SR} link spaceLeft={false} />,
},
{
name: _('host'),
itemRenderer: (pool, { srs }) => <Host id={srs[pool.default_SR].$container} link pool={false} />,
sortCriteria: (pool, { hosts, srs }) => hosts[srs[pool.default_SR].$container].name_label,
},
]
const SR_COLUMNS = [
{
name: _('srName'),
@@ -545,10 +528,8 @@ const HANDLED_VDI_TYPES = new Set(['system', 'user', 'ephemeral'])
return {
alertMessages: getAlertMessages,
areObjectsFetched,
hosts: createGetObjectsOfType('host'),
orphanVdis: getOrphanVdis,
orphanVmSnapshots: getOrphanVmSnapshots,
pools: createGetObjectsOfType('pool'),
tooManySnapshotsVms: getTooManySnapshotsVms,
guestToolsVms: getGuestToolsVms,
userSrs: getUserSrs,
@@ -604,22 +585,6 @@ export default class Health extends Component {
)
)
_getLocalDefaultSrs = createCollectionWrapper(
createSelector(
() => this.props.hosts,
() => this.props.pools,
() => this.props.userSrs,
() => this._getPoolIds(),
(hosts, pools, userSrs, poolIds) => {
const nbHostsPerPool = countBy(hosts, host => host.$pool)
return filter(
isEmpty(poolIds) ? pools : pick(pools, poolIds),
pool => !userSrs[pool.default_SR].shared && nbHostsPerPool[pool.id] > 1
)
}
)
)
_getPoolIds = createCollectionWrapper(createSelector(() => this.state.pools, resolveIds))
_getPoolPredicate = createSelector(this._getPoolIds, poolIds =>
@@ -651,7 +616,6 @@ export default class Health extends Component {
const { props, state } = this
const duplicatedMacAddresses = this._getDuplicatedMacAddresses()
const localDefaultSrs = this._getLocalDefaultSrs()
const userSrs = this._getUserSrs()
const orphanVdis = this._getOrphanVdis()
@@ -686,41 +650,6 @@ export default class Health extends Component {
</Card>
</Col>
</Row>
{localDefaultSrs.length > 0 && (
<Row>
<Col>
<Card>
<CardHeader>
<Icon icon='disk' /> {_('localDefaultSrs')}
</CardHeader>
<CardBlock>
<p>
<Icon icon='info' /> <em>{_('localDefaultSrsStatusTip')}</em>
</p>
<NoObjects
collection={props.areObjectsFetched ? localDefaultSrs : null}
emptyMessage={_('noLocalDefaultSrs')}
>
{() => (
<Row>
<Col>
<SortedTable
collection={localDefaultSrs}
columns={LOCAL_DEFAULT_SRS_COLUMNS}
data-hosts={props.hosts}
data-srs={userSrs}
shortcutsTarget='body'
stateUrlParam='s_local_default_srs'
/>
</Col>
</Row>
)}
</NoObjects>
</CardBlock>
</Card>
</Col>
</Row>
)}
<Row>
<Col>
<Card>

View File

@@ -32,7 +32,6 @@ import {
subscribePlugins,
synchronizeNetbox,
} from 'xo'
import { SelectSuspendSr } from 'select-suspend-sr'
@connectStore(() => ({
master: createGetObjectsOfType('host').find((_, { pool }) => ({
@@ -203,12 +202,6 @@ export default class TabAdvanced extends Component {
)}
</td>
</tr>
<tr>
<th>{_('suspendSr')}</th>
<td>
<SelectSuspendSr pool={pool} />
</td>
</tr>
</tbody>
</table>
</Col>

View File

@@ -50,7 +50,6 @@ import {
XEN_VIDEORAM_VALUES,
} from 'xo'
import { createGetObject, createGetObjectsOfType, createSelector, isAdmin } from 'selectors'
import { SelectSuspendSr } from 'select-suspend-sr'
import BootOrder from './boot-order'
@@ -799,12 +798,6 @@ export default class TabAdvanced extends Component {
<CustomFields object={vm.id} />
</td>
</tr>
<tr>
<th>{_('suspendSr')}</th>
<td>
<SelectSuspendSr vm={vm} />
</td>
</tr>
</tbody>
</table>
<br />