Compare commits
34 commits: xo5/fix-bu... → xo5/resour...
| Author | SHA1 | Date |
| --- | --- | --- |
|  | 12786511bb |  |
|  | edcbf22d3f |  |
|  | 4fa4638e00 |  |
|  | e23ff07792 |  |
|  | 26eb727ae3 |  |
|  | b750d277aa |  |
|  | bd2b6dbe2a |  |
|  | 2cd87e1b2c |  |
|  | 6eed3196bb |  |
|  | 662c2bd8cb |  |
|  | ba9d4d4bb5 |  |
|  | 18dea2f2fe |  |
|  | 70c51227bf |  |
|  | e162fd835b |  |
|  | bcdcfbf20b |  |
|  | a6e93c895c |  |
|  | 5c4f907358 |  |
|  | e19dbc06fe |  |
|  | 287378f9c6 |  |
|  | 83a94eefd6 |  |
|  | 92fc19e2e3 |  |
|  | 521d31ac84 |  |
|  | 2b3ccb4b0e |  |
|  | 2498a4f47c |  |
|  | dd61feeaf3 |  |
|  | 7851f8c196 |  |
|  | 404a764821 |  |
|  | 59cc418973 |  |
|  | bc00551cb3 |  |
|  | 4d24248ea2 |  |
|  | 5c731fd56e |  |
|  | 79abb97b1f |  |
|  | 3314ba6e08 |  |
|  | 0fe8f8cac3 |  |
@@ -17,7 +17,7 @@
     "xo-lift-remote-immutability": "./liftProtection.mjs"
   },
   "license": "AGPL-3.0-or-later",
-  "version": "1.0.0",
+  "version": "1.0.1",
   "engines": {
     "node": ">=14.0.0"
   },
@xen-orchestra/xva/_formatBlockPath.mjs (new file, +3)

@@ -0,0 +1,3 @@
+const formatCounter = counter => String(counter).padStart(8, '0')
+
+export const formatBlockPath = (basePath, counter) => `${basePath}/${formatCounter(counter)}`
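The helper centralizes the zero-padded block naming that `addDisk` previously built inline. A quick illustration of its output (the base path value below is made up):

```js
import { formatBlockPath } from './_formatBlockPath.mjs'

formatBlockPath('Ref:47', 0)  // => 'Ref:47/00000000'
formatBlockPath('Ref:47', 42) // => 'Ref:47/00000042'
```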
@@ -1,30 +1,50 @@
+import { formatBlockPath } from './_formatBlockPath.mjs'
 import { fromCallback } from 'promise-toolbox'
 import { readChunkStrict } from '@vates/read-chunk'
 import { xxhash64 } from 'hash-wasm'
 
+export const XVA_DISK_CHUNK_LENGTH = 1024 * 1024
+
+async function addEntry(pack, name, buffer) {
+  await fromCallback.call(pack, pack.entry, { name }, buffer)
+}
+
 async function writeBlock(pack, data, name) {
-  await fromCallback.call(pack, pack.entry, { name }, data)
+  if (data.length < XVA_DISK_CHUNK_LENGTH) {
+    data = Buffer.concat([data, Buffer.alloc(XVA_DISK_CHUNK_LENGTH - data.length, 0)])
+  }
+  await addEntry(pack, name, data)
   // weirdly, ocaml and xxhash return the bytes in reverse order to each other
   const hash = (await xxhash64(data)).toString('hex').toUpperCase()
-  await fromCallback.call(pack, pack.entry, { name: `${name}.xxhash` }, Buffer.from(hash, 'utf8'))
+  await addEntry(pack, `${name}.xxhash`, Buffer.from(hash, 'utf8'))
 }
 
 export default async function addDisk(pack, vhd, basePath) {
   let counter = 0
   let written
-  const chunk_length = 1024 * 1024
-  const empty = Buffer.alloc(chunk_length, 0)
+  let lastBlockWrittenAt = Date.now()
+  const MAX_INTERVAL_BETWEEN_BLOCKS = 60 * 1000
+  const empty = Buffer.alloc(XVA_DISK_CHUNK_LENGTH, 0)
   const stream = await vhd.rawContent()
   let lastBlockLength
   const diskSize = vhd.footer.currentSize
   let remaining = diskSize
   while (remaining > 0) {
-    const data = await readChunkStrict(stream, Math.min(chunk_length, remaining))
-    lastBlockLength = data.length
+    lastBlockLength = Math.min(XVA_DISK_CHUNK_LENGTH, remaining)
+    const data = await readChunkStrict(stream, lastBlockLength)
     remaining -= lastBlockLength
 
-    if (counter === 0 || !data.equals(empty)) {
+    if (
+      // write first block
+      counter === 0 ||
+      // write all non empty blocks
+      !data.equals(empty) ||
+      // write one block from time to time to ensure there is no timeout
+      // occurring while passing empty blocks
+      Date.now() - lastBlockWrittenAt > MAX_INTERVAL_BETWEEN_BLOCKS
+    ) {
       written = true
-      await writeBlock(pack, data, `${basePath}/${('' + counter).padStart(8, '0')}`)
+      await writeBlock(pack, data, formatBlockPath(basePath, counter))
+      lastBlockWrittenAt = Date.now()
     } else {
      written = false
    }
@@ -32,6 +52,6 @@ export default async function addDisk(pack, vhd, basePath) {
   }
   if (!written) {
     // last block must be present
-    writeBlock(pack, empty.slice(0, lastBlockLength), `${basePath}/${counter}`)
+    await writeBlock(pack, empty, formatBlockPath(basePath, counter - 1))
   }
 }
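The rewritten loop writes a block in exactly three cases. A minimal standalone sketch of that decision, extracted from the diff above (`shouldWriteBlock` is a name invented here for illustration):

```js
const XVA_DISK_CHUNK_LENGTH = 1024 * 1024
const MAX_INTERVAL_BETWEEN_BLOCKS = 60 * 1000
const empty = Buffer.alloc(XVA_DISK_CHUNK_LENGTH, 0)

function shouldWriteBlock(counter, data, lastBlockWrittenAt) {
  return (
    // the first block is always written
    counter === 0 ||
    // all non-empty blocks are written
    !data.equals(empty) ||
    // an empty block is written from time to time so the receiving side
    // does not time out while a long run of empty blocks is being skipped
    Date.now() - lastBlockWrittenAt > MAX_INTERVAL_BETWEEN_BLOCKS
  )
}
```

The padding added to `writeBlock` guarantees every emitted entry is exactly `XVA_DISK_CHUNK_LENGTH` bytes, which is why `addDisk` can compare each chunk against a single preallocated `empty` buffer.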
@@ -9,6 +9,7 @@ import { DEFAULT_VDI } from './templates/vdi.mjs'
 import { DEFAULT_VIF } from './templates/vif.mjs'
 import { DEFAULT_VM } from './templates/vm.mjs'
 import toOvaXml from './_toOvaXml.mjs'
+import { XVA_DISK_CHUNK_LENGTH } from './_writeDisk.mjs'
 
 export default async function writeOvaXml(
   pack,
@@ -79,11 +80,12 @@ export default async function writeOvaXml(
   for (let index = 0; index < vhds.length; index++) {
     const userdevice = index + 1
     const vhd = vhds[index]
+    const alignedSize = Math.ceil(vdis[index].virtual_size / XVA_DISK_CHUNK_LENGTH) * XVA_DISK_CHUNK_LENGTH
     const vdi = defaultsDeep(
       {
         id: nextRef(),
-        // overwrite SR from an opaqref to a ref:
-        snapshot: { ...vdis[index], SR: srObj.id },
+        // overwrite SR from an opaque ref to a ref:
+        snapshot: { ...vdis[index], SR: srObj.id, virtual_size: alignedSize },
       },
       {
         snapshot: {
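Because `writeBlock` now pads every block to a full chunk, the `virtual_size` advertised in the OVA XML must be rounded up to a multiple of `XVA_DISK_CHUNK_LENGTH`, otherwise the metadata would declare fewer bytes than the archive actually contains. A worked example of the alignment formula (sizes are arbitrary):

```js
const XVA_DISK_CHUNK_LENGTH = 1024 * 1024 // 1 MiB

const align = size => Math.ceil(size / XVA_DISK_CHUNK_LENGTH) * XVA_DISK_CHUNK_LENGTH

align(10 * 1024 * 1024)     // 10485760 – already a chunk multiple, unchanged
align(10 * 1024 * 1024 + 1) // 11534336 – rounded up to the next 1 MiB boundary
```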
@@ -25,8 +25,7 @@ export async function importVm(vm, xapi, sr, network) {
   const str = await promise
   const matches = /OpaqueRef:[0-9a-z-]+/.exec(str)
   if (!matches) {
-    const error = new Error('no opaque ref found')
-    error.haystack = str
+    const error = new Error(`no opaque ref found in ${str}`)
     throw error
   }
   return matches[0]
@xen-orchestra/xva/package.json

@@ -1,6 +1,6 @@
 {
   "name": "@xen-orchestra/xva",
-  "version": "1.0.0",
+  "version": "1.0.2",
   "main": "index.js",
   "author": "",
   "license": "ISC",
CHANGELOG.md (+37)

@@ -1,5 +1,42 @@
 # ChangeLog
 
+## **next**
+
+### Enhancements
+
+- [REST API] Add `/groups` collection [Forum#70500](https://xcp-ng.org/forum/post/70500)
+- [REST API] Add `/groups/:id/users` and `/users/:id/groups` collections [Forum#70500](https://xcp-ng.org/forum/post/70500)
+- [REST API] Expose messages associated with XAPI objects at `/:collection/:object/messages`
+
+### Bug fixes
+
+- [Import/VMWare] Fix `(Failure \"Expected string, got 'I(0)'\")` (PR [#7361](https://github.com/vatesfr/xen-orchestra/issues/7361))
+- [Plugin/load-balancer] Fix `TypeError: Cannot read properties of undefined (reading 'high')` when trying to optimize a host with a performance plan [#7359](https://github.com/vatesfr/xen-orchestra/issues/7359) (PR [#7362](https://github.com/vatesfr/xen-orchestra/pull/7362))
+- Changing the number of displayed items per page should send back to the first page [#7350](https://github.com/vatesfr/xen-orchestra/issues/7350)
+- [Plugin/load-balancer] Correctly create a _simple_ instead of a _density_ plan when it is selected (PR [#7358](https://github.com/vatesfr/xen-orchestra/pull/7358))
+
+### Released packages
+
+- xo-server 5.136.0
+- xo-server-load-balancer 0.8.1
+- xo-web 5.136.1
+
+## **5.91.1** (2024-02-06)
+
+### Bug fixes
+
+- [Import/VMWare] Fix `Error: task has been destroyed before completion` with XVA import [Forum#70513](https://xcp-ng.org/forum/post/70513)
+- [Import/VM] Fix `UUID_INVALID(VM, OpaqueRef:...)` error when importing from URL
+- [Proxies] Fix `xapi.getOrWaitObject is not a function` during deployment
+- [REST API] Fix empty object's tasks list
+- [REST API] Fix incorrect `href` in `/:collection/:object/tasks`
+
+### Released packages
+
+- @xen-orchestra/immutable-backups 1.0.1
+- @xen-orchestra/xva 1.0.2
+- xo-server 5.135.1
+
 ## **5.91.0** (2024-01-31)
 
 <img id="latest" src="https://badgen.net/badge/channel/latest/yellow" alt="Channel: latest" />
@@ -7,15 +7,12 @@
 
 > Users must be able to say: “Nice enhancement, I'm eager to test it”
 
+- [Self service] From user POV, show used resources even when they are unlimited (PR [#7353](https://github.com/vatesfr/xen-orchestra/pull/7353))
+
 ### Bug fixes
 
 > Users must be able to say: “I had this issue, happy to know it's fixed”
 
-- [Import/VM] Fix `UUID_INVALID(VM, OpaqueRef:...)` error when importing from URL
-- [Proxies] Fix `xapi.getOrWaitObject is not a function` during deployment
-- [REST API] Fix empty object's tasks list
-- [REST API] Fix incorrect `href` in `/:collection/:object/tasks`
-
 ### Packages to release
 
 > When modifying a package, add it here with its release type.
@@ -32,8 +29,7 @@
 
 <!--packages-start-->
 
-- @xen-orchestra/immutable-backups patch
-- @xen-orchestra/xva patch
-- xo-server patch
+- xo-server minor
+- xo-web minor
 
 <!--packages-end-->
@@ -34,9 +34,8 @@ But it's not the only way to see this: there is multiple possibilities to "optim
 
 - maybe you want to spread the VM load on the maximum number of server, to get the most of your hardware? (previous example)
 - maybe you want to reduce power consumption and migrate your VMs to the minimum number of hosts possible? (and shutdown useless hosts)
-- or maybe both, depending of your own schedule?
 
-Those ways can be also called modes: "performance" for 1, "density" for number 2 and "mixed" for the last.
+Those ways can be also called modes: "performance" for 1 and "density" for number 2.
 
 ## Configure a plan
 
@@ -47,7 +46,6 @@ A plan has:
 - a name
 - pool(s) where to apply the policy
 - a mode (see paragraph below)
-- a behavior (aggressive, normal, low)
 
 ### Plan modes
 
@@ -55,7 +53,7 @@ There are 3 modes possible:
 
 - performance
 - density
-- mixed
+- simple
 
 #### Performance
 
@@ -65,14 +63,9 @@ VMs are placed to use all possible resources. This means balance the load to giv
 
 This time, the objective is to use the least hosts possible, and to concentrate your VMs. In this mode, you can choose to shutdown unused (and compatible) hosts.
 
-#### Mixed
+#### Simple
 
-This mode allows you to use both performance and density, but alternatively, depending of a schedule. E.g:
-
-- **performance** from 6:00 AM to 7:00 PM
-- **density** from 7:01 PM to 5:59 AM
-
-In this case, you'll have the best of both when needed (energy saving during the night and performance during the day).
+This mode allows you to use VM anti-affinity without using any load balancing mechanism. (see paragraph below)
 
 ### Threshold
 
@@ -87,6 +80,10 @@ If the CPU threshold is set to 90%, the load balancer will be only triggered if
 
 For free memory, it will be triggered if there is **less** free RAM than the threshold.
 
+### Exclusion
+
+If you want to prevent load balancing from triggering migrations on a particular host or VM, it is possible to exclude it from load balancing. It can be configured via the "Excluded hosts" parameter in each plan, and in the "Ignored VM tags" parameter which is common to every plan.
+
 ### Timing
 
 The global situation (resource usage) is examined **every minute**.
xo-server-load-balancer/package.json

@@ -1,6 +1,6 @@
 {
   "name": "xo-server-load-balancer",
-  "version": "0.8.0",
+  "version": "0.8.1",
   "license": "AGPL-3.0-or-later",
   "description": "Load balancer for XO-Server",
   "keywords": [
@@ -12,6 +12,8 @@ import { EXECUTION_DELAY, debug } from './utils'
 
 const PERFORMANCE_MODE = 0
 const DENSITY_MODE = 1
+const SIMPLE_MODE = 2
+const MODES = { 'Performance mode': PERFORMANCE_MODE, 'Density mode': DENSITY_MODE, 'Simple mode': SIMPLE_MODE }
 
 // ===================================================================
 
@@ -35,7 +37,7 @@ export const configurationSchema = {
   },
 
   mode: {
-    enum: ['Performance mode', 'Density mode', 'Simple mode'],
+    enum: Object.keys(MODES),
     title: 'Mode',
   },
 
@@ -147,7 +149,7 @@ class LoadBalancerPlugin {
 
     if (plans) {
      for (const plan of plans) {
-        this._addPlan(plan.mode === 'Performance mode' ? PERFORMANCE_MODE : DENSITY_MODE, plan)
+        this._addPlan(MODES[plan.mode], plan)
      }
    }
  }
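The lookup table is the actual fix behind PR #7358: the old ternary in `_addPlan` could only ever produce a performance or a density plan, so a 'Simple mode' plan silently became a density plan. A before/after sketch using the constants from the diff:

```js
const PERFORMANCE_MODE = 0
const DENSITY_MODE = 1
const SIMPLE_MODE = 2
const MODES = { 'Performance mode': PERFORMANCE_MODE, 'Density mode': DENSITY_MODE, 'Simple mode': SIMPLE_MODE }

// before: anything that is not 'Performance mode' falls back to density
const before = mode => (mode === 'Performance mode' ? PERFORMANCE_MODE : DENSITY_MODE)
before('Simple mode') // 1 (DENSITY_MODE) – the bug

// after: direct lookup in the table
MODES['Simple mode'] // 2 (SIMPLE_MODE)
```

Deriving the configuration schema's `enum` from `Object.keys(MODES)` also keeps the UI choices and the mapping in sync by construction.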
@@ -178,7 +178,7 @@ export default class PerformancePlan extends Plan {
     const state = this._getThresholdState(exceededAverages)
     if (
       destinationAverages.cpu + vmAverages.cpu >= this._thresholds.cpu.low ||
-      destinationAverages.memoryFree - vmAverages.memory <= this._thresholds.memory.high ||
+      destinationAverages.memoryFree - vmAverages.memory <= this._thresholds.memoryFree.high ||
       (!state.cpu &&
         !state.memory &&
         (exceededAverages.cpu - vmAverages.cpu < destinationAverages.cpu + vmAverages.cpu ||
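For context on issue #7359: the plan's thresholds object is keyed by metric name, and `memory` is not one of its keys, so reading `.high` on `this._thresholds.memory` threw. A sketch of the assumed shape, based on the keys used in the condition above (the threshold values are invented):

```js
const thresholds = {
  cpu: { low: 22.5, high: 90 },
  memoryFree: { low: 10240, high: 20480 },
}

thresholds.memory          // undefined
// thresholds.memory.high  // TypeError: Cannot read properties of undefined (reading 'high')
thresholds.memoryFree.high // 20480 – the key the fixed code reads
```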
xo-server/package.json

@@ -1,7 +1,7 @@
 {
   "private": true,
   "name": "xo-server",
-  "version": "5.135.0",
+  "version": "5.136.0",
   "license": "AGPL-3.0-or-later",
   "description": "Server part of Xen-Orchestra",
   "keywords": [
@@ -53,7 +53,7 @@
     "@xen-orchestra/template": "^0.1.0",
     "@xen-orchestra/vmware-explorer": "^0.4.0",
     "@xen-orchestra/xapi": "^4.2.0",
-    "@xen-orchestra/xva": "^1.0.0",
+    "@xen-orchestra/xva": "^1.0.2",
     "ajv": "^8.0.3",
     "app-conf": "^2.3.0",
     "async-iterator-to-stream": "^1.0.1",
@@ -48,15 +48,18 @@ const getLogs = (db, args) => {
 
 const deleteLogs = (db, args) =>
   new Promise(resolve => {
-    let count = 1
+    let nDeleted = 0
+    let nRunning = 1
     const cb = () => {
-      if (--count === 0) {
+      if (--nRunning === 0) {
+        console.log(nDeleted.toLocaleString(), 'deleted entries')
         resolve()
       }
     }
 
     const deleteEntry = key => {
-      ++count
+      ++nDeleted
+      ++nRunning
       db.del(key, cb)
     }
 
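The rename splits one counter into two distinct roles: `nRunning` tracks pending `db.del` callbacks (it starts at 1 for the enumeration itself, so the promise cannot resolve before enumeration finishes), while `nDeleted` simply counts entries for the final report. A self-contained sketch of the pattern (`deleteAll` and its synchronous key loop are simplifications; the real code walks a leveldb stream):

```js
function deleteAll(db, keys) {
  return new Promise(resolve => {
    let nDeleted = 0
    let nRunning = 1 // one token for the enumeration itself
    const cb = () => {
      if (--nRunning === 0) {
        console.log(nDeleted.toLocaleString(), 'deleted entries')
        resolve()
      }
    }
    for (const key of keys) {
      ++nDeleted
      ++nRunning
      db.del(key, cb) // cb releases one token per completed deletion
    }
    cb() // enumeration done: release the initial token
  })
}
```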
@@ -410,7 +410,9 @@ export default class Api {
       // 2021-02-11: Work-around: ECONNREFUSED error can be triggered by
       // 'host.stats' method because there is no connection to the host during a
       // toolstack restart and xo-web may call it often
-      if (name !== 'pool.listMissingPatches' && name !== 'host.stats') {
+      // 2024-02-05: Work-around: in case of XO Proxy errors, `proxy.getApplianceUpdaterState` will
+      // flood the logs.
+      if (name !== 'pool.listMissingPatches' && name !== 'host.stats' && name !== 'proxy.getApplianceUpdaterState') {
         this._logger.error(message, {
           ...data,
           duration: Date.now() - startTime,
@@ -249,7 +249,7 @@ export default class MigrateVm {
       const disk = chainByNode[diskIndex]
       const { capacity, descriptionLabel, fileName, nameLabel, path, datastore, isFull } = disk
       if (isFull) {
-        vhd = await VhdEsxiRaw.open(esxi, datastore, path + '/' + fileName)
+        vhd = await VhdEsxiRaw.open(esxi, datastore, path + '/' + fileName, { thin: false })
         // we don't need to read the BAT with the importVdiThroughXva process
         const vdiMetadata = {
           name_description: 'fromESXI' + descriptionLabel,
@@ -258,11 +258,14 @@ export default class MigrateVm {
           virtual_size: capacity,
         }
         vdi = await importVdiThroughXva(vdiMetadata, vhd, xapi, sr)
+
+        // it can fail before the vdi is connected to the vm
+        $defer.onFailure.call(xapi, 'VDI_destroy', vdi.$ref)
         await xapi.VBD_create({
           VDI: vdi.$ref,
           VM: vm.$ref,
           device: `xvd${String.fromCharCode('a'.charCodeAt(0) + userdevice)}`,
           userdevice: String(userdevice < 3 ? userdevice : userdevice + 1),
         })
       } else {
         vhd = await openDeltaVmdkasVhd(esxi, datastore, path + '/' + fileName, parentVhd, {
@@ -312,6 +315,8 @@ export default class MigrateVm {
         await xapi.VBD_create({
           VDI: vdi.$ref,
           VM: vm.$ref,
+          device: `xvd${String.fromCharCode('a'.charCodeAt(0) + userdevice)}`,
+          userdevice: String(userdevice < 3 ? userdevice : userdevice + 1),
         })
       } else {
         if (parentVhd === undefined) {
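Two details worth noting above. The `$defer.onFailure` call destroys the freshly imported VDI if the migration fails before the VDI is attached to the VM, instead of leaking it. And the `userdevice` mapping skips position 3, which is conventionally reserved for the CD drive on XCP-ng/XenServer, while the device letter is derived independently of that gap:

```js
// illustration of the mapping used in the VBD_create calls above
for (const userdevice of [1, 2, 3, 4]) {
  const device = `xvd${String.fromCharCode('a'.charCodeAt(0) + userdevice)}`
  const position = String(userdevice < 3 ? userdevice : userdevice + 1)
  console.log(device, position) // xvdb 1, xvdc 2, xvdd 4, xvde 5
}
```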
@@ -55,10 +55,10 @@ const normalize = set => ({
   limits: set.limits
     ? map(set.limits, limit =>
         isObject(limit)
-          ? limit
+          ? { ...limit, usage: limit.usage ?? 0 }
           : {
-              available: limit,
               total: limit,
+              usage: 0,
            }
      )
    : {},
@@ -217,25 +217,32 @@ export default class {
     if (objects) {
       set.objects = objects
     }
     if (limits) {
       const previousLimits = set.limits
-      set.limits = map(limits, (quantity, id) => {
-        const previous = previousLimits[id]
-        if (!previous) {
-          return {
-            available: quantity,
-            total: quantity,
-          }
-        }
-
-        const { available, total } = previous
-
-        return {
-          available: available - total + quantity,
-          total: quantity,
-        }
-      })
+      const newLimits = {}
+      forEach(limits, (quantity, id) => {
+        const previous = previousLimits[id]
+        if (previous !== undefined) {
+          newLimits[id] = {
+            total: quantity,
+            usage: previous.usage,
+          }
+        } else {
+          newLimits[id] = {
+            total: quantity,
+            usage: 0,
+          }
+        }
+      })
+
+      const removedLimits = Object.keys(previousLimits).filter(key => !(key in newLimits))
+      removedLimits.forEach(id => {
+        newLimits[id] = {
+          usage: previousLimits[id].usage ?? 0,
+        }
+      })
+      set.limits = newLimits
     }
 
     if (ipPools) {
       set.ipPools = ipPools
     }
@@ -332,15 +339,16 @@ export default class {
     forEach(limits, (quantity, id) => {
       const limit = set.limits[id]
+      if (!limit) {
+        set.limits[id] = { usage: quantity }
+        return
+      }
 
-      if ((limit.available -= quantity) < 0 && !force) {
+      if ((limit.usage += quantity) > limit.total && !force) {
         throw notEnoughResources([
           {
             resourceSet: setId,
             resourceType: id,
-            available: limit.available + quantity,
+            available: limit.total - (limit.usage - quantity),
             requested: quantity,
           },
         ])
@@ -358,8 +366,8 @@ export default class {
       return
     }
 
-    if ((limit.available += quantity) > limit.total) {
-      limit.available = limit.total
+    if ((limit.usage -= quantity) < 0) {
+      limit.usage = 0
     }
   })
   await this._save(set)
@@ -371,7 +379,7 @@ export default class {
     forEach(limits, (limit, id) => {
       if (VM_RESOURCES[id] || id.startsWith('ipPool:')) {
         // only reset VMs related limits
-        limit.available = limit.total
+        limit.usage = 0
      }
    })
  })
@@ -397,7 +405,9 @@ export default class {
     forEach(await this.computeResourcesUsage(this._app.getObject(object.$id)), (usage, resource) => {
       const limit = limits[resource]
       if (limit) {
-        limit.available -= usage
+        limit.usage += usage
+      } else {
+        limits[resource] = { usage }
       }
     })
   })
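Taken together, these hunks switch resource-set accounting from a stored `available` counter to a stored `usage` counter, with `available` now derived as `total - usage`. That is what lets a limit exist without a `total` at all (an unlimited resource whose consumption is still tracked), which the xo-web changes below rely on. A worked example of the new allocate/release arithmetic (values are arbitrary; the real code throws `notEnoughResources` rather than a plain `Error`):

```js
// a limit of 10 with 4 already consumed
const limit = { total: 10, usage: 4 }

// allocate quantity = 3
const quantity = 3
if ((limit.usage += quantity) > limit.total) {
  throw new Error(`not enough resources: available ${limit.total - (limit.usage - quantity)}, requested ${quantity}`)
}
// limit is now { total: 10, usage: 7 }; remaining = total - usage = 3

// release clamps at zero instead of overflowing past the total
if ((limit.usage -= quantity) < 0) {
  limit.usage = 0
}
// back to { total: 10, usage: 4 }
```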
@@ -100,6 +100,17 @@ async function sendObjects(iterable, req, res, path = req.path) {
   return pipeline(makeObjectsStream(iterable, makeResult, json, res), res)
 }
 
+function handleArray(array, filter, limit) {
+  if (filter !== undefined) {
+    array = array.filter(filter)
+  }
+  if (limit < array.length) {
+    array.length = limit
+  }
+
+  return array
+}
+
 const handleOptionalUserFilter = filter => filter && CM.parse(filter).createPredicate()
 
 const subRouter = (app, path) => {
@@ -160,77 +171,7 @@ export default class RestApi {
       )
     })
 
-    const types = [
-      'host',
-      'network',
-      'pool',
-      'SR',
-      'VBD',
-      'VDI-snapshot',
-      'VDI',
-      'VIF',
-      'VM-snapshot',
-      'VM-template',
-      'VM',
-    ]
-    const collections = Object.fromEntries(
-      types.map(type => {
-        const id = type.toLocaleLowerCase() + 's'
-        return [id, { id, isCorrectType: _ => _.type === type, type }]
-      })
-    )
-
-    collections.backup = { id: 'backup' }
-    collections.restore = { id: 'restore' }
-    collections.tasks = { id: 'tasks' }
-    collections.users = { id: 'users' }
-
-    collections.hosts.routes = {
-      __proto__: null,
-
-      async 'audit.txt'(req, res) {
-        const host = req.xapiObject
-
-        res.setHeader('content-type', 'text/plain')
-        await pipeline(await host.$xapi.getResource('/audit_log', { host }), compressMaybe(req, res))
-      },
-
-      async 'logs.tar'(req, res) {
-        const host = req.xapiObject
-
-        res.setHeader('content-type', 'application/x-tar')
-        await pipeline(await host.$xapi.getResource('/host_logs_download', { host }), compressMaybe(req, res))
-      },
-
-      async missing_patches(req, res) {
-        await app.checkFeatureAuthorization('LIST_MISSING_PATCHES')
-
-        const host = req.xapiObject
-        res.json(await host.$xapi.listMissingPatches(host))
-      },
-    }
-
-    collections.pools.routes = {
-      __proto__: null,
-
-      async missing_patches(req, res) {
-        await app.checkFeatureAuthorization('LIST_MISSING_PATCHES')
-
-        const xapi = req.xapiObject.$xapi
-        const missingPatches = new Map()
-        await asyncEach(Object.values(xapi.objects.indexes.type.host ?? {}), async host => {
-          try {
-            for (const patch of await xapi.listMissingPatches(host)) {
-              const { uuid: key = `${patch.name}-${patch.version}-${patch.release}` } = patch
-              missingPatches.set(key, patch)
-            }
-          } catch (error) {
-            console.warn(host.uuid, error)
-          }
-        })
-        res.json(Array.from(missingPatches.values()))
-      },
-    }
+    const collections = { __proto__: null }
 
     const withParams = (fn, paramsSchema) => {
       fn.params = paramsSchema
@@ -238,68 +179,231 @@
       return fn
     }
 
-    collections.pools.actions = {
-      __proto__: null,
-
-      create_vm: withParams(
-        defer(async ($defer, { xapiObject: { $xapi } }, { affinity, boot, install, template, ...params }, req) => {
-          params.affinityHost = affinity
-          params.installRepository = install?.repository
-
-          const vm = await $xapi.createVm(template, params, undefined, req.user.id)
-          $defer.onFailure.call($xapi, 'VM_destroy', vm.$ref)
-
-          if (boot) {
-            await $xapi.callAsync('VM.start', vm.$ref, false, false)
-          }
-
-          return vm.uuid
-        }),
-        {
-          affinity: { type: 'string', optional: true },
-          auto_poweron: { type: 'boolean', optional: true },
-          boot: { type: 'boolean', default: false },
-          clone: { type: 'boolean', default: true },
-          install: {
-            type: 'object',
-            optional: true,
-            properties: {
-              method: { enum: ['cdrom', 'network'] },
-              repository: { type: 'string' },
-            },
-          },
-          memory: { type: 'integer', optional: true },
-          name_description: { type: 'string', minLength: 0, optional: true },
-          name_label: { type: 'string' },
-          template: { type: 'string' },
-        }
-      ),
-      emergency_shutdown: async ({ xapiObject }) => {
-        await app.checkFeatureAuthorization('POOL_EMERGENCY_SHUTDOWN')
-
-        await xapiObject.$xapi.pool_emergencyShutdown()
-      },
-      rolling_update: async ({ xoObject }) => {
-        await app.checkFeatureAuthorization('ROLLING_POOL_UPDATE')
-
-        await app.rollingPoolUpdate(xoObject)
-      },
-    }
-    collections.vms.actions = {
-      __proto__: null,
-
-      clean_reboot: ({ xapiObject: vm }) => vm.$callAsync('clean_reboot').then(noop),
-      clean_shutdown: ({ xapiObject: vm }) => vm.$callAsync('clean_shutdown').then(noop),
-      hard_reboot: ({ xapiObject: vm }) => vm.$callAsync('hard_reboot').then(noop),
-      hard_shutdown: ({ xapiObject: vm }) => vm.$callAsync('hard_shutdown').then(noop),
-      snapshot: withParams(
-        async ({ xapiObject: vm }, { name_label }) => {
-          const ref = await vm.$snapshot({ name_label })
-          return vm.$xapi.getField('VM', ref, 'uuid')
-        },
-        { name_label: { type: 'string', optional: true } }
-      ),
-      start: ({ xapiObject: vm }) => vm.$callAsync('start', false, false).then(noop),
-    }
+    {
+      const types = [
+        'host',
+        'message',
+        'network',
+        'pool',
+        'SR',
+        'VBD',
+        'VDI-snapshot',
+        'VDI',
+        'VIF',
+        'VM-snapshot',
+        'VM-template',
+        'VM',
+      ]
+      function getObject(id, req) {
+        const { type } = this
+        const object = app.getObject(id, type)
+
+        // add also the XAPI version of the object
+        req.xapiObject = app.getXapiObject(object)
+
+        return object
+      }
+      function getObjects(filter, limit) {
+        return app.getObjects({
+          filter: every(this.isCorrectType, filter),
+          limit,
+        })
+      }
+      async function messages(req, res) {
+        const {
+          object: { id },
+          query,
+        } = req
+        await sendObjects(
+          app.getObjects({
+            filter: every(_ => _.type === 'message' && _.$object === id, handleOptionalUserFilter(query.filter)),
+            limit: ifDef(query.limit, Number),
+          }),
+          req,
+          res,
+          '/messages'
+        )
+      }
+      for (const type of types) {
+        const id = type.toLocaleLowerCase() + 's'
+        collections[id] = { getObject, getObjects, routes: { messages }, isCorrectType: _ => _.type === type, type }
+      }
+    }
+
+    collections.hosts.routes = {
+      ...collections.hosts.routes,
+
+      async 'audit.txt'(req, res) {
+        const host = req.xapiObject
+
+        res.setHeader('content-type', 'text/plain')
+        await pipeline(await host.$xapi.getResource('/audit_log', { host }), compressMaybe(req, res))
+      },
+
+      async 'logs.tar'(req, res) {
+        const host = req.xapiObject
+
+        res.setHeader('content-type', 'application/x-tar')
+        await pipeline(await host.$xapi.getResource('/host_logs_download', { host }), compressMaybe(req, res))
+      },
+
+      async missing_patches(req, res) {
+        await app.checkFeatureAuthorization('LIST_MISSING_PATCHES')
+
+        const host = req.xapiObject
+        res.json(await host.$xapi.listMissingPatches(host))
+      },
+    }
+
+    collections.pools.routes = {
+      ...collections.pools.routes,
+
+      async missing_patches(req, res) {
+        await app.checkFeatureAuthorization('LIST_MISSING_PATCHES')
+
+        const xapi = req.xapiObject.$xapi
+        const missingPatches = new Map()
+        await asyncEach(Object.values(xapi.objects.indexes.type.host ?? {}), async host => {
+          try {
+            for (const patch of await xapi.listMissingPatches(host)) {
+              const { uuid: key = `${patch.name}-${patch.version}-${patch.release}` } = patch
+              missingPatches.set(key, patch)
+            }
+          } catch (error) {
+            console.warn(host.uuid, error)
+          }
+        })
+        res.json(Array.from(missingPatches.values()))
+      },
+    }
+
+    collections.pools.actions = {
+      create_vm: withParams(
+        defer(async ($defer, { xapiObject: { $xapi } }, { affinity, boot, install, template, ...params }, req) => {
+          params.affinityHost = affinity
+          params.installRepository = install?.repository
+
+          const vm = await $xapi.createVm(template, params, undefined, req.user.id)
+          $defer.onFailure.call($xapi, 'VM_destroy', vm.$ref)
+
+          if (boot) {
+            await $xapi.callAsync('VM.start', vm.$ref, false, false)
+          }
+
+          return vm.uuid
+        }),
+        {
+          affinity: { type: 'string', optional: true },
+          auto_poweron: { type: 'boolean', optional: true },
+          boot: { type: 'boolean', default: false },
+          clone: { type: 'boolean', default: true },
+          install: {
+            type: 'object',
+            optional: true,
+            properties: {
+              method: { enum: ['cdrom', 'network'] },
+              repository: { type: 'string' },
+            },
+          },
+          memory: { type: 'integer', optional: true },
+          name_description: { type: 'string', minLength: 0, optional: true },
+          name_label: { type: 'string' },
+          template: { type: 'string' },
+        }
+      ),
+      emergency_shutdown: async ({ xapiObject }) => {
+        await app.checkFeatureAuthorization('POOL_EMERGENCY_SHUTDOWN')
+
+        await xapiObject.$xapi.pool_emergencyShutdown()
+      },
+      rolling_update: async ({ object }) => {
+        await app.checkFeatureAuthorization('ROLLING_POOL_UPDATE')
+
+        await app.rollingPoolUpdate(object)
+      },
+    }
+    collections.vms.actions = {
+      clean_reboot: ({ xapiObject: vm }) => vm.$callAsync('clean_reboot').then(noop),
+      clean_shutdown: ({ xapiObject: vm }) => vm.$callAsync('clean_shutdown').then(noop),
+      hard_reboot: ({ xapiObject: vm }) => vm.$callAsync('hard_reboot').then(noop),
+      hard_shutdown: ({ xapiObject: vm }) => vm.$callAsync('hard_shutdown').then(noop),
+      snapshot: withParams(
+        async ({ xapiObject: vm }, { name_label }) => {
+          const ref = await vm.$snapshot({ name_label })
+          return vm.$xapi.getField('VM', ref, 'uuid')
+        },
+        { name_label: { type: 'string', optional: true } }
+      ),
+      start: ({ xapiObject: vm }) => vm.$callAsync('start', false, false).then(noop),
+    }
+
+    collections.backup = {}
+    collections.groups = {
+      getObject(id) {
+        return app.getGroup(id)
+      },
+      async getObjects(filter, limit) {
+        return handleArray(await app.getAllGroups(), filter, limit)
+      },
+      routes: {
+        async users(req, res) {
+          const { filter, limit } = req.query
+          await sendObjects(
+            handleArray(
+              await Promise.all(req.object.users.map(id => app.getUser(id).then(getUserPublicProperties))),
+              handleOptionalUserFilter(filter),
+              ifDef(limit, Number)
+            ),
+            req,
+            res,
+            '/users'
+          )
+        },
+      },
+    }
+    collections.restore = {}
+    collections.tasks = {}
+    collections.users = {
+      getObject(id) {
+        return app.getUser(id).then(getUserPublicProperties)
+      },
+      async getObjects(filter, limit) {
+        return handleArray(await app.getAllUsers(), filter, limit)
+      },
+      routes: {
+        async groups(req, res) {
+          const { filter, limit } = req.query
+          await sendObjects(
+            handleArray(
+              await Promise.all(req.object.groups.map(id => app.getGroup(id))),
+              handleOptionalUserFilter(filter),
+              ifDef(limit, Number)
+            ),
+            req,
+            res,
+            '/groups'
+          )
+        },
+      },
+    }
+
+    // normalize collections
+    for (const id of Object.keys(collections)) {
+      const collection = collections[id]
+
+      // inject id into the collection
+      collection.id = id
+
+      // set null as prototypes to speed-up look-ups
+      Object.setPrototypeOf(collection, null)
+      const { actions, routes } = collection
+      if (actions !== undefined) {
+        Object.setPrototypeOf(actions, null)
+      }
+      if (routes !== undefined) {
+        Object.setPrototypeOf(routes, null)
+      }
+    }
 
     api.param('collection', (req, res, next) => {
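After this refactor every collection, XAPI-backed or not, exposes the same minimal interface: `getObject(id, req)` and `getObjects(filter, limit)`, plus optional `routes` and `actions` maps that the normalization loop strips of their prototypes. This is what makes the new `/groups` and `/users` collections (and their `/groups/:id/users` and `/users/:id/groups` sub-routes) possible without special-casing the generic handlers. A hypothetical non-XAPI collection would plug in like this, assuming the surrounding `rest-api.mjs` scope (`app.getWidget`/`app.getAllWidgets` are invented accessors):

```js
collections.widgets = {
  getObject(id) {
    return app.getWidget(id) // hypothetical accessor
  },
  async getObjects(filter, limit) {
    return handleArray(await app.getAllWidgets(), filter, limit) // hypothetical accessor
  },
  routes: {
    // optional: each entry is served at /:collection/:object/<route name>
  },
}
```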
@@ -312,14 +416,14 @@
         next()
       }
     })
-    api.param('object', (req, res, next) => {
+    api.param('object', async (req, res, next) => {
       const id = req.params.object
-      const { type } = req.collection
       try {
-        req.xapiObject = app.getXapiObject((req.xoObject = app.getObject(id, type)))
-        next()
+        // eslint-disable-next-line require-atomic-updates
+        req.object = await req.collection.getObject(id, req)
+        return next()
       } catch (error) {
-        if (noSuchObject.is(error, { id, type })) {
+        if (noSuchObject.is(error, { id })) {
           next('route')
         } else {
           next(error)
@@ -478,39 +582,12 @@
       }, true)
     )
 
-    api
-      .get(
-        '/users',
-        wrap(async (req, res) => {
-          let users = await app.getAllUsers()
-
-          const { filter, limit } = req.query
-          if (filter !== undefined) {
-            users = users.filter(CM.parse(filter).createPredicate())
-          }
-          if (limit < users.length) {
-            users.length = limit
-          }
-
-          sendObjects(users.map(getUserPublicProperties), req, res)
-        })
-      )
-      .get(
-        '/users/:id',
-        wrap(async (req, res) => {
-          res.json(getUserPublicProperties(await app.getUser(req.params.id)))
-        })
-      )
-
     api.get(
       '/:collection',
       wrap(async (req, res) => {
         const { query } = req
         await sendObjects(
-          await app.getObjects({
-            filter: every(req.collection.isCorrectType, handleOptionalUserFilter(query.filter)),
-            limit: ifDef(query.limit, Number),
-          }),
+          await req.collection.getObjects(handleOptionalUserFilter(query.filter), ifDef(query.limit, Number)),
          req,
          res
        )
@@ -563,7 +640,7 @@
     )
 
     api.get('/:collection/:object', (req, res) => {
-      let result = req.xoObject
+      let result = req.object
 
       // add locations of sub-routes for discoverability
       const { routes } = req.collection
@@ -618,7 +695,7 @@
       '/:collection/:object/tasks',
       wrap(async (req, res) => {
         const { query } = req
-        const objectId = req.xoObject.id
+        const objectId = req.object.id
         const tasks = app.tasks.list({
           filter: every(
             _ => _.status === 'pending' && _.properties.objectId === objectId,
@@ -658,9 +735,9 @@
       }
     }
 
-    const { xapiObject, xoObject } = req
-    const task = app.tasks.create({ name: `REST: ${action} ${req.collection.type}`, objectId: xoObject.id })
-    const pResult = task.run(() => fn({ xapiObject, xoObject }, params, req))
+    const { object, xapiObject } = req
+    const task = app.tasks.create({ name: `REST: ${action} ${req.collection.type}`, objectId: object.id })
+    const pResult = task.run(() => fn({ object, xapiObject }, params, req))
     if (Object.hasOwn(req.query, 'sync')) {
       pResult.then(result => res.json(result), next)
     } else {
xo-web/package.json

@@ -1,7 +1,7 @@
 {
   "private": true,
   "name": "xo-web",
-  "version": "5.136.0",
+  "version": "5.136.1",
   "license": "AGPL-3.0-or-later",
   "description": "Web interface client for Xen-Orchestra",
   "keywords": [
@@ -31,11 +31,10 @@ export default class ResourceSetQuotas extends Component {
 
     forEach(RESOURCES, resource => {
       if (limits[resource] != null) {
-        const { available, total } = limits[resource]
+        const { total, usage } = limits[resource]
         quotas[resource] = {
-          available,
           total,
-          usage: total - available,
+          usage,
        }
      }
    })
@@ -89,22 +88,26 @@ export default class ResourceSetQuotas extends Component {
       <CardBlock className='text-center'>
         {quota !== undefined ? (
           <div>
-            <ChartistGraph
-              data={{
-                labels,
-                series: [quota.available, quota.usage],
-              }}
-              options={{
-                donut: true,
-                donutWidth: 40,
-                showLabel: false,
-              }}
-              type='Pie'
-            />
+            {Number.isFinite(quota.total) ? (
+              <ChartistGraph
+                data={{
+                  labels,
+                  series: [quota.total - quota.usage, quota.usage],
+                }}
+                options={{
+                  donut: true,
+                  donutWidth: 40,
+                  showLabel: false,
+                }}
+                type='Pie'
+              />
+            ) : (
+              <p className='text-xs-center display-1'>∞</p>
+            )}
             <p className='text-xs-center'>
               {_('resourceSetQuota', {
-                total: validFormat ? quota.total.toString() : formatSize(quota.total),
-                usage: validFormat ? quota.usage.toString() : formatSize(quota.usage),
+                total: !Number.isFinite(quota.total) ? Infinity : formatSize(quota.total),
+                usage: validFormat ? quota.usage?.toString() : formatSize(quota.usage),
               })}
             </p>
           </div>
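With usage-based limits, an unlimited resource has no finite `total`, so the component now guards the donut chart with `Number.isFinite` and falls back to an `∞` placeholder; this is the xo-web half of the "show used resources even when they are unlimited" enhancement. The guard in isolation:

```js
Number.isFinite(10737418240) // true  – finite total: render the donut
Number.isFinite(Infinity)    // false – unlimited: render the '∞' placeholder
Number.isFinite(undefined)   // false – a usage-only limit behaves the same way
```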
@@ -752,6 +752,11 @@ class SortedTable extends Component {
     const { location, stateUrlParam } = this.props
     this.setState({ itemsPerPage })
     cookies.set(`${location.pathname}-${stateUrlParam}`, itemsPerPage)
+
+    // changing the number of items per page should send back to the first page
+    //
+    // see https://github.com/vatesfr/xen-orchestra/issues/7350
+    this._setPage(1)
   }
 
   render() {
@@ -555,6 +555,11 @@ export default class Home extends Component {
   _setNItemsPerPage(nItems) {
     this.setState({ homeItemsPerPage: nItems })
     cookies.set('homeItemsPerPage', nItems)
+
+    // changing the number of items per page should send back to the first page
+    //
+    // see https://github.com/vatesfr/xen-orchestra/issues/7350
+    this._onPageSelection(1)
   }
 
   _getPage() {
@@ -1870,29 +1870,21 @@ export default class NewVm extends BaseComponent {
             {limits && (
               <Row>
                 <Col size={3}>
-                  {cpusLimits && (
-                    <Limits
-                      limit={cpusLimits.total}
-                      toBeUsed={CPUs * factor}
-                      used={cpusLimits.total - cpusLimits.available}
-                    />
+                  {cpusLimits?.total !== undefined && (
+                    <Limits limit={cpusLimits.total} toBeUsed={CPUs * factor} used={cpusLimits.usage} />
                   )}
                 </Col>
                 <Col size={3}>
-                  {memoryLimits && (
-                    <Limits
-                      limit={memoryLimits.total}
-                      toBeUsed={_memory * factor}
-                      used={memoryLimits.total - memoryLimits.available}
-                    />
+                  {memoryLimits?.total !== undefined && (
+                    <Limits limit={memoryLimits.total} toBeUsed={_memory * factor} used={memoryLimits.usage} />
                   )}
                 </Col>
                 <Col size={3}>
-                  {diskLimits && (
+                  {diskLimits?.total !== undefined && (
                     <Limits
                       limit={diskLimits.total}
                       toBeUsed={(sumBy(VDIs, 'size') + sum(map(existingDisks, disk => disk.size))) * factor}
-                      used={diskLimits.total - diskLimits.available}
+                      used={diskLimits.usage}
                     />
                   )}
                 </Col>
@@ -1923,10 +1915,10 @@ export default class NewVm extends BaseComponent {
     const factor = multipleVms ? nameLabels.length : 1
 
     return !(
-      CPUs * factor > get(() => resourceSet.limits.cpus.available) ||
-      _memory * factor > get(() => resourceSet.limits.memory.available) ||
+      CPUs * factor > get(() => resourceSet.limits.cpus.total - resourceSet.limits.cpus.usage) ||
+      _memory * factor > get(() => resourceSet.limits.memory.total - resourceSet.limits.memory.usage) ||
       (sumBy(VDIs, 'size') + sum(map(existingDisks, disk => disk.size))) * factor >
-        get(() => resourceSet.limits.disk.available)
+        get(() => resourceSet.limits.disk.total - resourceSet.limits.disk.usage)
     )
   }
 }
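Same substitution in the creation form: remaining capacity is computed as `total - usage` instead of reading the removed `available` field. Note that for an unlimited resource the subtraction yields `NaN`, every `>` comparison is then false, and creation is never blocked. Illustrative values (the real code reads the limits through a safe `get(() => ...)` helper):

```js
const limits = { cpus: { total: 8, usage: 6 } }

4 > limits.cpus.total - limits.cpus.usage // true  – only 2 CPUs left, block creation
2 > limits.cpus.total - limits.cpus.usage // false – fits, allow it

const unlimited = {} // no cpus limit at all
4 > unlimited.cpus?.total - unlimited.cpus?.usage // false (NaN comparison) – never blocks
```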