Compare commits

4 Commits
xo5/resour ... loadBalanc

| Author | SHA1 | Date |
|---|---|---|
| | a75034bc6d | |
| | 921e847711 | |
| | 7670ddf2e8 | |
| | d0476b563f | |
@@ -17,6 +17,7 @@
 - [Host/Reboot] Confirmation modal to reboot an updated slave host if the master is not [#7059](https://github.com/vatesfr/xen-orchestra/issues/7059) (PR [#7293](https://github.com/vatesfr/xen-orchestra/pull/7293))
 - [Backup/Restore] Show whether the memory was backed up (PR [#7315](https://github.com/vatesfr/xen-orchestra/pull/7315))
 - [Plugin/load-balancer] Limit concurrent VM migrations to 2 (configurable) to avoid long paused VMs [#7084](https://github.com/vatesfr/xen-orchestra/issues/7084) (PR [#7297](https://github.com/vatesfr/xen-orchestra/pull/7297))
+- [Plugin/load-balancer] A parameter was added in performance mode to balance VMs on hosts depending on their number of vCPUs, when it does not cause performance issues [#5389](https://github.com/vatesfr/xen-orchestra/issues/5389) (PR [#7333](https://github.com/vatesfr/xen-orchestra/pull/7333))
 - [Tags] Admin can create colored tags (PR [#7262](https://github.com/vatesfr/xen-orchestra/pull/7262))

 ### Bug fixes
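The [#7333](https://github.com/vatesfr/xen-orchestra/pull/7333) load-balancer entry above is the feature implemented by the code changes below: hosts are compared by their vCPU-per-CPU ratio against the pool-wide average. A rough, self-contained sketch of that idea (the sample hosts and numbers are made up; only the `vcpuCount`/`cpuCount` fields mirror the diff):

```js
// Illustrative sketch only: the real logic lives in _processVcpuPrepositionning() further down.
// A host is a candidate source when it carries more vCPUs per physical CPU than the
// pool-wide average, and a candidate destination when it carries fewer.
const hosts = [
  { id: 'host1', cpuCount: 16, vcpuCount: 40 }, // hypothetical sample data
  { id: 'host2', cpuCount: 16, vcpuCount: 10 },
]

const idealVcpuPerCpuRatio =
  hosts.reduce((sum, host) => sum + host.vcpuCount, 0) / hosts.reduce((sum, host) => sum + host.cpuCount, 0)

for (const host of hosts) {
  const ratio = host.vcpuCount / host.cpuCount
  console.log(host.id, ratio > idealVcpuPerCpuRatio ? 'overloaded (source)' : 'underloaded (destination)')
}
// idealVcpuPerCpuRatio = 50 / 32 ≈ 1.56: host1 (2.5) is a source, host2 (0.625) a destination.
```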
@@ -24,7 +24,7 @@
   "preferGlobal": false,
   "main": "dist/",
   "engines": {
-    "node": ">=7"
+    "node": ">=12.9"
   },
   "dependencies": {
     "@xen-orchestra/cron": "^1.0.6",
@@ -88,9 +88,20 @@ export const configurationSchema = {
           $type: 'Tag',
         },
       },
+
+      balanceVcpus: {
+        type: 'boolean',
+        title: 'Balance vCPUs',
+        description: 'pre-position VMs on hosts to balance vCPU/CPU ratio (performance plan only)',
+      },
     },

     required: ['name', 'mode', 'pools'],
+    // when UI will allow it, remove this anyOf and hide balanceVcpu option outside performance mode
+    anyOf: [
+      { properties: { mode: { const: 'Performance mode' } } },
+      { properties: { balanceVcpus: { const: false } } },
+    ],
   },

   minItems: 1,
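A minimal sketch of how the `anyOf` clause above behaves, assuming an Ajv-style JSON Schema validator and a reduced schema (the enum values and plan objects below are made up for illustration): `balanceVcpus: true` is only accepted when the plan mode is 'Performance mode'.

```js
import Ajv from 'ajv'

// Reduced version of the plan schema above, kept only to demonstrate the anyOf rule.
const planSchema = {
  type: 'object',
  properties: {
    name: { type: 'string' },
    mode: { enum: ['Performance mode', 'Density mode'] }, // hypothetical enum for the sketch
    pools: { type: 'array', items: { type: 'string' } },
    balanceVcpus: { type: 'boolean' },
  },
  required: ['name', 'mode', 'pools'],
  anyOf: [
    { properties: { mode: { const: 'Performance mode' } } },
    { properties: { balanceVcpus: { const: false } } },
  ],
}

const validate = new Ajv().compile(planSchema)

// Accepted: vCPU balancing in a performance plan.
console.log(validate({ name: 'plan1', mode: 'Performance mode', pools: ['pool1'], balanceVcpus: true })) // true

// Rejected: balanceVcpus: true outside performance mode fails both anyOf branches.
console.log(validate({ name: 'plan2', mode: 'Density mode', pools: ['pool1'], balanceVcpus: true })) // false
```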
@@ -45,25 +45,27 @@ export default class PerformancePlan extends Plan {
       toOptimizeOnly: true,
     })

-    if (!results) {
-      return
-    }
-
-    const { averages, toOptimize } = results
-    toOptimize.sort((a, b) => -this._sortHosts(a, b))
-    for (const exceededHost of toOptimize) {
-      const { id } = exceededHost
-
-      debug(`Try to optimize Host (${exceededHost.id}).`)
-      const availableHosts = filter(hosts, host => host.id !== id)
-      debug(`Available destinations: ${availableHosts.map(host => host.id)}.`)
-
-      // Search bests combinations for the worst host.
-      await this._optimize({
-        exceededHost,
-        hosts: availableHosts,
-        hostsAverages: averages,
-      })
-    }
+    if (results) {
+      const { averages, toOptimize } = results
+      toOptimize.sort((a, b) => -this._sortHosts(a, b))
+      for (const exceededHost of toOptimize) {
+        const { id } = exceededHost
+
+        debug(`Try to optimize Host (${exceededHost.id}).`)
+        const availableHosts = filter(hosts, host => host.id !== id)
+        debug(`Available destinations: ${availableHosts.map(host => host.id)}.`)
+
+        // Search bests combinations for the worst host.
+        await this._optimize({
+          exceededHost,
+          hosts: availableHosts,
+          hostsAverages: averages,
+        })
+      }
+    }
+
+    if (this._balanceVcpus) {
+      await this._processVcpuPrepositionning()
+    }
   }

@@ -18,9 +18,12 @@ const LOW_THRESHOLD_FACTOR = 0.65
 const HIGH_THRESHOLD_MEMORY_FREE_FACTOR = 1.2
 const LOW_THRESHOLD_MEMORY_FREE_FACTOR = 1.5

+const THRESHOLD_VCPU_RATIO = 0.9
+
 const numberOrDefault = (value, def) => (value >= 0 ? value : def)

 export const debugAffinity = str => debug(`anti-affinity: ${str}`)
+export const debugVcpuBalancing = str => debug(`vCPU balancing: ${str}`)

 // ===================================================================
 // Averages.
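To see what `THRESHOLD_VCPU_RATIO` does, here is a small worked example with made-up numbers (the `vcpuPerCpuRatio` helper matches the one added further down): pre-positioning is skipped unless the smallest host ratio is at most 90% of the largest.

```js
const THRESHOLD_VCPU_RATIO = 0.9

const vcpuPerCpuRatio = host => host.vcpuCount / host.cpuCount

// Hypothetical hosts: 32 vCPUs on 16 CPUs vs 10 vCPUs on 16 CPUs.
const hosts = [
  { id: 'a', vcpuCount: 32, cpuCount: 16 }, // ratio 2.0
  { id: 'b', vcpuCount: 10, cpuCount: 16 }, // ratio 0.625
]

const ratios = hosts.map(vcpuPerCpuRatio)
const spread = Math.min(...ratios) / Math.max(...ratios) // 0.625 / 2.0 = 0.3125

// 0.3125 <= 0.9, so the hosts are considered unbalanced and pre-positioning proceeds.
console.log(spread > THRESHOLD_VCPU_RATIO ? 'balanced enough, skip' : 'unbalanced, rebalance')
```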
@@ -96,12 +99,18 @@ function setRealCpuAverageOfVms(vms, vmsAverages, nCpus) {

 // ===================================================================

+function vcpuPerCpuRatio(host) {
+  return host.vcpuCount / host.cpuCount
+}
+
+// ===================================================================
+
 export default class Plan {
   constructor(
     xo,
     name,
     poolIds,
-    { excludedHosts, thresholds, antiAffinityTags = [] },
+    { excludedHosts, thresholds, balanceVcpus, antiAffinityTags = [] },
     globalOptions,
     concurrentMigrationLimiter
   ) {
@@ -119,6 +128,7 @@ export default class Plan {
       },
     }
     this._antiAffinityTags = antiAffinityTags
+    this._balanceVcpus = balanceVcpus
     this._globalOptions = globalOptions
     this._concurrentMigrationLimiter = concurrentMigrationLimiter

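The constructor also keeps the `concurrentMigrationLimiter` introduced by PR #7297 (the changelog above mentions a default of 2 concurrent migrations). Its implementation is not part of this diff; the stand-in below is only a sketch with the same `.call(object, methodName, ...args)` shape that `_processVcpuPrepositionning()` uses later, not the plugin's actual code.

```js
// Minimal stand-in for the migration limiter used as
// `this._concurrentMigrationLimiter.call(xapi, 'migrateVm', ...)` later in this diff.
class CallLimiter {
  constructor(concurrency = 2) {
    this._concurrency = concurrency
    this._running = 0
    this._queue = []
  }

  async call(object, methodName, ...args) {
    // wait until a slot is free
    while (this._running >= this._concurrency) {
      await new Promise(resolve => this._queue.push(resolve))
    }
    this._running++
    try {
      return await object[methodName](...args)
    } finally {
      // free the slot and wake one waiter, if any
      this._running--
      this._queue.shift()?.()
    }
  }
}

// Usage sketch, mirroring the call shape seen in the diff:
// limiter.call(sourceXapi, 'migrateVm', vm._xapiId, destinationXapi, destination._xapiId)
```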
@@ -276,6 +286,191 @@ export default class Plan {
     return vmsAverages
   }

+  // ===================================================================
+  // vCPU pre-positionning helpers
+  // ===================================================================
+
+  async _processVcpuPrepositionning() {
+    const promises = []
+
+    const allHosts = await this._getHosts()
+    if (allHosts.length <= 1) {
+      return
+    }
+    const idToHost = keyBy(allHosts, 'id')
+    const allVms = filter(this._getAllRunningVms(), vm => vm.$container in idToHost)
+    const hostList = this._getVCPUHosts(allHosts, allVms)
+    const idealVcpuPerCpuRatio =
+      hostList.reduce((sum, host) => sum + host.vcpuCount, 0) / hostList.reduce((sum, host) => sum + host.cpuCount, 0)
+
+    debugVcpuBalancing('Try to apply vCPU prepositionning.')
+    debugVcpuBalancing(`vCPU count per host: ${inspect(hostList, { depth: null })}`)
+    debugVcpuBalancing(`Average vCPUs per CPU: ${idealVcpuPerCpuRatio}`)
+
+    // execute prepositionning only if vCPU/CPU ratios are different enough, to prevent executing too often
+    const ratio = vcpuPerCpuRatio(minBy(hostList, vcpuPerCpuRatio)) / vcpuPerCpuRatio(maxBy(hostList, vcpuPerCpuRatio))
+    if (ratio > THRESHOLD_VCPU_RATIO) {
+      debugVcpuBalancing(`vCPU ratios not different enough : ${ratio}`)
+      return
+    }
+
+    const vmsAverages = await this._getVmsAverages(allVms, idToHost)
+    const { averages: hostsAverages } = await this._getHostStatsAverages({ hosts: allHosts })
+
+    // 1. Find source host from which to migrate.
+    const sources = sortBy(
+      filter(hostList, host => (host.vcpuCount - 1) / host.cpuCount >= idealVcpuPerCpuRatio),
+      [
+        host => -vcpuPerCpuRatio(host),
+        // Find host with the most memory used
+        host => hostsAverages[host.id].memoryFree,
+      ]
+    )
+    debugVcpuBalancing(`Sources: ${inspect(sources, { depth: null })}`)
+
+    for (const sourceHost of sources) {
+      let deltaSource = sourceHost.vcpuCount - sourceHost.cpuCount * idealVcpuPerCpuRatio
+      // deltaSource = 0 has no guaranatee to be reachable, its value can be non-integer
+      if (deltaSource < 1) {
+        continue
+      }
+
+      // 2. Find destination host.
+      const destinations = sortBy(
+        filter(hostList, host => host.id !== sourceHost.id && host.vcpuCount < host.cpuCount * idealVcpuPerCpuRatio),
+        [
+          // trying to avoid migrations between pools
+          host => host.poolId === sourceHost.poolId,
+          vcpuPerCpuRatio,
+          host => -hostsAverages[host.id].memoryFree,
+        ]
+      )
+      debugVcpuBalancing(`Destinations : ${inspect(destinations, { depth: null })}`)
+
+      if (!destinations.length) {
+        continue // Cannot find a valid destination.
+      }
+
+      // Build VM list to migrate.
+      const sourceVms = Object.values(sourceHost.vms)
+
+      // eslint-disable-next-line no-labels
+      destinationLoop: for (const destinationHost of destinations) {
+        debugVcpuBalancing(`Host candidate: ${sourceHost.id} -> ${destinationHost.id}`)
+
+        // calculating how many vCPUs source should give and how many destination should accept
+        let deltaDestination = destinationHost.vcpuCount - destinationHost.cpuCount * idealVcpuPerCpuRatio
+
+        if (
+          deltaDestination >= 0 ||
+          hostsAverages[destinationHost.id].cpu >= this._thresholds.cpu.low ||
+          hostsAverages[destinationHost.id].memoryFree <= this._thresholds.memoryFree.low
+        ) {
+          continue
+        }
+
+        // avoiding to migrate too much vCPUs for source or destination
+        // deltaSource is positive, deltaDestination is negative, we check which one has greater absolute value
+        let delta = deltaSource > -deltaDestination ? Math.ceil(-deltaDestination) : Math.ceil(deltaSource)
+        const vms = sortBy(
+          filter(
+            sourceVms,
+            vm => hostsAverages[destinationHost.id].memoryFree >= vmsAverages[vm.id].memory && vm.CPUs.number <= delta
+          ),
+          [vm => -vm.CPUs.number]
+        )
+
+        for (const vm of vms) {
+          // migrate only if destination is vCPU-underloaded and if this does not cause performance issues
+          if (
+            vm.CPUs.number <= delta &&
+            hostsAverages[destinationHost.id].cpu + vmsAverages[vm.id].cpu < this._thresholds.cpu.low &&
+            hostsAverages[destinationHost.id].memoryFree - vmsAverages[vm.id].memory > this._thresholds.memoryFree.low
+          ) {
+            const source = idToHost[sourceHost.id]
+            const destination = idToHost[destinationHost.id]
+            debugVcpuBalancing(
+              `Migrate VM (${vm.id} "${vm.name_label}") with ${vm.CPUs.number} vCPU to Host (${destinationHost.id} "${destination.name_label}") from Host (${sourceHost.id} "${source.name_label}").`
+            )
+            // 3. Update tags and averages.
+            // This update can change the source host for the next migration.
+            sourceHost.vcpuCount -= vm.CPUs.number
+            destinationHost.vcpuCount += vm.CPUs.number
+
+            const destinationAverages = hostsAverages[destinationHost.id]
+            const vmAverages = vmsAverages[vm.id]
+
+            destinationAverages.cpu += vmAverages.cpu
+            destinationAverages.memoryFree -= vmAverages.memory
+
+            delete sourceHost.vms[vm.id]
+
+            // 4. Migrate.
+            const sourceXapi = this.xo.getXapi(source)
+            promises.push(
+              this._concurrentMigrationLimiter.call(
+                sourceXapi,
+                'migrateVm',
+                vm._xapiId,
+                this.xo.getXapi(destination),
+                destination._xapiId
+              )
+            )
+            debugVcpuBalancing(`vCPU count per host: ${inspect(hostList, { depth: null })}`)
+
+            // 5. Check if source host is still overloaded and if destination host is still underloaded
+            deltaSource = sourceHost.vcpuCount - sourceHost.cpuCount * idealVcpuPerCpuRatio
+            if (deltaSource < 1) {
+              // eslint-disable-next-line no-labels
+              break destinationLoop
+            }
+            deltaDestination = destinationHost.vcpuCount - destinationHost.cpuCount * idealVcpuPerCpuRatio
+            if (deltaDestination >= 0) {
+              break
+            }
+            delta = deltaSource > -deltaDestination ? Math.ceil(-deltaDestination) : Math.ceil(deltaSource)
+          }
+        }
+      }
+    }
+    return Promise.allSettled(promises)
+  }
+
+  _getVCPUHosts(hosts, vms) {
+    const idToHost = {}
+    for (const host of hosts) {
+      const taggedHost = (idToHost[host.id] = {
+        id: host.id,
+        poolId: host.$poolId,
+        cpuCount: parseInt(host.CPUs.cpu_count),
+        vcpuCount: 0,
+        vms: {},
+      })
+
+      // Hide properties when util.inspect is used.
+      Object.defineProperties(taggedHost, {
+        poolId: { enumerable: false },
+        vms: { enumerable: false },
+      })
+    }
+
+    for (const vm of vms) {
+      const hostId = vm.$container
+      if (!(hostId in idToHost)) {
+        continue
+      }
+
+      const host = idToHost[hostId]
+      host.vcpuCount += vm.CPUs.number
+
+      if (vm.xenTools && vm.tags.every(tag => !this._antiAffinityTags.includes(tag))) {
+        host.vms[vm.id] = vm
+      }
+    }
+
+    return Object.values(idToHost)
+  }
+
   // ===================================================================
   // Anti-affinity helpers
   // ===================================================================
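To make the `deltaSource`/`deltaDestination` bookkeeping above concrete, here is a short worked example with made-up numbers (the expressions mirror those in `_processVcpuPrepositionning()`): the number of vCPUs moved toward one destination is bounded by whichever of the two deltas is smaller in absolute value.

```js
// Made-up hosts around an ideal ratio of 1.5 vCPUs per CPU.
const idealVcpuPerCpuRatio = 1.5

const source = { cpuCount: 16, vcpuCount: 30 }      // 30 - 16 * 1.5 = +6  -> can give up to 6 vCPUs
const destination = { cpuCount: 16, vcpuCount: 20 } // 20 - 16 * 1.5 = -4  -> can absorb up to 4 vCPUs

const deltaSource = source.vcpuCount - source.cpuCount * idealVcpuPerCpuRatio
const deltaDestination = destination.vcpuCount - destination.cpuCount * idealVcpuPerCpuRatio

// Same expression as in the diff: take the smaller magnitude, rounded up.
const delta = deltaSource > -deltaDestination ? Math.ceil(-deltaDestination) : Math.ceil(deltaSource)

console.log({ deltaSource, deltaDestination, delta }) // { deltaSource: 6, deltaDestination: -4, delta: 4 }
// Only VMs whose vCPU count fits within `delta` (here, at most 4 vCPUs) are candidates to migrate.
```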
yarn.lock (16 lines changed)
@@ -4077,14 +4077,6 @@
     estree-walker "^2.0.2"
     source-map-js "^1.0.2"

-"@vue/compiler-dom@3.3.11", "@vue/compiler-dom@^3.3.0":
-  version "3.3.11"
-  resolved "https://registry.yarnpkg.com/@vue/compiler-dom/-/compiler-dom-3.3.11.tgz#36a76ea3a296d41bad133a6912cb0a847d969e4f"
-  integrity sha512-zoAiUIqSKqAJ81WhfPXYmFGwDRuO+loqLxvXmfUdR5fOitPoUiIeFI9cTTyv9MU5O1+ZZglJVTusWzy+wfk5hw==
-  dependencies:
-    "@vue/compiler-core" "3.3.11"
-    "@vue/shared" "3.3.11"
-
 "@vue/compiler-dom@3.4.13":
   version "3.4.13"
   resolved "https://registry.yarnpkg.com/@vue/compiler-dom/-/compiler-dom-3.4.13.tgz#66a80a6ee412a3d32b7175a146b75d9ec3d1c50c"
@@ -4101,6 +4093,14 @@
     "@vue/compiler-core" "3.4.14"
     "@vue/shared" "3.4.14"

+"@vue/compiler-dom@^3.3.0":
+  version "3.3.11"
+  resolved "https://registry.yarnpkg.com/@vue/compiler-dom/-/compiler-dom-3.3.11.tgz#36a76ea3a296d41bad133a6912cb0a847d969e4f"
+  integrity sha512-zoAiUIqSKqAJ81WhfPXYmFGwDRuO+loqLxvXmfUdR5fOitPoUiIeFI9cTTyv9MU5O1+ZZglJVTusWzy+wfk5hw==
+  dependencies:
+    "@vue/compiler-core" "3.3.11"
+    "@vue/shared" "3.3.11"
+
 "@vue/compiler-sfc@2.7.16":
   version "2.7.16"
   resolved "https://registry.yarnpkg.com/@vue/compiler-sfc/-/compiler-sfc-2.7.16.tgz#ff81711a0fac9c68683d8bb00b63f857de77dc83"