Merge remote-tracking branch 'xo-server-load-balancer/master'
commit c90a687179

65 packages/xo-server-load-balancer/.editorconfig Normal file
@@ -0,0 +1,65 @@
# http://EditorConfig.org
#
# Julien Fontanet's configuration
# https://gist.github.com/julien-f/8096213

# Top-most EditorConfig file.
root = true

# Common config.
[*]
charset = utf-8
end_of_line = lf
insert_final_newline = true
trim_trailing_whitespace = true

# CoffeeScript
#
# https://github.com/polarmobile/coffeescript-style-guide/blob/master/README.md
[*.{,lit}coffee]
indent_size = 2
indent_style = space

# Markdown
[*.{md,mdwn,mdown,markdown}]
indent_size = 4
indent_style = space

# Package.json
#
# This indentation style is the one used by npm.
[/package.json]
indent_size = 2
indent_style = space

# Jade
[*.jade]
indent_size = 2
indent_style = space

# JavaScript
#
# Two spaces seems to be the most common style, at least in
# Node.js (http://nodeguide.com/style.html#tabs-vs-spaces).
[*.{js,jsx,ts,tsx}]
indent_size = 2
indent_style = space

# Less
[*.less]
indent_size = 2
indent_style = space

# Sass
#
# Style used for http://libsass.com
[*.s[ac]ss]
indent_size = 2
indent_style = space

# YAML
#
# Only spaces are allowed.
[*.yaml]
indent_size = 2
indent_style = space

5 packages/xo-server-load-balancer/.gitignore (vendored) Normal file
@@ -0,0 +1,5 @@
/dist/
/node_modules/

npm-debug.log
npm-debug.log.*

10 packages/xo-server-load-balancer/.npmignore Normal file
@@ -0,0 +1,10 @@
/examples/
example.js
example.js.map
*.example.js
*.example.js.map

/test/
/tests/
*.spec.js
*.spec.js.map

8 packages/xo-server-load-balancer/.travis.yml Normal file
@@ -0,0 +1,8 @@
language: node_js
node_js:
  - 'stable'
  - '4'

# Use containers.
# http://docs.travis-ci.com/user/workers/container-based-infrastructure/
sudo: false

53 packages/xo-server-load-balancer/README.md Normal file
@@ -0,0 +1,53 @@
# xo-server-load-balancer [Build Status](https://travis-ci.org/vatesfr/xo-server-load-balancer)

XO-Server plugin that allows load balancing.

## Install

Go inside your `xo-server` folder and install it:

```
> npm install --global xo-server-load-balancer
```

## Usage

Like all other xo-server plugins, it can be configured directly via
the web interface, see [the plugin documentation](https://xen-orchestra.com/docs/plugins.html).
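
The configuration must match the `configurationSchema` exported by
`src/index.js`. As a sketch, a single performance plan could look like the
following; the pool and host identifiers are placeholders:

```
{
  "plans": [
    {
      "name": "my plan",
      "mode": "Performance mode",
      "pools": ["<pool-id>"],
      "thresholds": { "cpu": 90, "memoryFree": 64 },
      "excludedHosts": ["<host-id>"]
    }
  ]
}
```

`mode` must be either `Performance mode` or `Density mode`, and each pool may
belong to at most one plan.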

## Development

### Installing dependencies

```
> npm install
```

### Compilation

The source files are watched and automatically recompiled on changes.

```
> npm run dev
```

### Tests

```
> npm run test-dev
```

## Contributions

Contributions are *very* welcome, either on the documentation or on
the code.

You may:

- report any [issue](https://github.com/vatesfr/xo-server-load-balancer/issues)
  you've encountered;
- fork and create a pull request.

## License

AGPL3 © [Vates SAS](http://vates.fr)

78 packages/xo-server-load-balancer/package.json Normal file
@@ -0,0 +1,78 @@
{
  "name": "xo-server-load-balancer",
  "version": "0.3.1",
  "license": "AGPL-3.0",
  "description": "Load balancer for XO-Server",
  "keywords": [
    "load",
    "balancer",
    "server",
    "pool",
    "host"
  ],
  "homepage": "https://github.com/vatesfr/xo-server-load-balancer",
  "bugs": "https://github.com/vatesfr/xo-server-load-balancer/issues",
  "repository": {
    "type": "git",
    "url": "https://github.com/vatesfr/xo-server-load-balancer"
  },
  "author": {
    "name": "Julien Fontanet",
    "email": "julien.fontanet@isonoe.net"
  },
  "preferGlobal": false,
  "main": "dist/",
  "bin": {},
  "files": [
    "dist/"
  ],
  "engines": {
    "node": ">=4"
  },
  "dependencies": {
    "babel-runtime": "^6.11.6",
    "cron": "^1.1.0",
    "event-to-promise": "^0.7.0",
    "lodash": "^4.16.2"
  },
  "devDependencies": {
    "babel-cli": "^6.16.0",
    "babel-eslint": "^7.0.0",
    "babel-plugin-lodash": "^3.2.9",
    "babel-plugin-transform-runtime": "^6.15.0",
    "babel-preset-es2015": "^6.16.0",
    "babel-preset-stage-0": "^6.16.0",
    "dependency-check": "^2.5.1",
    "ghooks": "^1.3.2",
    "standard": "^8.2.0"
  },
  "scripts": {
    "build": "NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
    "depcheck": "dependency-check ./package.json",
    "dev": "NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
    "lint": "standard",
    "posttest": "npm run lint && npm run depcheck",
    "prepublish": "npm run build"
  },
  "babel": {
    "plugins": [
      "transform-runtime",
      "lodash"
    ],
    "presets": [
      "es2015",
      "stage-0"
    ]
  },
  "standard": {
    "ignore": [
      "dist"
    ],
    "parser": "babel-eslint"
  },
  "config": {
    "ghooks": {
      "commit-msg": "npm test"
    }
  }
}

225 packages/xo-server-load-balancer/src/density-plan.js Normal file
@@ -0,0 +1,225 @@
import { clone, filter, map as mapToArray } from 'lodash'

import Plan from './plan'
import { debug } from './utils'

// ===================================================================

export default class DensityPlan extends Plan {
  _checkRessourcesThresholds (objects, averages) {
    return filter(objects, object =>
      averages[object.id].memoryFree > this._thresholds.memoryFree.low
    )
  }

  async execute () {
    const results = await this._findHostsToOptimize()

    if (!results) {
      return
    }

    const {
      hosts,
      toOptimize
    } = results

    let {
      averages: hostsAverages
    } = results

    const pools = await this._getPlanPools()
    let optimizationsCount = 0

    for (const hostToOptimize of toOptimize) {
      const {
        id: hostId,
        $poolId: poolId
      } = hostToOptimize

      const {
        master: masterId
      } = pools[poolId]

      // Avoid optimizing the master.
      if (masterId === hostId) {
        continue
      }

      // A host to optimize needs the ability to be restarted.
      if (hostToOptimize.powerOnMode === '') {
        debug(`Host (${hostId}) does not have a power on mode.`)
        continue
      }

      let poolMaster // Pool master.
      const poolHosts = [] // Without master.
      const masters = [] // Without the master of this loop.
      const otherHosts = []

      for (const dest of hosts) {
        const {
          id: destId,
          $poolId: destPoolId
        } = dest

        // The destination host must not be the host to optimize!
        if (destId === hostId) {
          continue
        }

        if (destPoolId === poolId) {
          if (destId === masterId) {
            poolMaster = dest
          } else {
            poolHosts.push(dest)
          }
        } else if (destId === pools[destPoolId].master) {
          masters.push(dest)
        } else {
          otherHosts.push(dest)
        }
      }
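
      // The destination sets below are tried in priority order: the pool
      // master first, then the other hosts of the same pool, then the
      // masters of the other pools, and finally the remaining hosts.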
      const simulResults = await this._simulate({
        host: hostToOptimize,
        destinations: [
          [ poolMaster ],
          poolHosts,
          masters,
          otherHosts
        ],
        hostsAverages: clone(hostsAverages)
      })

      if (simulResults) {
        // Update stats.
        hostsAverages = simulResults.hostsAverages

        // Migrate.
        await this._migrate(hostId, simulResults.moves)
        optimizationsCount++
      }
    }

    debug(`Density mode: ${optimizationsCount} optimizations.`)
  }

  async _simulate ({ host, destinations, hostsAverages }) {
    const { id: hostId } = host

    debug(`Try to optimize Host (${hostId}).`)

    const vms = await this._getVms(hostId)
    const vmsAverages = await this._getVmsAverages(vms, host)

    for (const vm of vms) {
      if (!vm.xenTools) {
        debug(`VM (${vm.id}) of Host (${hostId}) does not support pool migration.`)
        return
      }
    }

    // Sort VMs by amount of memory. (+ -> -)
    vms.sort((a, b) =>
      vmsAverages[b.id].memory - vmsAverages[a.id].memory
    )

    const simulResults = {
      hostsAverages,
      moves: []
    }

    // Try to find a destination for each VM.
    for (const vm of vms) {
      let move

      // Simulate the VM move on each destinations set.
      for (const subDestinations of destinations) {
        move = this._testMigration({
          vm,
          destinations: subDestinations,
          hostsAverages,
          vmsAverages
        })

        // Destination found.
        if (move) {
          simulResults.moves.push(move)
          break
        }
      }

      // Unable to move a VM.
      if (!move) {
        return
      }
    }

    // Done.
    return simulResults
  }

  // Test if a VM migration on a destination (of a destinations set) is possible.
  _testMigration ({ vm, destinations, hostsAverages, vmsAverages }) {
    const {
      cpu: { critical: criticalCpu },
      memoryFree: { critical: criticalMemoryFree }
    } = this._thresholds

    // Sort the destinations by available memory. (- -> +)
    destinations.sort((a, b) =>
      hostsAverages[a.id].memoryFree - hostsAverages[b.id].memoryFree
    )

    for (const destination of destinations) {
      const destinationAverages = hostsAverages[destination.id]
      const vmAverages = vmsAverages[vm.id]

      // Unable to move the VM: the move would cross a critical threshold.
      if (
        destinationAverages.cpu + vmAverages.cpu >= criticalCpu ||
        destinationAverages.memoryFree - vmAverages.memory <= criticalMemoryFree
      ) {
        continue
      }

      // Move ok. Update stats.
      destinationAverages.cpu += vmAverages.cpu
      destinationAverages.memoryFree -= vmAverages.memory

      // Available movement.
      return {
        vm,
        destination
      }
    }
  }

  // Migrate the VMs of one host.
  // Try to shut down the VMs' host afterwards.
  async _migrate (hostId, moves) {
    const xapiSrc = this.xo.getXapi(hostId)

    await Promise.all(
      mapToArray(moves, move => {
        const {
          vm,
          destination
        } = move
        const xapiDest = this.xo.getXapi(destination)
        debug(`Migrate VM (${vm.id}) to Host (${destination.id}) from Host (${vm.$container}).`)
        return xapiSrc.migrateVm(vm._xapiId, xapiDest, destination._xapiId)
      })
    )

    debug(`Shutdown Host (${hostId}).`)

    try {
      await xapiSrc.shutdownHost(hostId)
    } catch (error) {
      debug(`Unable to shutdown Host (${hostId}).`, error)
    }
  }
}

203 packages/xo-server-load-balancer/src/index.js Normal file
@@ -0,0 +1,203 @@
import EventEmitter from 'events'
import eventToPromise from 'event-to-promise'
import { CronJob } from 'cron'
import { intersection, map as mapToArray, uniq } from 'lodash'

import DensityPlan from './density-plan'
import PerformancePlan from './performance-plan'
import {
  DEFAULT_CRITICAL_THRESHOLD_CPU,
  DEFAULT_CRITICAL_THRESHOLD_MEMORY_FREE
} from './plan'
import {
  EXECUTION_DELAY,
  debug
} from './utils'

class Emitter extends EventEmitter {}

// ===================================================================

const PERFORMANCE_MODE = 0
const DENSITY_MODE = 1

// ===================================================================

export const configurationSchema = {
  type: 'object',

  properties: {
    plans: {
      type: 'array',
      description: 'an array of plans',
      title: 'Plans',

      items: {
        type: 'object',
        title: 'Plan',

        properties: {
          name: {
            type: 'string',
            title: 'Name'
          },

          mode: {
            enum: [ 'Performance mode', 'Density mode' ],
            title: 'Mode'
          },

          pools: {
            type: 'array',
            description: 'list of pools on which to apply the policy',

            items: {
              type: 'string',
              $type: 'Pool'
            }
          },

          thresholds: {
            type: 'object',
            title: 'Critical thresholds',

            properties: {
              cpu: {
                type: 'integer',
                title: 'CPU (%)',
                default: DEFAULT_CRITICAL_THRESHOLD_CPU
              },
              memoryFree: {
                type: 'integer',
                title: 'RAM, Free memory (MB)',
                default: DEFAULT_CRITICAL_THRESHOLD_MEMORY_FREE
              }
            }
          },

          excludedHosts: {
            type: 'array',
            title: 'Excluded hosts',
            description: 'list of hosts that are not affected by the plan',

            items: {
              type: 'string',
              $type: 'Host'
            }
          }
        },

        required: [ 'name', 'mode', 'pools' ]
      },

      minItems: 1
    }
  },

  additionalProperties: false
}

// ===================================================================

// Create a job that is not enabled by default.
// A job is a cron task plus a running flag and an enabled state.
const makeJob = (cronPattern, fn) => {
  const job = {
    running: false,
    emitter: new Emitter()
  }

  job.cron = new CronJob(cronPattern, async () => {
    if (job.running) {
      return
    }

    job.running = true

    try {
      await fn()
    } catch (error) {
      console.error('[WARN] scheduled function:', error && error.stack || error)
    } finally {
      job.running = false
      job.emitter.emit('finish')
    }
  })

  job.isEnabled = () => job.cron.running

  return job
}

// ===================================================================
// ===================================================================

class LoadBalancerPlugin {
  constructor (xo) {
    this.xo = xo
    this._job = makeJob(`*/${EXECUTION_DELAY} * * * *`, ::this._executePlans)
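    // With EXECUTION_DELAY = 1 (see src/utils.js), the pattern above expands
    // to '*/1 * * * *': the plans are evaluated every minute.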
  }

  async configure ({ plans }) {
    const job = this._job
    const enabled = job.isEnabled()

    if (enabled) {
      job.cron.stop()
    }

    // Wait until all old plans have stopped running.
    if (job.running) {
      await eventToPromise(job.emitter, 'finish')
    }

    this._plans = []
    this._poolIds = [] // Used pools.

    if (plans) {
      for (const plan of plans) {
        this._addPlan(plan.mode === 'Performance mode' ? PERFORMANCE_MODE : DENSITY_MODE, plan)
      }
    }

    if (enabled) {
      job.cron.start()
    }
  }

  load () {
    this._job.cron.start()
  }

  unload () {
    this._job.cron.stop()
  }

  _addPlan (mode, { name, pools, ...options }) {
    pools = uniq(pools)

    // Check already used pools.
    if (intersection(pools, this._poolIds).length > 0) {
      throw new Error(`Pool(s) already included in another plan: ${pools}`)
    }

    this._poolIds = this._poolIds.concat(pools)
    this._plans.push(mode === PERFORMANCE_MODE
      ? new PerformancePlan(this.xo, name, pools, options)
      : new DensityPlan(this.xo, name, pools, options)
    )
  }

  _executePlans () {
    debug('Execute plans!')

    return Promise.all(
      mapToArray(this._plans, plan => plan.execute())
    )
  }
}

// ===================================================================

export default ({ xo }) => new LoadBalancerPlugin(xo)

138 packages/xo-server-load-balancer/src/performance-plan.js Normal file
@@ -0,0 +1,138 @@
import { filter, map as mapToArray } from 'lodash'

import Plan from './plan'
import { debug } from './utils'

// Compare a list of objects and return the best one.
function searchBestObject (objects, fun) {
  let object = objects[0]

  for (let i = 1; i < objects.length; i++) {
    if (fun(object, objects[i]) > 0) {
      object = objects[i]
    }
  }

  return object
}
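
// Convention: fun(current, candidate) > 0 means the candidate is better.
// For example, with fun = (a, b) => averages[a.id].cpu - averages[b.id].cpu,
// searchBestObject(hosts, fun) returns the host with the lowest average CPU
// usage.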

// ===================================================================

export default class PerformancePlan extends Plan {
  _checkRessourcesThresholds (objects, averages) {
    return filter(objects, object => {
      const objectAverages = averages[object.id]

      return (
        objectAverages.cpu >= this._thresholds.cpu.high ||
        objectAverages.memoryFree <= this._thresholds.memoryFree.high
      )
    })
  }

  async execute () {
    // Try to power on a hosts set.
    try {
      await Promise.all(
        mapToArray(
          filter(this._getHosts({ powerState: 'Halted' }), host => host.powerOnMode !== ''),
          host => {
            const { id } = host
            return this.xo.getXapi(id).powerOnHost(id)
          }
        )
      )
    } catch (error) {
      console.error(error)
    }

    const results = await this._findHostsToOptimize()

    if (!results) {
      return
    }

    const {
      averages,
      toOptimize
    } = results
    let { hosts } = results

    toOptimize.sort((a, b) => {
      a = averages[a.id]
      b = averages[b.id]

      return (b.cpu - a.cpu) || (a.memoryFree - b.memoryFree)
    })

    for (const exceededHost of toOptimize) {
      const { id } = exceededHost

      debug(`Try to optimize Host (${exceededHost.id}).`)
      hosts = filter(hosts, host => host.id !== id)

      // Search the best combinations for the worst host.
      await this._optimize({
        exceededHost,
        hosts,
        hostsAverages: averages
      })
    }
  }

  async _optimize ({ exceededHost, hosts, hostsAverages }) {
    const vms = await this._getVms(exceededHost.id)
    const vmsAverages = await this._getVmsAverages(vms, exceededHost)

    // Sort VMs by CPU usage. (higher to lower)
    vms.sort((a, b) =>
      vmsAverages[b.id].cpu - vmsAverages[a.id].cpu
    )

    const exceededAverages = hostsAverages[exceededHost.id]
    const promises = []

    const xapiSrc = this.xo.getXapi(exceededHost)
    let optimizationsCount = 0

    const searchFunction = (a, b) => hostsAverages[a.id].cpu - hostsAverages[b.id].cpu

    for (const vm of vms) {
      // Search for the host with the lowest CPU usage in the same pool first, in other pools if necessary.
      let destination = searchBestObject(filter(hosts, host => host.$poolId === vm.$poolId), searchFunction)

      if (!destination) {
        destination = searchBestObject(hosts, searchFunction)
      }

      const destinationAverages = hostsAverages[destination.id]
      const vmAverages = vmsAverages[vm.id]

      // Unable to move the VM.
      if (
        exceededAverages.cpu - vmAverages.cpu < destinationAverages.cpu + vmAverages.cpu ||
        destinationAverages.memoryFree < vmAverages.memory
      ) {
        continue
      }

      exceededAverages.cpu -= vmAverages.cpu
      destinationAverages.cpu += vmAverages.cpu

      exceededAverages.memoryFree += vmAverages.memory
      destinationAverages.memoryFree -= vmAverages.memory

      debug(`Migrate VM (${vm.id}) to Host (${destination.id}) from Host (${exceededHost.id}).`)
      optimizationsCount++

      promises.push(
        xapiSrc.migrateVm(vm._xapiId, this.xo.getXapi(destination), destination._xapiId)
      )
    }

    await Promise.all(promises)
    debug(`Performance mode: ${optimizationsCount} optimizations for Host (${exceededHost.id}).`)
  }
}

253 packages/xo-server-load-balancer/src/plan.js Normal file
@@ -0,0 +1,253 @@
import { filter, includes, map as mapToArray } from 'lodash'

import {
  EXECUTION_DELAY,
  debug
} from './utils'

const MINUTES_OF_HISTORICAL_DATA = 30

// CPU threshold in percent.
export const DEFAULT_CRITICAL_THRESHOLD_CPU = 90.0

// Memory threshold in MB.
export const DEFAULT_CRITICAL_THRESHOLD_MEMORY_FREE = 64.0

// Threshold factors.
const HIGH_THRESHOLD_FACTOR = 0.85
const LOW_THRESHOLD_FACTOR = 0.25

const HIGH_THRESHOLD_MEMORY_FREE_FACTOR = 1.25
const LOW_THRESHOLD_MEMORY_FREE_FACTOR = 20.0
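
// With the defaults above, the thresholds derived in the constructor are:
//   cpu:        critical 90 %, high 90 * 0.85 = 76.5 %, low 90 * 0.25 = 22.5 %
//   memoryFree: critical 64 MB (multiplied by 1024 in the constructor),
//               high 64 * 1.25 = 80 MB, low 64 * 20 = 1280 MB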

const numberOrDefault = (value, def) => (value >= 0) ? value : def

// ===================================================================
// Averages.
// ===================================================================

// Compute the mean of the last nPoints values, ignoring null/undefined points.
function computeAverage (values, nPoints = values.length) {
  let sum = 0
  let tot = 0

  const { length } = values

  for (let i = length - nPoints; i < length; i++) {
    const value = values[i]

    sum += value || 0

    if (value) {
      tot += 1
    }
  }

  return sum / tot
}

function computeRessourcesAverage (objects, objectsStats, nPoints) {
  const averages = {}

  for (const object of objects) {
    const { id } = object
    const { stats } = objectsStats[id]

    averages[id] = {
      cpu: computeAverage(
        mapToArray(stats.cpus, cpu => computeAverage(cpu, nPoints))
      ),
      nCpus: stats.cpus.length,
      memoryFree: computeAverage(stats.memoryFree, nPoints),
      memory: computeAverage(stats.memory, nPoints)
    }
  }

  return averages
}

// Weighted mean of two average sets: ratio * averages1 + (1 - ratio) * averages2.
function computeRessourcesAverageWithWeight (averages1, averages2, ratio) {
  const averages = {}

  for (const id in averages1) {
    const objectAverages = averages[id] = {}

    for (const averageName in averages1[id]) {
      objectAverages[averageName] = averages1[id][averageName] * ratio + averages2[id][averageName] * (1 - ratio)
    }
  }

  return averages
}

function setRealCpuAverageOfVms (vms, vmsAverages, nCpus) {
  for (const vm of vms) {
    const averages = vmsAverages[vm.id]
    averages.cpu *= averages.nCpus / nCpus
  }
}

// ===================================================================

export default class Plan {
  constructor (xo, name, poolIds, {
    excludedHosts,
    thresholds
  } = {}) {
    this.xo = xo
    this._name = name
    this._poolIds = poolIds
    this._excludedHosts = excludedHosts
    this._thresholds = {
      cpu: {
        critical: numberOrDefault(thresholds && thresholds.cpu, DEFAULT_CRITICAL_THRESHOLD_CPU)
      },
      memoryFree: {
        critical: numberOrDefault(thresholds && thresholds.memoryFree, DEFAULT_CRITICAL_THRESHOLD_MEMORY_FREE) * 1024
      }
    }

    for (const key in this._thresholds) {
      const attr = this._thresholds[key]
      const { critical } = attr

      if (key === 'memoryFree') {
        attr.high = critical * HIGH_THRESHOLD_MEMORY_FREE_FACTOR
        attr.low = critical * LOW_THRESHOLD_MEMORY_FREE_FACTOR
      } else {
        attr.high = critical * HIGH_THRESHOLD_FACTOR
        attr.low = critical * LOW_THRESHOLD_FACTOR
      }
    }
  }

  execute () {
    throw new Error('Not implemented')
  }

  // ===================================================================
  // Get hosts to optimize.
  // ===================================================================

  async _findHostsToOptimize () {
    const hosts = this._getHosts()
    const hostsStats = await this._getHostsStats(hosts, 'minutes')

    // Check if a resource's utilization exceeds a threshold.
    const avgNow = computeRessourcesAverage(hosts, hostsStats, EXECUTION_DELAY)
    let toOptimize = this._checkRessourcesThresholds(hosts, avgNow)

    // No resource utilization problem.
    if (toOptimize.length === 0) {
      debug('No hosts to optimize.')
      return
    }

    // Check against the last 30 min interval with a weighting ratio.
    const avgBefore = computeRessourcesAverage(hosts, hostsStats, MINUTES_OF_HISTORICAL_DATA)
    const avgWithRatio = computeRessourcesAverageWithWeight(avgNow, avgBefore, 0.75)

    toOptimize = this._checkRessourcesThresholds(toOptimize, avgWithRatio)

    // No resource utilization problem.
    if (toOptimize.length === 0) {
      debug('No hosts to optimize.')
      return
    }

    return {
      toOptimize,
      averages: avgWithRatio,
      hosts
    }
  }

  _checkRessourcesThresholds () {
    throw new Error('Not implemented')
  }

  // ===================================================================
  // Get objects.
  // ===================================================================

  _getPlanPools () {
    const pools = {}

    try {
      for (const poolId of this._poolIds) {
        pools[poolId] = this.xo.getObject(poolId)
      }
    } catch (_) {
      return {}
    }

    return pools
  }

  // Compute hosts for each pool. They can change over time.
  _getHosts ({ powerState = 'Running' } = {}) {
    return filter(this.xo.getObjects(), object => (
      object.type === 'host' &&
      includes(this._poolIds, object.$poolId) &&
      object.power_state === powerState &&
      !includes(this._excludedHosts, object.id)
    ))
  }

  async _getVms (hostId) {
    return filter(this.xo.getObjects(), object =>
      object.type === 'VM' &&
      object.power_state === 'Running' &&
      object.$container === hostId
    )
  }

  // ===================================================================
  // Get stats.
  // ===================================================================

  async _getHostsStats (hosts, granularity) {
    const hostsStats = {}

    await Promise.all(mapToArray(hosts, host =>
      this.xo.getXapiHostStats(host, granularity).then(hostStats => {
        hostsStats[host.id] = {
          nPoints: hostStats.stats.cpus[0].length,
          stats: hostStats.stats,
          averages: {}
        }
      })
    ))

    return hostsStats
  }

  async _getVmsStats (vms, granularity) {
    const vmsStats = {}

    await Promise.all(mapToArray(vms, vm =>
      this.xo.getXapiVmStats(vm, granularity).then(vmStats => {
        vmsStats[vm.id] = {
          nPoints: vmStats.stats.cpus[0].length,
          stats: vmStats.stats,
          averages: {}
        }
      })
    ))

    return vmsStats
  }

  async _getVmsAverages (vms, host) {
    const vmsStats = await this._getVmsStats(vms, 'minutes')
    const vmsAverages = computeRessourcesAverageWithWeight(
      computeRessourcesAverage(vms, vmsStats, EXECUTION_DELAY),
      computeRessourcesAverage(vms, vmsStats, MINUTES_OF_HISTORICAL_DATA),
      0.75
    )

    // Compute real CPU usage: scale virtual CPUs to the host's real CPUs.
    setRealCpuAverageOfVms(vms, vmsAverages, host.CPUs.cpu_count)

    return vmsAverages
  }
}

13 packages/xo-server-load-balancer/src/utils.js Normal file
@@ -0,0 +1,13 @@
const noop = () => {}

const LOAD_BALANCER_DEBUG = 1

// Delay between each resource evaluation, in minutes.
// Must be less than MINUTES_OF_HISTORICAL_DATA.
export const EXECUTION_DELAY = 1

// ===================================================================

export const debug = LOAD_BALANCER_DEBUG
  ? str => console.log(`[load-balancer] ${str}`)
  : noop