From ab6bd56006d8bf2c65ce526ef988c04523cf51c4 Mon Sep 17 00:00:00 2001 From: wescoeur Date: Wed, 24 Feb 2016 10:05:20 +0100 Subject: [PATCH 01/50] Initial commit. --- packages/xo-server-load-balancer/.babelrc | 11 ++ .../xo-server-load-balancer/.editorconfig | 65 ++++++++ packages/xo-server-load-balancer/.gitignore | 9 ++ packages/xo-server-load-balancer/.mocha.js | 5 + packages/xo-server-load-balancer/.mocha.opts | 1 + packages/xo-server-load-balancer/.npmignore | 10 ++ packages/xo-server-load-balancer/.travis.yml | 10 ++ packages/xo-server-load-balancer/README.md | 58 ++++++++ packages/xo-server-load-balancer/package.json | 69 +++++++++ packages/xo-server-load-balancer/src/index.js | 139 ++++++++++++++++++ .../xo-server-load-balancer/src/index.spec.js | 13 ++ 11 files changed, 390 insertions(+) create mode 100644 packages/xo-server-load-balancer/.babelrc create mode 100644 packages/xo-server-load-balancer/.editorconfig create mode 100644 packages/xo-server-load-balancer/.gitignore create mode 100644 packages/xo-server-load-balancer/.mocha.js create mode 100644 packages/xo-server-load-balancer/.mocha.opts create mode 100644 packages/xo-server-load-balancer/.npmignore create mode 100644 packages/xo-server-load-balancer/.travis.yml create mode 100644 packages/xo-server-load-balancer/README.md create mode 100644 packages/xo-server-load-balancer/package.json create mode 100644 packages/xo-server-load-balancer/src/index.js create mode 100644 packages/xo-server-load-balancer/src/index.spec.js diff --git a/packages/xo-server-load-balancer/.babelrc b/packages/xo-server-load-balancer/.babelrc new file mode 100644 index 000000000..bc055f47f --- /dev/null +++ b/packages/xo-server-load-balancer/.babelrc @@ -0,0 +1,11 @@ +{ + "comments": false, + "compact": true, + "optional": [ + "es7.asyncFunctions", + "es7.decorators", + "es7.exportExtensions", + "es7.functionBind", + "runtime" + ] +} diff --git a/packages/xo-server-load-balancer/.editorconfig 
b/packages/xo-server-load-balancer/.editorconfig new file mode 100644 index 000000000..da21ef4c5 --- /dev/null +++ b/packages/xo-server-load-balancer/.editorconfig @@ -0,0 +1,65 @@ +# http://EditorConfig.org +# +# Julien Fontanet's configuration +# https://gist.github.com/julien-f/8096213 + +# Top-most EditorConfig file. +root = true + +# Common config. +[*] +charset = utf-8 +end_of_line = lf +insert_final_newline = true +trim_trailing_whitespaces = true + +# CoffeeScript +# +# https://github.com/polarmobile/coffeescript-style-guide/blob/master/README.md +[*.{,lit}coffee] +indent_size = 2 +indent_style = space + +# Markdown +[*.{md,mdwn,mdown,markdown}] +indent_size = 4 +indent_style = space + +# Package.json +# +# This indentation style is the one used by npm. +[/package.json] +indent_size = 2 +indent_style = space + +# Jade +[*.jade] +indent_size = 2 +indent_style = space + +# JavaScript +# +# Two spaces seems to be the standard most common style, at least in +# Node.js (http://nodeguide.com/style.html#tabs-vs-spaces). +[*.js] +indent_size = 2 +indent_style = space + +# Less +[*.less] +indent_size = 2 +indent_style = space + +# Sass +# +# Style used for http://libsass.com +[*.s[ac]ss] +indent_size = 2 +indent_style = space + +# YAML +# +# Only spaces are allowed. 
+[*.yaml] +indent_size = 2 +indent_style = space diff --git a/packages/xo-server-load-balancer/.gitignore b/packages/xo-server-load-balancer/.gitignore new file mode 100644 index 000000000..6959be1cf --- /dev/null +++ b/packages/xo-server-load-balancer/.gitignore @@ -0,0 +1,9 @@ +/.nyc_output/ +/bower_components/ +/dist/ + +npm-debug.log +npm-debug.log.* + +!node_modules/* +node_modules/*/ diff --git a/packages/xo-server-load-balancer/.mocha.js b/packages/xo-server-load-balancer/.mocha.js new file mode 100644 index 000000000..e6d84e403 --- /dev/null +++ b/packages/xo-server-load-balancer/.mocha.js @@ -0,0 +1,5 @@ +Error.stackTraceLimit = 100 + +try { require('trace') } catch (_) {} +try { require('clarify') } catch (_) {} +try { require('source-map-support/register') } catch (_) {} diff --git a/packages/xo-server-load-balancer/.mocha.opts b/packages/xo-server-load-balancer/.mocha.opts new file mode 100644 index 000000000..6cfd94898 --- /dev/null +++ b/packages/xo-server-load-balancer/.mocha.opts @@ -0,0 +1 @@ +--require ./.mocha.js diff --git a/packages/xo-server-load-balancer/.npmignore b/packages/xo-server-load-balancer/.npmignore new file mode 100644 index 000000000..c31ee82cb --- /dev/null +++ b/packages/xo-server-load-balancer/.npmignore @@ -0,0 +1,10 @@ +/examples/ +example.js +example.js.map +*.example.js +*.example.js.map + +/test/ +/tests/ +*.spec.js +*.spec.js.map diff --git a/packages/xo-server-load-balancer/.travis.yml b/packages/xo-server-load-balancer/.travis.yml new file mode 100644 index 000000000..502095fce --- /dev/null +++ b/packages/xo-server-load-balancer/.travis.yml @@ -0,0 +1,10 @@ +language: node_js +node_js: + - 'stable' + - '4' + - '0.12' + - '0.10' + +# Use containers. 
+# http://docs.travis-ci.com/user/workers/container-based-infrastructure/ +sudo: false diff --git a/packages/xo-server-load-balancer/README.md b/packages/xo-server-load-balancer/README.md new file mode 100644 index 000000000..53c7d2b94 --- /dev/null +++ b/packages/xo-server-load-balancer/README.md @@ -0,0 +1,58 @@ +# xo-server-load-balancer [![Build Status](https://travis-ci.org/vatesfr/xo-server-load-balancer.png?branch=master)](https://travis-ci.org/vatesfr/xo-server-load-balancer) + +XO-Server plugin that allows load balancing. + +## Install + +Go inside your `xo-server` folder and install it: + +``` +> npm install xo-server-load-balancer +``` + +## Usage + +Edit your `xo-server` configuration and add the plugin name in the `plugins` section. + +```yaml +plugins: + + xo-server-load-balancer: +``` + +## Development + +### Installing dependencies + +``` +> npm install +``` + +### Compilation + +The sources files are watched and automatically recompiled on changes. + +``` +> npm run dev +``` + +### Tests + +``` +> npm run test-dev +``` + +## Contributions + +Contributions are *very* welcomed, either on the documentation or on +the code. + +You may: + +- report any [issue](https://github.com/vatesfr/xo-server-load-balancer/issues) + you've encountered; +- fork and create a pull request. 
+ +## License + +AGPL3 © [Vates SAS](http://vates.fr) diff --git a/packages/xo-server-load-balancer/package.json b/packages/xo-server-load-balancer/package.json new file mode 100644 index 000000000..c09840484 --- /dev/null +++ b/packages/xo-server-load-balancer/package.json @@ -0,0 +1,69 @@ +{ + "name": "xo-server-load-balancer", + "version": "0.0.1", + "license": "AGPL-3.0", + "description": "Load balancer for XO-Server", + "keywords": [ + "load", + "balancer", + "server", + "pool", + "host" + ], + "homepage": "https://github.com/vatesfr/xo-server-load-balancer", + "bugs": "https://github.com/vatesfr/xo-server-load-balancer/issues", + "repository": { + "type": "git", + "url": "https://github.com/vatesfr/xo-server-load-balancer" + }, + "author": { + "name": "Julien Fontanet", + "email": "julien.fontanet@isonoe.net" + }, + "preferGlobal": false, + "main": "dist/", + "bin": {}, + "files": [ + "dist/" + ], + "engines": { + "node": ">=0.12" + }, + "dependencies": { + "babel-runtime": "^5.8.34", + "event-to-promise": "^0.6.0", + "lodash.filter": "^4.2.0", + "lodash.intersection": "^4.1.0", + "lodash.map": "^4.2.0", + "lodash.uniq": "^4.2.0", + "node-xmpp-client": "^3.0.0" + }, + "devDependencies": { + "babel": "^5.8.34", + "babel-eslint": "^4.1.6", + "clarify": "^1.0.5", + "dependency-check": "^2.5.1", + "mocha": "^2.3.4", + "must": "^0.13.1", + "nyc": "^3.2.2", + "source-map-support": "^0.3.3", + "standard": "^5.4.1", + "trace": "^2.0.1" + }, + "scripts": { + "build": "babel --source-maps --out-dir=dist/ src/", + "dev": "babel --watch --source-maps --out-dir=dist/ src/", + "dev-test": "mocha --opts .mocha.opts --watch --reporter=min \"dist/**/*.spec.js\"", + "lint": "standard", + "depcheck": "dependency-check ./package.json", + "posttest": "npm run lint && npm run depcheck", + "prepublish": "npm run build", + "test": "nyc mocha --opts .mocha.opts \"dist/**/*.spec.js\"" + }, + "standard": { + "ignore": [ + "dist/**" + ], + "parser": "babel-eslint" + } +} diff --git 
a/packages/xo-server-load-balancer/src/index.js b/packages/xo-server-load-balancer/src/index.js new file mode 100644 index 000000000..a0e21d4d5 --- /dev/null +++ b/packages/xo-server-load-balancer/src/index.js @@ -0,0 +1,139 @@ +import * as mapToArray from 'lodash.map' +import filter from 'lodash.filter' +import intersection from 'lodash.intersection' +import uniq from 'lodash.uniq' +import { CronJob } from 'cron' + +export const configurationSchema = { + type: 'object', + + properties: { + + }, + + additionalProperties: false, + required: [] +} + +// =================================================================== + +const BALANCING_MODE_PERFORMANCE = 0 + +// Delay between each ressources evaluation in minutes. +// MIN: 1, MAX: 59. +const EXECUTION_DELAY = 1 + +export const makeCronJob = (cronPattern, fn) => { + let running + + const job = new CronJob(cronPattern, async () => { + if (running) { + return + } + + running = true + + try { + await fn() + } catch (error) { + console.error('[WARN] scheduled function:', error && error.stack || error) + } finally { + running = false + } + }) + + return job +} + +class Plan { + constructor (xo, poolUuids, { + mode = BALANCING_MODE_PERFORMANCE + } = {}) { + this.xo = xo + this._mode = mode + this._poolUuids = poolUuids + } + + async execute () { + const stats = await this._getStats( + this._getHosts() + ) + + stats // FIXME + } + + // Compute hosts for each pool. They can change over time. 
+ _getHosts () { + const objects = filter(this.xo.getObjects(), { type: 'host' }) + const hosts = {} + + for (const poolUuid of this._objects) { + hosts[poolUuid] = filter(objects, { uuid: poolUuid }) + } + + return hosts + } + + async _getStats (hosts) { + const promises = [] + + for (const poolUuid of hosts) { + promises.push(Promise.all( + mapToArray(hosts[poolUuid], host => + this.xo.getXapiHostStats(host, 'seconds') + ) + )) + } + + return await Promise.all(promises) + } +} + +class LoadBalancerPlugin { + constructor (xo) { + this.xo = xo + this._plans = [] + this._poolUuids = [] // Used pools. + } + + configure ({...conf}) { + this._cronJob = makeCronJob(`*/${EXECUTION_DELAY} * * * *`, ::this._executePlans) + } + + load () { + this._cronJob.start() + } + + unload () { + this._cronJob.stop() + } + + addPlan (name, poolUuids, mode, behavior) { + poolUuids = uniq(poolUuids) + + // Check already used pools. + if (intersection(poolUuids, this._poolUuids) > 0) { + throw new Error(`Pool(s) already included in an other plan: ${poolUuids}`) + } + + const { xo } = this + + // Test if each pool exists. 
+ for (const poolUuid of poolUuids) { + console.log(poolUuid) + xo.getObject(poolUuid) + } + + this._plans.push(new Plan(xo, poolUuids)) + } + + async _executePlans () { + await Promise.all( + mapToArray(this._plans, plan => plan.execute()) + ) + } +} + +// =================================================================== + +export default ({ xo }) => new LoadBalancerPlugin(xo) diff --git a/packages/xo-server-load-balancer/src/index.spec.js b/packages/xo-server-load-balancer/src/index.spec.js new file mode 100644 index 000000000..6e9c776d2 --- /dev/null +++ b/packages/xo-server-load-balancer/src/index.spec.js @@ -0,0 +1,13 @@ +/* eslint-env mocha */ + +import expect from 'must' + +// =================================================================== + +import myLib from './' + +// =================================================================== + +describe('myLib', () => { + // TODO +}) From 2593743746004d46d2654881f27400cdd629167e Mon Sep 17 00:00:00 2001 From: wescoeur Date: Wed, 24 Feb 2016 10:53:48 +0100 Subject: [PATCH 02/50] Json schema (prototype). 
--- packages/xo-server-load-balancer/package.json | 1 + packages/xo-server-load-balancer/src/index.js | 60 +++++++++++++++---- 2 files changed, 49 insertions(+), 12 deletions(-) diff --git a/packages/xo-server-load-balancer/package.json b/packages/xo-server-load-balancer/package.json index c09840484..dab5fe0fe 100644 --- a/packages/xo-server-load-balancer/package.json +++ b/packages/xo-server-load-balancer/package.json @@ -31,6 +31,7 @@ }, "dependencies": { "babel-runtime": "^5.8.34", + "cron": "^1.1.0", "event-to-promise": "^0.6.0", "lodash.filter": "^4.2.0", "lodash.intersection": "^4.1.0", diff --git a/packages/xo-server-load-balancer/src/index.js b/packages/xo-server-load-balancer/src/index.js index a0e21d4d5..d29983efb 100644 --- a/packages/xo-server-load-balancer/src/index.js +++ b/packages/xo-server-load-balancer/src/index.js @@ -1,18 +1,62 @@ -import * as mapToArray from 'lodash.map' import filter from 'lodash.filter' import intersection from 'lodash.intersection' import uniq from 'lodash.uniq' import { CronJob } from 'cron' +import { default as mapToArray } from 'lodash.map' export const configurationSchema = { type: 'object', properties: { + plans: { + type: 'array', + title: 'plans', + description: 'an array of plans', + items: { + type: 'object', + title: 'plan', + + properties: { + name: { + type: 'string' + }, + mode: { + type: 'object', + + properties: { + performance: { type: 'string' }, + density: { type: 'string' } + }, + + oneOf: [ + { required: ['performance'] }, + { required: ['density'] } + ] + }, + behavior: { + type: 'object', + + properties: { + low: { type: 'string' }, + normal: { type: 'string' }, + aggressive: { type: 'string' } + }, + + oneOf: [ + { required: ['low'] }, + { required: ['normal'] }, + { required: ['aggressive'] } + ] + } + } + }, + minItems: 1, + uniqueItems: true + } }, - additionalProperties: false, - required: [] + additionalProperties: false } // =================================================================== @@ 
-116,15 +160,7 @@ class LoadBalancerPlugin { throw new Error(`Pool(s) already included in an other plan: ${poolUuids}`) } - const { xo } = this - - // Test if each pool exists. - for (const poolUuid of poolUuids) { - console.log(poolUuid) - xo.getObject(poolUuid) - } - - this._plans.push(new Plan(xo, poolUuids)) + this._plans.push(new Plan(this.xo, poolUuids)) } async _executePlans () { From 3013fa86b6ac3b236be9190d1100ce84bc1b3e84 Mon Sep 17 00:00:00 2001 From: wescoeur Date: Wed, 24 Feb 2016 12:50:03 +0100 Subject: [PATCH 03/50] Configure implem. --- packages/xo-server-load-balancer/src/index.js | 118 +++++++++++++----- 1 file changed, 89 insertions(+), 29 deletions(-) diff --git a/packages/xo-server-load-balancer/src/index.js b/packages/xo-server-load-balancer/src/index.js index d29983efb..5847fe2bb 100644 --- a/packages/xo-server-load-balancer/src/index.js +++ b/packages/xo-server-load-balancer/src/index.js @@ -4,6 +4,21 @@ import uniq from 'lodash.uniq' import { CronJob } from 'cron' import { default as mapToArray } from 'lodash.map' +// =================================================================== + +const MODE_PERFORMANCE = 0 +const MODE_DENSITY = 1 + +const BEHAVIOR_LOW = 0 +const BEHAVIOR_NORMAL = 1 +const BEHAVIOR_AGGRESSIVE = 2 + +// Delay between each ressources evaluation in minutes. +// MIN: 1, MAX: 59. 
+const EXECUTION_DELAY = 1 + +// =================================================================== + export const configurationSchema = { type: 'object', @@ -21,12 +36,13 @@ export const configurationSchema = { name: { type: 'string' }, + mode: { type: 'object', properties: { - performance: { type: 'string' }, - density: { type: 'string' } + performance: { type: 'boolean' }, + density: { type: 'boolean' } }, oneOf: [ @@ -34,13 +50,14 @@ export const configurationSchema = { { required: ['density'] } ] }, + behavior: { type: 'object', properties: { - low: { type: 'string' }, - normal: { type: 'string' }, - aggressive: { type: 'string' } + low: { type: 'boolean' }, + normal: { type: 'boolean' }, + aggressive: { type: 'boolean' } }, oneOf: [ @@ -48,11 +65,23 @@ export const configurationSchema = { { required: ['normal'] }, { required: ['aggressive'] } ] + }, + + pools: { + type: 'array', + title: 'list of pools id where to apply the policy', + + items: { + type: 'string', + $objectType: 'pool' + }, + + minItems: 1, + uniqueItems: true } } }, - minItems: 1, - uniqueItems: true + minItems: 1 } }, @@ -61,13 +90,7 @@ export const configurationSchema = { // =================================================================== -const BALANCING_MODE_PERFORMANCE = 0 - -// Delay between each ressources evaluation in minutes. -// MIN: 1, MAX: 59. -const EXECUTION_DELAY = 1 - -export const makeCronJob = (cronPattern, fn) => { +const makeCronJob = (cronPattern, fn) => { let running const job = new CronJob(cronPattern, async () => { @@ -89,13 +112,15 @@ export const makeCronJob = (cronPattern, fn) => { return job } +// =================================================================== + class Plan { - constructor (xo, poolUuids, { - mode = BALANCING_MODE_PERFORMANCE - } = {}) { + constructor (xo, { name, mode, behavior, poolIds }) { this.xo = xo + this._name = name // Useful ? 
this._mode = mode - this._poolUuids = poolUuids + this._behavior = behavior + this._poolIds = poolIds } async execute () { @@ -136,12 +161,47 @@ class Plan { class LoadBalancerPlugin { constructor (xo) { this.xo = xo - this._plans = [] - this._poolUuids = [] // Used pools. + this._cronJob = makeCronJob(`*/${EXECUTION_DELAY} * * * *`, ::this._executePlans) } - configure ({...conf}) { - this._cronJob = makeCronJob(`*/${EXECUTION_DELAY} * * * *`, ::this._executePlans) + async configure ({ plans }) { + const cronJob = this._cronJob + const enabled = cronJob.running + + if (enabled) { + cronJob.stop() + } + + // Wait until all old plans stopped running. + await this._plansPromise + + this._plans = [] + this._poolIds = [] // Used pools. + + if (plans) { + for (const plan of plans) { + const mode = plan.mode.performance + ? MODE_PERFORMANCE + : MODE_DENSITY + + const { behavior: planBehavior } = plan + let behavior + + if (planBehavior.low) { + behavior = BEHAVIOR_LOW + } else if (planBehavior.normal) { + behavior = BEHAVIOR_NORMAL + } else { + behavior = BEHAVIOR_AGGRESSIVE + } + + this._addPlan({ name: plan.name, mode, behavior, poolIds: plan.pools }) + } + } + + if (enabled) { + cronJob.start() + } } load () { @@ -152,21 +212,21 @@ class LoadBalancerPlugin { this._cronJob.stop() } - addPlan (name, poolUuids, mode, behavior) { - poolUuids = uniq(poolUuids) + _addPlan (plan) { + const poolIds = plan.poolIds = uniq(plan.poolIds) // Check already used pools. 
- if (intersection(poolUuids, this._poolUuids) > 0) { - throw new Error(`Pool(s) already included in an other plan: ${poolUuids}`) + if (intersection(poolIds, this._poolIds) > 0) { + throw new Error(`Pool(s) already included in an other plan: ${poolIds}`) } - this._plans.push(new Plan(this.xo, poolUuids)) + this._plans.push(new Plan(this.xo, plan)) } async _executePlans () { - await Promise.all( + return (this._plansPromise = Promise.all( mapToArray(this._plans, plan => plan.execute()) - ) + )) } } From cb97e37c15f81ae56ddbe795baac9445af517dfb Mon Sep 17 00:00:00 2001 From: wescoeur Date: Wed, 24 Feb 2016 15:27:11 +0100 Subject: [PATCH 04/50] Get stats for host. --- packages/xo-server-load-balancer/src/index.js | 64 +++++++++++++------ 1 file changed, 46 insertions(+), 18 deletions(-) diff --git a/packages/xo-server-load-balancer/src/index.js b/packages/xo-server-load-balancer/src/index.js index 5847fe2bb..f15b15440 100644 --- a/packages/xo-server-load-balancer/src/index.js +++ b/packages/xo-server-load-balancer/src/index.js @@ -25,7 +25,6 @@ export const configurationSchema = { properties: { plans: { type: 'array', - title: 'plans', description: 'an array of plans', items: { @@ -69,7 +68,7 @@ export const configurationSchema = { pools: { type: 'array', - title: 'list of pools id where to apply the policy', + description: 'list of pools id where to apply the policy', items: { type: 'string', @@ -124,37 +123,57 @@ class Plan { } async execute () { - const stats = await this._getStats( - this._getHosts() + const stats = await this._getHostsStatsByPool( + this._getHostsByPool() ) - stats // FIXME + console.log(stats) } // Compute hosts for each pool. They can change over time. 
- _getHosts () { + _getHostsByPool () { const objects = filter(this.xo.getObjects(), { type: 'host' }) - const hosts = {} + const hostsByPool = {} - for (const poolUuid of this._objects) { - hosts[poolUuid] = filter(objects, { uuid: poolUuid }) + for (const poolId of this._poolIds) { + hostsByPool[poolId] = filter(objects, { '$poolId': poolId }) } - return hosts + return hostsByPool } - async _getStats (hosts) { + async _getHostsStatsByPool (hostsByPool) { const promises = [] - for (const poolUuid of hosts) { - promises.push(Promise.all( - mapToArray(hosts[poolUuid], host => - this.xo.getXapiHostStats(host, 'seconds') - ) - )) + for (const poolId in hostsByPool) { + promises.push( + Promise.all( + mapToArray(hostsByPool[poolId], host => + this.xo.getXapiHostStats(host, 'seconds') + ) + ).then(stats => { + const obj = {} + let i = 0 + + for (const host of hostsByPool[poolId]) { + obj[host.id] = stats[i++] + } + + return obj + }) + ) } - return await Promise.all(promises) + return Promise.all(promises).then(statsArray => { + const obj = {} + let i = 0 + + for (const poolId in hostsByPool) { + obj[poolId] = statsArray[i++] + } + + return obj + }) } } @@ -199,6 +218,15 @@ class LoadBalancerPlugin { } } + // TMP + this._addPlan({ + name: 'Test plan', + mode: MODE_PERFORMANCE, + behavior: BEHAVIOR_AGGRESSIVE, + poolIds: [ '313624ab-0958-bb1e-45b5-7556a463a10b' ] + }) + this._executePlans() + if (enabled) { cronJob.start() } From d3f52cdd1a19d37e627dc090b932205852d12f6a Mon Sep 17 00:00:00 2001 From: wescoeur Date: Thu, 25 Feb 2016 13:10:27 +0100 Subject: [PATCH 05/50] Compute exceeded hosts. 
--- packages/xo-server-load-balancer/package.json | 1 + packages/xo-server-load-balancer/src/index.js | 193 ++++++++++++------ 2 files changed, 136 insertions(+), 58 deletions(-) diff --git a/packages/xo-server-load-balancer/package.json b/packages/xo-server-load-balancer/package.json index dab5fe0fe..d8b0197a5 100644 --- a/packages/xo-server-load-balancer/package.json +++ b/packages/xo-server-load-balancer/package.json @@ -34,6 +34,7 @@ "cron": "^1.1.0", "event-to-promise": "^0.6.0", "lodash.filter": "^4.2.0", + "lodash.includes": "^4.1.0", "lodash.intersection": "^4.1.0", "lodash.map": "^4.2.0", "lodash.uniq": "^4.2.0", diff --git a/packages/xo-server-load-balancer/src/index.js b/packages/xo-server-load-balancer/src/index.js index f15b15440..1868b9ba3 100644 --- a/packages/xo-server-load-balancer/src/index.js +++ b/packages/xo-server-load-balancer/src/index.js @@ -1,21 +1,32 @@ import filter from 'lodash.filter' import intersection from 'lodash.intersection' import uniq from 'lodash.uniq' +import includes from 'lodash.includes' import { CronJob } from 'cron' import { default as mapToArray } from 'lodash.map' // =================================================================== -const MODE_PERFORMANCE = 0 -const MODE_DENSITY = 1 +const PERFORMANCE_MODE = 0 +const DENSITY_MODE = 1 -const BEHAVIOR_LOW = 0 -const BEHAVIOR_NORMAL = 1 -const BEHAVIOR_AGGRESSIVE = 2 +const LOW_BEHAVIOR = 0 +const NORMAL_BEHAVIOR = 1 +const AGGRESSIVE_BEHAVIOR = 2 // Delay between each ressources evaluation in minutes. -// MIN: 1, MAX: 59. +// Must be less than MINUTES_OF_HISTORICAL_DATA. const EXECUTION_DELAY = 1 +const MINUTES_OF_HISTORICAL_DATA = 30 + +// Threshold cpu in percent. 
+// const CRITICAL_THRESHOLD_CPU = 90 +const HIGH_THRESHOLD_CPU = 76.5 +// const LOW_THRESHOLD_CPU = 22.5 + +// const CRITICAL_THRESHOLD_FREE_MEMORY = 51 +const HIGH_THRESHOLD_FREE_MEMORY = 63.75 +// const LOW_THRESHOLD_FREE_MEMORY = 1020 // =================================================================== @@ -111,6 +122,66 @@ const makeCronJob = (cronPattern, fn) => { return job } +function computeAverage (values, nPoints = values.length) { + let sum = 0 + let tot = 0 + + for (let i = values.length - nPoints; i < values.length; i++) { + const value = values[i] + + sum += value || 0 + + if (value) { + tot += 1 + } + } + + return sum / tot +} + +function computeRessourcesAverage (hosts, hostsStats, nPoints) { + const averages = {} + + for (const host of hosts) { + const hostId = host.id + const hostAverages = averages[hostId] = {} + const { stats } = hostsStats[hostId] + + hostAverages.cpus = computeAverage( + mapToArray(stats.cpus, cpu => computeAverage(cpu, nPoints)) + ) + hostAverages.memoryFree = computeAverage(stats.memoryFree, nPoints) + } + + return averages +} + +function checkRessourcesThresholds (hosts, averages) { + return filter(hosts, host => { + const hostAverages = averages[host.id] + + return ( + hostAverages.cpus >= HIGH_THRESHOLD_CPU || + hostAverages.memoryFree >= HIGH_THRESHOLD_FREE_MEMORY + ) + }) +} + +function computeRessourcesAverageWithRatio (hosts, averages1, averages2, ratio) { + const averages = {} + + for (const host of hosts) { + const hostId = host.id + const hostAverages = averages[hostId] = {} + + for (const averageName in hostAverages) { + hostAverages[averageName] = averages1[averageName] * ratio + averages2[averageName] * (1 - ratio) + } + } + + return averages +} + // =================================================================== class Plan { @@ -123,60 +194,67 @@ class Plan { } async execute () { - const stats = await this._getHostsStatsByPool( - this._getHostsByPool() - ) + if (this._mode === PERFORMANCE_MODE) { + await 
this._executeInPerformanceMode() + } else { + await this._executeInDensityMode() + } + } - console.log(stats) + async _executeInPerformanceMode () { + const hosts = this._getHosts() + const hostsStats = await this._getHostsStats(hosts, 'minutes') + + // 1. Check if a ressource's utilization exceeds threshold. + const avgNow = computeRessourcesAverage(hosts, hostsStats, EXECUTION_DELAY) + let exceeded = checkRessourcesThresholds(hosts, avgNow) + + // No ressource's utilization problem. + if (exceeded.length === 0) { + return + } + + // 2. Check in the last 30 min interval with ratio. + const avgBefore = computeRessourcesAverage(exceeded, hostsStats, MINUTES_OF_HISTORICAL_DATA) + const avgWithRatio = computeRessourcesAverageWithRatio(exceeded, avgNow, avgBefore, 0.75) + exceeded = checkRessourcesThresholds(hosts, avgWithRatio) + + // No ressource's utilization problem. + if (exceeded.length === 0) { + return + } + } + + async _executeInDensityMode () { + throw new Error('not yet implemented') } // Compute hosts for each pool. They can change over time. 
- _getHostsByPool () { - const objects = filter(this.xo.getObjects(), { type: 'host' }) - const hostsByPool = {} - - for (const poolId of this._poolIds) { - hostsByPool[poolId] = filter(objects, { '$poolId': poolId }) - } - - return hostsByPool + _getHosts () { + return filter(this.xo.getObjects(), object => + object.type === 'host' && includes(this._poolIds, object.$poolId) + ) } - async _getHostsStatsByPool (hostsByPool) { - const promises = [] + async _getHostsStats (hosts, granularity) { + const hostsStats = {} - for (const poolId in hostsByPool) { - promises.push( - Promise.all( - mapToArray(hostsByPool[poolId], host => - this.xo.getXapiHostStats(host, 'seconds') - ) - ).then(stats => { - const obj = {} - let i = 0 + await Promise.all(mapToArray(hosts, host => + this.xo.getXapiHostStats(host, granularity).then(hostStats => { + hostsStats[host.id] = { + nPoints: hostStats.stats.cpus[0].length, + stats: hostStats.stats, + averages: {} + } + }) + )) - for (const host of hostsByPool[poolId]) { - obj[host.id] = stats[i++] - } - - return obj - }) - ) - } - - return Promise.all(promises).then(statsArray => { - const obj = {} - let i = 0 - - for (const poolId in hostsByPool) { - obj[poolId] = statsArray[i++] - } - - return obj - }) + return hostsStats } } +// =================================================================== + class LoadBalancerPlugin { constructor (xo) { this.xo = xo @@ -200,18 +278,18 @@ class LoadBalancerPlugin { if (plans) { for (const plan of plans) { const mode = plan.mode.performance - ? MODE_PERFORMANCE - : MODE_DENSITY + ? 
PERFORMANCE_MODE + : DENSITY_MODE const { behavior: planBehavior } = plan let behavior if (planBehavior.low) { - behavior = BEHAVIOR_LOW + behavior = LOW_BEHAVIOR } else if (planBehavior.normal) { - behavior = BEHAVIOR_NORMAL + behavior = NORMAL_BEHAVIOR } else { - behavior = BEHAVIOR_AGGRESSIVE + behavior = AGGRESSIVE_BEHAVIOR } this._addPlan({ name: plan.name, mode, behavior, poolIds: plan.pools }) @@ -221,11 +299,10 @@ class LoadBalancerPlugin { // TMP this._addPlan({ name: 'Test plan', - mode: MODE_PERFORMANCE, - behavior: BEHAVIOR_AGGRESSIVE, + mode: PERFORMANCE_MODE, + behavior: AGGRESSIVE_BEHAVIOR, poolIds: [ '313624ab-0958-bb1e-45b5-7556a463a10b' ] }) - this._executePlans() if (enabled) { cronJob.start() @@ -251,7 +328,7 @@ class LoadBalancerPlugin { this._plans.push(new Plan(this.xo, plan)) } - async _executePlans () { + _executePlans () { return (this._plansPromise = Promise.all( mapToArray(this._plans, plan => plan.execute()) )) From 274884ef4d538eb9d0720b16d1399e8d8583801c Mon Sep 17 00:00:00 2001 From: wescoeur Date: Thu, 25 Feb 2016 16:06:46 +0100 Subject: [PATCH 06/50] Reorder exceeded hosts by priority. --- packages/xo-server-load-balancer/src/index.js | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/packages/xo-server-load-balancer/src/index.js b/packages/xo-server-load-balancer/src/index.js index 1868b9ba3..af1193f9e 100644 --- a/packages/xo-server-load-balancer/src/index.js +++ b/packages/xo-server-load-balancer/src/index.js @@ -126,7 +126,9 @@ function computeAverage (values, nPoints = values.length) { let sum = 0 let tot = 0 - for (let i = values.length - nPoints; i < values.length; i++) { + const { length } = values + + for (let i = length - nPoints; i < length; i++) { const value = values[i] sum += value || 0 @@ -223,6 +225,17 @@ class Plan { if (exceeded.length === 0) { return } + + // 3. Reorder the exceeded hosts by priority. 
+ exceeded.sort((a, b) => { + a = avgWithRatio[a.id] + b = avgWithRatio[b.id] + + return (b.cpus - a.cpus) || (a.memoryFree - b.memoryFree) + }) + + // 4. Search bests combinations... + // TODO } async _executeInDensityMode () { From bec2e3b4a08daca9e3471b1ecfb3472bb20740b2 Mon Sep 17 00:00:00 2001 From: wescoeur Date: Fri, 26 Feb 2016 10:20:15 +0100 Subject: [PATCH 07/50] getVms & getVmsStats functions. --- packages/xo-server-load-balancer/src/index.js | 64 ++++++++++++++++++- 1 file changed, 62 insertions(+), 2 deletions(-) diff --git a/packages/xo-server-load-balancer/src/index.js b/packages/xo-server-load-balancer/src/index.js index af1193f9e..397109c22 100644 --- a/packages/xo-server-load-balancer/src/index.js +++ b/packages/xo-server-load-balancer/src/index.js @@ -217,7 +217,7 @@ class Plan { } // 2. Check in the last 30 min interval with ratio. - const avgBefore = computeRessourcesAverage(exceeded, hostsStats, MINUTES_OF_HISTORICAL_DATA) + const avgBefore = computeRessourcesAverage(hosts, hostsStats, MINUTES_OF_HISTORICAL_DATA) const avgWithRatio = computeRessourcesAverageWithRatio(exceeded, avgNow, avgBefore, 0.75) exceeded = checkRessourcesThresholds(hosts, avgWithRatio) @@ -235,13 +235,31 @@ class Plan { }) // 4. Search bests combinations... - // TODO + const optimizations = await this._computeOptimizations(hosts, exceeded, { + now: avgNow, + before: avgBefore, + withRatio: avgWithRatio + }) + + // 5. Apply optimizations if necessary. + await this._applyOptimizations(optimizations) } async _executeInDensityMode () { throw new Error('not yet implemented') } + async _computeOptimizations (hosts, exceeded, hostsAverages) { + const vms = await this._getVms(exceeded) + const vmsStats = await this._getVmsStats(vms, 'minutes') + + // TODO + } + + async _applyOptimizations (optimizations) { + throw new Error('not yet implemented') + } + // Compute hosts for each pool. They can change over time. 
_getHosts () { return filter(this.xo.getObjects(), object => @@ -264,6 +282,48 @@ class Plan { return hostsStats } + + async _getVms (hosts) { + const objects = this.xo.getObjects() + const vms = {} + + for (const host of hosts) { + const { id } = host + + vms[id] = filter(objects, object => + object.type === 'VM' && + object.power_state === 'Running' && + object.$container === id + ) + } + + return vms + } + + async _getVmsStats (vms, granularity) { + const promises = [] + const vmsStats = {} + + for (const hostId in vms) { + const hostVmsStats = vmsStats[hostId] = {} + + promises.push( + Promise.all(mapToArray(vms[hostId], vm => + this.xo.getXapiVmStats(vm, granularity).then(vmStats => { + hostVmsStats[vm.id] = { + nPoints: vmStats.stats.cpus[0].length, + stats: vmStats.stats, + averages: {} + } + }) + )) + ) + } + + await Promise.all(promises) + + return vmsStats + } } // =================================================================== From 087a71367d2ed2e345e7ce7973141ed98272b6a5 Mon Sep 17 00:00:00 2001 From: wescoeur Date: Fri, 26 Feb 2016 11:07:22 +0100 Subject: [PATCH 08/50] Optimizations only on one host ! --- packages/xo-server-load-balancer/src/index.js | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/packages/xo-server-load-balancer/src/index.js b/packages/xo-server-load-balancer/src/index.js index 397109c22..45a4fc2c0 100644 --- a/packages/xo-server-load-balancer/src/index.js +++ b/packages/xo-server-load-balancer/src/index.js @@ -209,33 +209,33 @@ class Plan { // 1. Check if a ressource's utilization exceeds threshold. const avgNow = computeRessourcesAverage(hosts, hostsStats, EXECUTION_DELAY) - let exceeded = checkRessourcesThresholds(hosts, avgNow) + let exceededHosts = checkRessourcesThresholds(hosts, avgNow) // No ressource's utilization problem. - if (exceeded.length === 0) { + if (exceededHosts.length === 0) { return } // 2. Check in the last 30 min interval with ratio. 
const avgBefore = computeRessourcesAverage(hosts, hostsStats, MINUTES_OF_HISTORICAL_DATA) - const avgWithRatio = computeRessourcesAverageWithRatio(exceeded, avgNow, avgBefore, 0.75) - exceeded = checkRessourcesThresholds(hosts, avgWithRatio) + const avgWithRatio = computeRessourcesAverageWithRatio(exceededHosts, avgNow, avgBefore, 0.75) + exceededHosts = checkRessourcesThresholds(exceededHosts, avgWithRatio) // No ressource's utilization problem. - if (exceeded.length === 0) { + if (exceededHosts.length === 0) { return } // 3. Reorder the exceeded hosts by priority. - exceeded.sort((a, b) => { + exceededHosts.sort((a, b) => { a = avgWithRatio[a.id] b = avgWithRatio[b.id] return (b.cpus - a.cpus) || (a.memoryFree - b.memoryFree) }) - // 4. Search bests combinations... - const optimizations = await this._computeOptimizations(hosts, exceeded, { + // 4. Search bests combinations for the worst host. + const optimizations = await this._computeOptimizations(exceededHosts[0], { now: avgNow, before: avgBefore, withRatio: avgWithRatio @@ -249,7 +249,8 @@ class Plan { throw new Error('not yet implemented') } - async _computeOptimizations (hosts, exceeded, hostsAverages) { + async _computeOptimizations (exceededHost, hostsAverages) { + // Get the vms and stats from exceeded hosts. const vms = await this._getVms(exceeded) const vmsStats = await this._getVmsStats(vms, 'minutes') From dee776742701a3eca896313f81336933684d68f4 Mon Sep 17 00:00:00 2001 From: wescoeur Date: Fri, 26 Feb 2016 11:57:41 +0100 Subject: [PATCH 09/50] Get only the vms stats of the worst host. --- packages/xo-server-load-balancer/src/index.js | 63 +++++++------------ 1 file changed, 23 insertions(+), 40 deletions(-) diff --git a/packages/xo-server-load-balancer/src/index.js b/packages/xo-server-load-balancer/src/index.js index 45a4fc2c0..d786f4355 100644 --- a/packages/xo-server-load-balancer/src/index.js +++ b/packages/xo-server-load-balancer/src/index.js @@ -235,11 +235,12 @@ class Plan { }) // 4. 
Search bests combinations for the worst host. - const optimizations = await this._computeOptimizations(exceededHosts[0], { - now: avgNow, - before: avgBefore, - withRatio: avgWithRatio - }) + const toOptimize = exceededHosts[0] + const optimizations = await this._computeOptimizations( + toOptimize, + filter(hosts, host => host.id !== toOptimize.id), + avgWithRatio + ) // 5. Apply optimizations if necessary. await this._applyOptimizations(optimizations) @@ -251,10 +252,10 @@ class Plan { async _computeOptimizations (exceededHost, hostsAverages) { // Get the vms and stats from exceeded hosts. - const vms = await this._getVms(exceeded) + const vms = await this._getVms(exceededHost.id) const vmsStats = await this._getVmsStats(vms, 'minutes') - // TODO + } async _applyOptimizations (optimizations) { @@ -284,44 +285,26 @@ class Plan { return hostsStats } - async _getVms (hosts) { - const objects = this.xo.getObjects() - const vms = {} - - for (const host of hosts) { - const { id } = host - - vms[id] = filter(objects, object => - object.type === 'VM' && - object.power_state === 'Running' && - object.$container === id - ) - } - - return vms + async _getVms (hostId) { + return filter(this.xo.getObjects(), object => + object.type === 'VM' && + object.power_state === 'Running' && + object.$container === hostId + ) } async _getVmsStats (vms, granularity) { - const promises = [] const vmsStats = {} - for (const hostId in vms) { - const hostVmsStats = vmsStats[hostId] = {} - - promises.push( - Promise.all(mapToArray(vms[hostId], vm => - this.xo.getXapiVmStats(vm, granularity).then(vmStats => { - hostVmsStats[vm.id] = { - nPoints: vmStats.stats.cpus[0].length, - stats: vmStats.stats, - averages: {} - } - }) - )) - ) - } - - await Promise.all(promises) + await Promise.all(mapToArray(vms, vm => + this.xo.getXapiVmStats(vm, granularity).then(vmStats => { + vmsStats[vm.id] = { + nPoints: vmStats.stats.cpus[0].length, + stats: vmStats.stats, + averages: {} + } + }) + )) return 
vmsStats } From 7abba0a69b50cd2b742526d10745575fe33443f5 Mon Sep 17 00:00:00 2001 From: wescoeur Date: Fri, 26 Feb 2016 15:28:00 +0100 Subject: [PATCH 10/50] Many fixes. Compute real cpu usage of vms. --- packages/xo-server-load-balancer/src/index.js | 57 ++++++++++++------- 1 file changed, 35 insertions(+), 22 deletions(-) diff --git a/packages/xo-server-load-balancer/src/index.js b/packages/xo-server-load-balancer/src/index.js index d786f4355..6d5ee684d 100644 --- a/packages/xo-server-load-balancer/src/index.js +++ b/packages/xo-server-load-balancer/src/index.js @@ -21,7 +21,7 @@ const MINUTES_OF_HISTORICAL_DATA = 30 // Threshold cpu in percent. // const CRITICAL_THRESHOLD_CPU = 90 -const HIGH_THRESHOLD_CPU = 76.5 +const HIGH_THRESHOLD_CPU = 0 // 76.5 // const LOW_THRESHOLD_CPU = 22.5 // const CRITICAL_THRESHOLD_FREE_MEMORY = 51 @@ -141,49 +141,55 @@ function computeAverage (values, nPoints = values.length) { return sum / tot } -function computeRessourcesAverage (hosts, hostsStats, nPoints) { +function computeRessourcesAverage (objects, objectsStats, nPoints) { const averages = {} - for (const host of hosts) { - const hostId = host.id - const hostAverages = averages[hostId] = {} - const { stats } = hostsStats[hostId] + for (const object of objects) { + const { id } = object + const { stats } = objectsStats[id] + const objectAverages = averages[id] = {} - hostAverages.cpus = computeAverage( + objectAverages.cpus = computeAverage( mapToArray(stats.cpus, cpu => computeAverage(cpu, nPoints)) ) - hostAverages.memoryFree = computeAverage(stats.memoryFree, nPoints) + objectAverages.memoryFree = computeAverage(stats.memoryFree, nPoints) + objectAverages.memoryUsed = computeAverage(stats.memoryUsed, nPoints) } return averages } -function checkRessourcesThresholds (hosts, averages) { - return filter(hosts, host => { - const hostAverages = averages[host.id] +function checkRessourcesThresholds (objects, averages) { + return filter(objects, object => { + const objectAverages 
= averages[object.id] return ( - hostAverages.cpus >= HIGH_THRESHOLD_CPU || - hostAverages.memoryFree >= HIGH_THRESHOLD_FREE_MEMORY + objectAverages.cpus >= HIGH_THRESHOLD_CPU || + objectAverages.memoryFree >= HIGH_THRESHOLD_FREE_MEMORY ) }) } -function computeRessourcesAverageWithRatio (hosts, averages1, averages2, ratio) { +function computeRessourcesAverageWithWeight (averages1, averages2, ratio) { const averages = {} - for (const host of hosts) { - const hostId = host.id - const hostAverages = averages[hostId] = {} + for (const id in averages1) { + const objectAverages = averages[id] = {} - for (const averageName in hostAverages) { - hostAverages[averageName] = averages1[averageName] * ratio + averages2[averageName] * (1 - ratio) + for (const averageName in averages1[id]) { + objectAverages[averageName] = averages1[id][averageName] * ratio + averages2[id][averageName] * (1 - ratio) } } return averages } +function setRealCpuAverageOfVms (vms, vmsAverages) { + for (const vm of vms) { + vmsAverages[vm.id].cpus /= vm.CPUs.number + } +} + // =================================================================== class Plan { @@ -218,7 +224,7 @@ class Plan { // 2. Check in the last 30 min interval with ratio. const avgBefore = computeRessourcesAverage(hosts, hostsStats, MINUTES_OF_HISTORICAL_DATA) - const avgWithRatio = computeRessourcesAverageWithRatio(exceededHosts, avgNow, avgBefore, 0.75) + const avgWithRatio = computeRessourcesAverageWithWeight(avgNow, avgBefore, 0.75) exceededHosts = checkRessourcesThresholds(exceededHosts, avgWithRatio) // No ressource's utilization problem. @@ -251,15 +257,22 @@ class Plan { } async _computeOptimizations (exceededHost, hostsAverages) { - // Get the vms and stats from exceeded hosts. 
const vms = await this._getVms(exceededHost.id) const vmsStats = await this._getVmsStats(vms, 'minutes') + const vmsAverages = computeRessourcesAverageWithWeight( + computeRessourcesAverage(vms, vmsStats, EXECUTION_DELAY), + computeRessourcesAverage(vms, vmsStats, MINUTES_OF_HISTORICAL_DATA), + 0.75 + ) + // Compute real CPU usage. Virtuals cpus to real cpu. + setRealCpuAverageOfVms(vms, vmsAverages) + // TODO } async _applyOptimizations (optimizations) { - throw new Error('not yet implemented') + // throw new Error('not yet implemented') } // Compute hosts for each pool. They can change over time. From 296141ad3de00bdcac6dbc6e61697dc25a65ddba Mon Sep 17 00:00:00 2001 From: wescoeur Date: Fri, 26 Feb 2016 16:42:11 +0100 Subject: [PATCH 11/50] Unsable: _computeOptimizations --- packages/xo-server-load-balancer/src/index.js | 52 ++++++++++++++++--- 1 file changed, 45 insertions(+), 7 deletions(-) diff --git a/packages/xo-server-load-balancer/src/index.js b/packages/xo-server-load-balancer/src/index.js index 6d5ee684d..3f532bad7 100644 --- a/packages/xo-server-load-balancer/src/index.js +++ b/packages/xo-server-load-balancer/src/index.js @@ -1,7 +1,8 @@ import filter from 'lodash.filter' +import includes from 'lodash.includes' import intersection from 'lodash.intersection' import uniq from 'lodash.uniq' -import includes from 'lodash.includes' + import { CronJob } from 'cron' import { default as mapToArray } from 'lodash.map' @@ -190,6 +191,18 @@ function setRealCpuAverageOfVms (vms, vmsAverages) { } } +function searchObject (objects, fun) { + let object = 0 + + for (let i = 1; i < objects.length; i++) { + if (fun(object, objects[i]) > 0) { + object = objects[i] + } + } + + return object +} + // =================================================================== class Plan { @@ -232,8 +245,8 @@ class Plan { return } - // 3. Reorder the exceeded hosts by priority. - exceededHosts.sort((a, b) => { + // 3. Search the worst exceeded host by priority. 
+ const toOptimize = searchObject(exceededHosts, (a, b) => { a = avgWithRatio[a.id] b = avgWithRatio[b.id] @@ -241,7 +254,6 @@ class Plan { }) // 4. Search bests combinations for the worst host. - const toOptimize = exceededHosts[0] const optimizations = await this._computeOptimizations( toOptimize, filter(hosts, host => host.id !== toOptimize.id), @@ -256,7 +268,7 @@ class Plan { throw new Error('not yet implemented') } - async _computeOptimizations (exceededHost, hostsAverages) { + async _computeOptimizations (exceededHost, hosts, hostsAverages) { const vms = await this._getVms(exceededHost.id) const vmsStats = await this._getVmsStats(vms, 'minutes') const vmsAverages = computeRessourcesAverageWithWeight( @@ -265,10 +277,36 @@ class Plan { 0.75 ) - // Compute real CPU usage. Virtuals cpus to real cpu. + // Compute real CPU usage. Virtuals cpus to reals cpus. setRealCpuAverageOfVms(vms, vmsAverages) - // TODO + const optimizations = {} + + // Sort vms by cpu usage. (higher to lower) + vms.sort((a, b) => + vmsAverages[b.id].cpus - vmsAverages[a.id].cpus + ) + + const exceededAverages = hostsAverages[exceededHosts.id] + + for (const vm of vms) { + // Search host with lower cpu usage. + const destination = searchObject(hosts, (a, b) => + hostsAverages[b.id].cpus - hostsAverages[a.id] + ) + const destinationAverages = hostsAverages[destination.id] + const vmAverages = vmsAverages[vm.id] + + // Unable to move the vm. + if ( + exceededAverages.cpus - vmAverages.cpu < destinationAverages.cpu + vmAverages.cpu || + + ) { + continue + } + } + + return optimizations } async _applyOptimizations (optimizations) { From f001b2c68f9b8265faca00a4bbdd19b8fd559a3b Mon Sep 17 00:00:00 2001 From: wescoeur Date: Wed, 2 Mar 2016 10:24:56 +0100 Subject: [PATCH 12/50] Unstable, plugin can migrate vm. 
(to test) --- packages/xo-server-load-balancer/src/index.js | 37 ++++++++++++------- 1 file changed, 24 insertions(+), 13 deletions(-) diff --git a/packages/xo-server-load-balancer/src/index.js b/packages/xo-server-load-balancer/src/index.js index 3f532bad7..88d96804f 100644 --- a/packages/xo-server-load-balancer/src/index.js +++ b/packages/xo-server-load-balancer/src/index.js @@ -22,7 +22,7 @@ const MINUTES_OF_HISTORICAL_DATA = 30 // Threshold cpu in percent. // const CRITICAL_THRESHOLD_CPU = 90 -const HIGH_THRESHOLD_CPU = 0 // 76.5 +const HIGH_THRESHOLD_CPU = 76.5 // const LOW_THRESHOLD_CPU = 22.5 // const CRITICAL_THRESHOLD_FREE_MEMORY = 51 @@ -153,8 +153,10 @@ function computeRessourcesAverage (objects, objectsStats, nPoints) { objectAverages.cpus = computeAverage( mapToArray(stats.cpus, cpu => computeAverage(cpu, nPoints)) ) + objectAverages.memoryFree = computeAverage(stats.memoryFree, nPoints) objectAverages.memoryUsed = computeAverage(stats.memoryUsed, nPoints) + objectAverages.memory = computeAverage(stats.memory, nPoints) } return averages @@ -192,7 +194,7 @@ function setRealCpuAverageOfVms (vms, vmsAverages) { } function searchObject (objects, fun) { - let object = 0 + let object = objects[0] for (let i = 1; i < objects.length; i++) { if (fun(object, objects[i]) > 0) { @@ -238,6 +240,7 @@ class Plan { // 2. Check in the last 30 min interval with ratio. const avgBefore = computeRessourcesAverage(hosts, hostsStats, MINUTES_OF_HISTORICAL_DATA) const avgWithRatio = computeRessourcesAverageWithWeight(avgNow, avgBefore, 0.75) + exceededHosts = checkRessourcesThresholds(exceededHosts, avgWithRatio) // No ressource's utilization problem. @@ -254,21 +257,18 @@ class Plan { }) // 4. Search bests combinations for the worst host. - const optimizations = await this._computeOptimizations( + await this._optimize( toOptimize, filter(hosts, host => host.id !== toOptimize.id), avgWithRatio ) - - // 5. Apply optimizations if necessary. 
- await this._applyOptimizations(optimizations) } async _executeInDensityMode () { throw new Error('not yet implemented') } - async _computeOptimizations (exceededHost, hosts, hostsAverages) { + async _optimize (exceededHost, hosts, hostsAverages) { const vms = await this._getVms(exceededHost.id) const vmsStats = await this._getVmsStats(vms, 'minutes') const vmsAverages = computeRessourcesAverageWithWeight( @@ -280,33 +280,44 @@ class Plan { // Compute real CPU usage. Virtuals cpus to reals cpus. setRealCpuAverageOfVms(vms, vmsAverages) - const optimizations = {} - // Sort vms by cpu usage. (higher to lower) vms.sort((a, b) => vmsAverages[b.id].cpus - vmsAverages[a.id].cpus ) - const exceededAverages = hostsAverages[exceededHosts.id] + const exceededAverages = hostsAverages[exceededHost.id] + const promises = [] + + const xapiSrc = this.xo.getXapi(exceededHost) for (const vm of vms) { // Search host with lower cpu usage. const destination = searchObject(hosts, (a, b) => hostsAverages[b.id].cpus - hostsAverages[a.id] - ) + ) const destinationAverages = hostsAverages[destination.id] const vmAverages = vmsAverages[vm.id] // Unable to move the vm. if ( exceededAverages.cpus - vmAverages.cpu < destinationAverages.cpu + vmAverages.cpu || - + destinationAverages.memoryFree < vmAverages.memory ) { continue } + + exceededAverages.cpus -= vmAverages.cpu + destinationAverages.cpu += vmAverages.cpu + + exceededAverages.memoryFree += vmAverages.memory + destinationAverages.memoryFree -= vmAverages.memory + + promises.push( + xapiSrc.migrateVm(vm._xapiId, this.xo.getXapi(destination), destination._xapiId) + ) } - return optimizations + return } async _applyOptimizations (optimizations) { From b0bf18e2350d06db2a537e869788fe1e9e80d91f Mon Sep 17 00:00:00 2001 From: wescoeur Date: Wed, 2 Mar 2016 10:36:43 +0100 Subject: [PATCH 13/50] Debug messages. 
--- packages/xo-server-load-balancer/src/index.js | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/packages/xo-server-load-balancer/src/index.js b/packages/xo-server-load-balancer/src/index.js index 88d96804f..77d978f0a 100644 --- a/packages/xo-server-load-balancer/src/index.js +++ b/packages/xo-server-load-balancer/src/index.js @@ -8,6 +8,15 @@ import { default as mapToArray } from 'lodash.map' // =================================================================== +const noop = () => {} + +const LOAD_BALANCER_DEBUG = 1 +const debug = LOAD_BALANCER_DEBUG + ? str => console.log(`[load-balancer]${str}`) + : noop + +// =================================================================== + const PERFORMANCE_MODE = 0 const DENSITY_MODE = 1 @@ -155,7 +164,6 @@ function computeRessourcesAverage (objects, objectsStats, nPoints) { ) objectAverages.memoryFree = computeAverage(stats.memoryFree, nPoints) - objectAverages.memoryUsed = computeAverage(stats.memoryUsed, nPoints) objectAverages.memory = computeAverage(stats.memory, nPoints) } @@ -312,6 +320,8 @@ class Plan { exceededAverages.memoryFree += vmAverages.memory destinationAverages.memoryFree -= vmAverages.memory + debug(`Migrate VM (${vm.id}) to Host (${destination.id}) from Host (${exceededHost.id})`) + promises.push( xapiSrc.migrateVm(vm._xapiId, this.xo.getXapi(destination), destination._xapiId) ) @@ -320,10 +330,6 @@ class Plan { return } - async _applyOptimizations (optimizations) { - // throw new Error('not yet implemented') - } - // Compute hosts for each pool. They can change over time. _getHosts () { return filter(this.xo.getObjects(), object => From d9bf7c7d124935f9bf87297356684b86a0fd5c7e Mon Sep 17 00:00:00 2001 From: wescoeur Date: Wed, 2 Mar 2016 11:20:28 +0100 Subject: [PATCH 14/50] Many fixes. Configure must operate if cron job is running. 
--- packages/xo-server-load-balancer/src/index.js | 40 +++++++++++++------ 1 file changed, 28 insertions(+), 12 deletions(-) diff --git a/packages/xo-server-load-balancer/src/index.js b/packages/xo-server-load-balancer/src/index.js index 77d978f0a..d0516034c 100644 --- a/packages/xo-server-load-balancer/src/index.js +++ b/packages/xo-server-load-balancer/src/index.js @@ -1,3 +1,6 @@ +import EventEmitter from 'events' + +import eventToPromise from 'event-to-promise' import filter from 'lodash.filter' import includes from 'lodash.includes' import intersection from 'lodash.intersection' @@ -6,6 +9,8 @@ import uniq from 'lodash.uniq' import { CronJob } from 'cron' import { default as mapToArray } from 'lodash.map' +class Emitter extends EventEmitter {} + // =================================================================== const noop = () => {} @@ -111,24 +116,30 @@ export const configurationSchema = { // =================================================================== const makeCronJob = (cronPattern, fn) => { - let running + const job = { + running: false, + emitter: new Emitter() + } - const job = new CronJob(cronPattern, async () => { - if (running) { + job.cron = new CronJob(cronPattern, async () => { + if (job.running) { return } - running = true + job.running = true try { await fn() } catch (error) { console.error('[WARN] scheduled function:', error && error.stack || error) } finally { - running = false + job.running = false + job.emitter.emit('finish') } }) + job.isEnabled = () => job.cron.running + return job } @@ -242,6 +253,7 @@ class Plan { // No ressource's utilization problem. if (exceededHosts.length === 0) { + debug('No ressource\'s utilization problem.') return } @@ -253,6 +265,7 @@ class Plan { // No ressource's utilization problem. 
if (exceededHosts.length === 0) { + debug('No ressource\'s utilization problem.') return } @@ -322,9 +335,9 @@ class Plan { debug(`Migrate VM (${vm.id}) to Host (${destination.id}) from Host (${exceededHost.id})`) - promises.push( - xapiSrc.migrateVm(vm._xapiId, this.xo.getXapi(destination), destination._xapiId) - ) + // promises.push( + // xapiSrc.migrateVm(vm._xapiId, this.xo.getXapi(destination), destination._xapiId) + // ) } return @@ -384,18 +397,21 @@ class LoadBalancerPlugin { constructor (xo) { this.xo = xo this._cronJob = makeCronJob(`*/${EXECUTION_DELAY} * * * *`, ::this._executePlans) + this._emitter } async configure ({ plans }) { const cronJob = this._cronJob - const enabled = cronJob.running + const enabled = cronJob.isEnabled() if (enabled) { cronJob.stop() } // Wait until all old plans stopped running. - await this._plansPromise + if (cronJob.running) { + await eventToPromise(cronJob.emitter, 'finish') + } this._plans = [] this._poolIds = [] // Used pools. @@ -454,9 +470,9 @@ class LoadBalancerPlugin { } _executePlans () { - return (this._plansPromise = Promise.all( + return Promise.all( mapToArray(this._plans, plan => plan.execute()) - )) + ) } } From e5146f7defe86c9afa510e4cb2957741d0612887 Mon Sep 17 00:00:00 2001 From: wescoeur Date: Wed, 2 Mar 2016 12:16:30 +0100 Subject: [PATCH 15/50] Many fixes. Cron Job is in a job structure. --- packages/xo-server-load-balancer/src/index.js | 34 +++++++------------ 1 file changed, 13 insertions(+), 21 deletions(-) diff --git a/packages/xo-server-load-balancer/src/index.js b/packages/xo-server-load-balancer/src/index.js index d0516034c..bd8f68a76 100644 --- a/packages/xo-server-load-balancer/src/index.js +++ b/packages/xo-server-load-balancer/src/index.js @@ -36,11 +36,11 @@ const MINUTES_OF_HISTORICAL_DATA = 30 // Threshold cpu in percent. 
// const CRITICAL_THRESHOLD_CPU = 90 -const HIGH_THRESHOLD_CPU = 76.5 +const HIGH_THRESHOLD_CPU = 75 // const LOW_THRESHOLD_CPU = 22.5 -// const CRITICAL_THRESHOLD_FREE_MEMORY = 51 -const HIGH_THRESHOLD_FREE_MEMORY = 63.75 +// const CRITICAL_THRESHOLD_FREE_MEMORY = 50 +const HIGH_THRESHOLD_FREE_MEMORY = 65 // const LOW_THRESHOLD_FREE_MEMORY = 1020 // =================================================================== @@ -115,7 +115,7 @@ export const configurationSchema = { // =================================================================== -const makeCronJob = (cronPattern, fn) => { +const makeJob = (cronPattern, fn) => { const job = { running: false, emitter: new Emitter() @@ -396,21 +396,21 @@ class Plan { class LoadBalancerPlugin { constructor (xo) { this.xo = xo - this._cronJob = makeCronJob(`*/${EXECUTION_DELAY} * * * *`, ::this._executePlans) + this._job = makeJob(`*/${EXECUTION_DELAY} * * * *`, ::this._executePlans) this._emitter } async configure ({ plans }) { - const cronJob = this._cronJob - const enabled = cronJob.isEnabled() + const job = this._job + const enabled = job.isEnabled() if (enabled) { - cronJob.stop() + job.stop() } // Wait until all old plans stopped running. - if (cronJob.running) { - await eventToPromise(cronJob.emitter, 'finish') + if (job.running) { + await eventToPromise(job.emitter, 'finish') } this._plans = [] @@ -437,25 +437,17 @@ class LoadBalancerPlugin { } } - // TMP - this._addPlan({ - name: 'Test plan', - mode: PERFORMANCE_MODE, - behavior: AGGRESSIVE_BEHAVIOR, - poolIds: [ '313624ab-0958-bb1e-45b5-7556a463a10b' ] - }) - if (enabled) { - cronJob.start() + job.start() } } load () { - this._cronJob.start() + this._job.cron.start() } unload () { - this._cronJob.stop() + this._job.cron.stop() } _addPlan (plan) { From 7f06d6e68ced1c59301516df7ecb4a299a074475 Mon Sep 17 00:00:00 2001 From: wescoeur Date: Wed, 2 Mar 2016 16:56:59 +0100 Subject: [PATCH 16/50] Low threshold are automatically computed. 
--- packages/xo-server-load-balancer/src/index.js | 83 ++++++++++++------- 1 file changed, 54 insertions(+), 29 deletions(-) diff --git a/packages/xo-server-load-balancer/src/index.js b/packages/xo-server-load-balancer/src/index.js index bd8f68a76..b7bb82b84 100644 --- a/packages/xo-server-load-balancer/src/index.js +++ b/packages/xo-server-load-balancer/src/index.js @@ -34,14 +34,14 @@ const AGGRESSIVE_BEHAVIOR = 2 const EXECUTION_DELAY = 1 const MINUTES_OF_HISTORICAL_DATA = 30 -// Threshold cpu in percent. -// const CRITICAL_THRESHOLD_CPU = 90 -const HIGH_THRESHOLD_CPU = 75 -// const LOW_THRESHOLD_CPU = 22.5 +// CPU threshold in percent. +const DEFAULT_HIGH_THRESHOLD_CPU = 75 -// const CRITICAL_THRESHOLD_FREE_MEMORY = 50 -const HIGH_THRESHOLD_FREE_MEMORY = 65 -// const LOW_THRESHOLD_FREE_MEMORY = 1020 +// Memory threshold in MB. +const DEFAULT_HIGH_THRESHOLD_MEMORY_FREE = 64 + +const THRESHOLD_FACTOR = 0.3 +const THRESHOLD_FACTOR_MEMORY_FREE = 16 // =================================================================== @@ -181,17 +181,6 @@ function computeRessourcesAverage (objects, objectsStats, nPoints) { return averages } -function checkRessourcesThresholds (objects, averages) { - return filter(objects, object => { - const objectAverages = averages[object.id] - - return ( - objectAverages.cpus >= HIGH_THRESHOLD_CPU || - objectAverages.memoryFree >= HIGH_THRESHOLD_FREE_MEMORY - ) - }) -} - function computeRessourcesAverageWithWeight (averages1, averages2, ratio) { const averages = {} @@ -227,12 +216,29 @@ function searchObject (objects, fun) { // =================================================================== class Plan { - constructor (xo, { name, mode, behavior, poolIds }) { + constructor (xo, { name, mode, behavior, poolIds, thresholds = {} }) { this.xo = xo this._name = name // Useful ? 
this._mode = mode this._behavior = behavior this._poolIds = poolIds + + this._thresholds = { + cpu: { + high: thresholds.cpu || DEFAULT_HIGH_THRESHOLD_CPU + }, + memoryFree: { + high: thresholds.memoryFree || DEFAULT_HIGH_THRESHOLD_MEMORY_FREE * 1024 * 1024 + } + } + + for (const key in this._thresholds) { + const attr = this._thresholds[key] + + attr.low = (key !== 'memoryFree') + ? attr.high * THRESHOLD_FACTOR + : attr.high * THRESHOLD_FACTOR_MEMORY_FREE + } } async execute () { @@ -249,7 +255,7 @@ class Plan { // 1. Check if a ressource's utilization exceeds threshold. const avgNow = computeRessourcesAverage(hosts, hostsStats, EXECUTION_DELAY) - let exceededHosts = checkRessourcesThresholds(hosts, avgNow) + let exceededHosts = this._checkRessourcesThresholds(hosts, avgNow) // No ressource's utilization problem. if (exceededHosts.length === 0) { @@ -261,7 +267,7 @@ class Plan { const avgBefore = computeRessourcesAverage(hosts, hostsStats, MINUTES_OF_HISTORICAL_DATA) const avgWithRatio = computeRessourcesAverageWithWeight(avgNow, avgBefore, 0.75) - exceededHosts = checkRessourcesThresholds(exceededHosts, avgWithRatio) + exceededHosts = this._checkRessourcesThresholds(exceededHosts, avgWithRatio) // No ressource's utilization problem. if (exceededHosts.length === 0) { @@ -343,6 +349,21 @@ class Plan { return } + _checkRessourcesThresholds (objects, averages) { + return filter(objects, object => { + const objectAverages = averages[object.id] + + return ( + objectAverages.cpus >= this._thresholds.cpu.high || + objectAverages.memoryFree >= this._thresholds.memoryFree.high + ) + }) + } + + // =================================================================== + // Get objects. + // =================================================================== + // Compute hosts for each pool. They can change over time. 
_getHosts () { return filter(this.xo.getObjects(), object => @@ -350,6 +371,18 @@ class Plan { ) } + async _getVms (hostId) { + return filter(this.xo.getObjects(), object => + object.type === 'VM' && + object.power_state === 'Running' && + object.$container === hostId + ) + } + + // =================================================================== + // Get stats. + // =================================================================== + async _getHostsStats (hosts, granularity) { const hostsStats = {} @@ -366,14 +399,6 @@ class Plan { return hostsStats } - async _getVms (hostId) { - return filter(this.xo.getObjects(), object => - object.type === 'VM' && - object.power_state === 'Running' && - object.$container === hostId - ) - } - async _getVmsStats (vms, granularity) { const vmsStats = {} From 651e4bb7757e5ffdcbe4d85b640d9c646fbe2dac Mon Sep 17 00:00:00 2001 From: wescoeur Date: Thu, 3 Mar 2016 11:17:04 +0100 Subject: [PATCH 17/50] Fixes (cpus to cpu). Add Low/Critical thresholds. --- packages/xo-server-load-balancer/src/index.js | 92 +++++++++++++------ 1 file changed, 64 insertions(+), 28 deletions(-) diff --git a/packages/xo-server-load-balancer/src/index.js b/packages/xo-server-load-balancer/src/index.js index b7bb82b84..ce7311e05 100644 --- a/packages/xo-server-load-balancer/src/index.js +++ b/packages/xo-server-load-balancer/src/index.js @@ -35,13 +35,16 @@ const EXECUTION_DELAY = 1 const MINUTES_OF_HISTORICAL_DATA = 30 // CPU threshold in percent. -const DEFAULT_HIGH_THRESHOLD_CPU = 75 +const DEFAULT_CRITICAL_THRESHOLD_CPU = 90.0 // Memory threshold in MB. 
-const DEFAULT_HIGH_THRESHOLD_MEMORY_FREE = 64 +const DEFAULT_CRITICAL_THRESHOLD_MEMORY_FREE = 64.0 -const THRESHOLD_FACTOR = 0.3 -const THRESHOLD_FACTOR_MEMORY_FREE = 16 +const HIGH_THRESHOLD_FACTOR = 0.85 +const LOW_THRESHOLD_FACTOR = 0.25 + +const HIGH_THRESHOLD_MEMORY_FREE_FACTOR = 1.25 +const LOW_THRESHOLD_MEMORY_FREE_FACTOR = 20.0 // =================================================================== @@ -170,7 +173,7 @@ function computeRessourcesAverage (objects, objectsStats, nPoints) { const { stats } = objectsStats[id] const objectAverages = averages[id] = {} - objectAverages.cpus = computeAverage( + objectAverages.cpu = computeAverage( mapToArray(stats.cpus, cpu => computeAverage(cpu, nPoints)) ) @@ -197,7 +200,7 @@ function computeRessourcesAverageWithWeight (averages1, averages2, ratio) { function setRealCpuAverageOfVms (vms, vmsAverages) { for (const vm of vms) { - vmsAverages[vm.id].cpus /= vm.CPUs.number + vmsAverages[vm.id].cpu /= vm.CPUs.number } } @@ -225,19 +228,26 @@ class Plan { this._thresholds = { cpu: { - high: thresholds.cpu || DEFAULT_HIGH_THRESHOLD_CPU + critical: thresholds.cpu || DEFAULT_CRITICAL_THRESHOLD_CPU }, memoryFree: { - high: thresholds.memoryFree || DEFAULT_HIGH_THRESHOLD_MEMORY_FREE * 1024 * 1024 + critical: thresholds.memoryFree || DEFAULT_CRITICAL_THRESHOLD_MEMORY_FREE * 1024 * 1024 } } for (const key in this._thresholds) { const attr = this._thresholds[key] + const { critical } = attr - attr.low = (key !== 'memoryFree') - ? attr.high * THRESHOLD_FACTOR - : attr.high * THRESHOLD_FACTOR_MEMORY_FREE + if (key === 'memoryFree') { + attr.high = critical * HIGH_THRESHOLD_MEMORY_FREE_FACTOR + attr.low = critical * LOW_THRESHOLD_MEMORY_FREE_FACTOR + + continue + } + + attr.high = critical * HIGH_THRESHOLD_FACTOR + attr.low = critical * LOW_THRESHOLD_FACTOR } } @@ -249,6 +259,10 @@ class Plan { } } + // ================================================================= + // Performance mode. 
+ // ================================================================= + async _executeInPerformanceMode () { const hosts = this._getHosts() const hostsStats = await this._getHostsStats(hosts, 'minutes') @@ -280,7 +294,7 @@ class Plan { a = avgWithRatio[a.id] b = avgWithRatio[b.id] - return (b.cpus - a.cpus) || (a.memoryFree - b.memoryFree) + return (b.cpu - a.cpu) || (a.memoryFree - b.memoryFree) }) // 4. Search bests combinations for the worst host. @@ -291,10 +305,6 @@ class Plan { ) } - async _executeInDensityMode () { - throw new Error('not yet implemented') - } - async _optimize (exceededHost, hosts, hostsAverages) { const vms = await this._getVms(exceededHost.id) const vmsStats = await this._getVmsStats(vms, 'minutes') @@ -309,7 +319,7 @@ class Plan { // Sort vms by cpu usage. (higher to lower) vms.sort((a, b) => - vmsAverages[b.id].cpus - vmsAverages[a.id].cpus + vmsAverages[b.id].cpu - vmsAverages[a.id].cpu ) const exceededAverages = hostsAverages[exceededHost.id] @@ -320,20 +330,20 @@ class Plan { for (const vm of vms) { // Search host with lower cpu usage. const destination = searchObject(hosts, (a, b) => - hostsAverages[b.id].cpus - hostsAverages[a.id] + hostsAverages[b.id].cpu - hostsAverages[a.id].cpu ) const destinationAverages = hostsAverages[destination.id] const vmAverages = vmsAverages[vm.id] // Unable to move the vm. 
if ( - exceededAverages.cpus - vmAverages.cpu < destinationAverages.cpu + vmAverages.cpu || + exceededAverages.cpu - vmAverages.cpu < destinationAverages.cpu + vmAverages.cpu || destinationAverages.memoryFree < vmAverages.memory ) { continue } - exceededAverages.cpus -= vmAverages.cpu + exceededAverages.cpu -= vmAverages.cpu destinationAverages.cpu += vmAverages.cpu exceededAverages.memoryFree += vmAverages.memory @@ -349,15 +359,40 @@ class Plan { return } - _checkRessourcesThresholds (objects, averages) { - return filter(objects, object => { - const objectAverages = averages[object.id] + // ================================================================= + // Density mode. + // ================================================================= - return ( - objectAverages.cpus >= this._thresholds.cpu.high || - objectAverages.memoryFree >= this._thresholds.memoryFree.high - ) - }) + async _executeInDensityMode () { + throw new Error('not yet implemented') + } + + // ================================================================= + // Check ressources. 
+ // ================================================================= + + _checkRessourcesThresholds (objects, averages, mode = PERFORMANCE_MODE) { + if (mode === PERFORMANCE_MODE) { + return filter(objects, object => { + const objectAverages = averages[object.id] + + return ( + objectAverages.cpu >= this._thresholds.cpu.high || + objectAverages.memoryFree <= this._thresholds.memoryFree.high + ) + }) + } else if (mode === DENSITY_MODE) { + return filter(objects, object => { + const objectAverages = averages[object.id] + + return ( + objectAverages.cpu < this._thresholds.cpu.low || + objectAverages.memoryFree > this._thresholds.memoryFree.low + ) + }) + } else { + throw new Error('Unsupported load balancing mode.') + } } // =================================================================== @@ -416,6 +451,7 @@ class Plan { } } +// =================================================================== // =================================================================== class LoadBalancerPlugin { From abd89df365d83c7c5eff2702626368deaa49892c Mon Sep 17 00:00:00 2001 From: wescoeur Date: Thu, 3 Mar 2016 14:47:56 +0100 Subject: [PATCH 18/50] Three class: Plan, PerfomancePlan, DensityPlan. --- packages/xo-server-load-balancer/src/index.js | 371 +++++++++--------- 1 file changed, 192 insertions(+), 179 deletions(-) diff --git a/packages/xo-server-load-balancer/src/index.js b/packages/xo-server-load-balancer/src/index.js index ce7311e05..75a4ad7ca 100644 --- a/packages/xo-server-load-balancer/src/index.js +++ b/packages/xo-server-load-balancer/src/index.js @@ -25,10 +25,6 @@ const debug = LOAD_BALANCER_DEBUG const PERFORMANCE_MODE = 0 const DENSITY_MODE = 1 -const LOW_BEHAVIOR = 0 -const NORMAL_BEHAVIOR = 1 -const AGGRESSIVE_BEHAVIOR = 2 - // Delay between each ressources evaluation in minutes. // Must be less than MINUTES_OF_HISTORICAL_DATA. const EXECUTION_DELAY = 1 @@ -40,6 +36,7 @@ const DEFAULT_CRITICAL_THRESHOLD_CPU = 90.0 // Memory threshold in MB. 
const DEFAULT_CRITICAL_THRESHOLD_MEMORY_FREE = 64.0 +// Thresholds factors. const HIGH_THRESHOLD_FACTOR = 0.85 const LOW_THRESHOLD_FACTOR = 0.25 @@ -79,22 +76,6 @@ export const configurationSchema = { ] }, - behavior: { - type: 'object', - - properties: { - low: { type: 'boolean' }, - normal: { type: 'boolean' }, - aggressive: { type: 'boolean' } - }, - - oneOf: [ - { required: ['low'] }, - { required: ['normal'] }, - { required: ['aggressive'] } - ] - }, - pools: { type: 'array', description: 'list of pools id where to apply the policy', @@ -118,6 +99,8 @@ export const configurationSchema = { // =================================================================== +// Create a job not enabled by default. +// A job is a cron task, a running and enabled state. const makeJob = (cronPattern, fn) => { const job = { running: false, @@ -146,6 +129,23 @@ const makeJob = (cronPattern, fn) => { return job } +// Compare a list of objects and give the best. +function searchObject (objects, fun) { + let object = objects[0] + + for (let i = 1; i < objects.length; i++) { + if (fun(object, objects[i]) > 0) { + object = objects[i] + } + } + + return object +} + +// =================================================================== +// Averages. +// =================================================================== + function computeAverage (values, nPoints = values.length) { let sum = 0 let tot = 0 @@ -204,28 +204,15 @@ function setRealCpuAverageOfVms (vms, vmsAverages) { } } -function searchObject (objects, fun) { - let object = objects[0] - - for (let i = 1; i < objects.length; i++) { - if (fun(object, objects[i]) > 0) { - object = objects[i] - } - } - - return object -} - // =================================================================== class Plan { - constructor (xo, { name, mode, behavior, poolIds, thresholds = {} }) { + constructor (xo, name, poolIds, { + thresholds = {} + } = {}) { this.xo = xo - this._name = name // Useful ? 
- this._mode = mode - this._behavior = behavior + this._name = name this._poolIds = poolIds - this._thresholds = { cpu: { critical: thresholds.cpu || DEFAULT_CRITICAL_THRESHOLD_CPU @@ -251,29 +238,24 @@ class Plan { } } - async execute () { - if (this._mode === PERFORMANCE_MODE) { - await this._executeInPerformanceMode() - } else { - await this._executeInDensityMode() - } + execute () { + throw new Error('Not implemented') } - // ================================================================= - // Performance mode. - // ================================================================= + // =================================================================== + // Get hosts to optimize. + // =================================================================== - async _executeInPerformanceMode () { + async _findHostsToOptimize () { const hosts = this._getHosts() const hostsStats = await this._getHostsStats(hosts, 'minutes') // 1. Check if a ressource's utilization exceeds threshold. const avgNow = computeRessourcesAverage(hosts, hostsStats, EXECUTION_DELAY) - let exceededHosts = this._checkRessourcesThresholds(hosts, avgNow) + const toOptimize = this._checkRessourcesThresholds(hosts, avgNow) // No ressource's utilization problem. - if (exceededHosts.length === 0) { - debug('No ressource\'s utilization problem.') + if (toOptimize.length === 0) { return } @@ -281,118 +263,15 @@ class Plan { const avgBefore = computeRessourcesAverage(hosts, hostsStats, MINUTES_OF_HISTORICAL_DATA) const avgWithRatio = computeRessourcesAverageWithWeight(avgNow, avgBefore, 0.75) - exceededHosts = this._checkRessourcesThresholds(exceededHosts, avgWithRatio) - - // No ressource's utilization problem. - if (exceededHosts.length === 0) { - debug('No ressource\'s utilization problem.') - return + return { + toOptimize: this._checkRessourcesThresholds(toOptimize, avgWithRatio), + averages: avgWithRatio, + hosts } - - // 3. Search the worst exceeded host by priority. 
- const toOptimize = searchObject(exceededHosts, (a, b) => { - a = avgWithRatio[a.id] - b = avgWithRatio[b.id] - - return (b.cpu - a.cpu) || (a.memoryFree - b.memoryFree) - }) - - // 4. Search bests combinations for the worst host. - await this._optimize( - toOptimize, - filter(hosts, host => host.id !== toOptimize.id), - avgWithRatio - ) } - async _optimize (exceededHost, hosts, hostsAverages) { - const vms = await this._getVms(exceededHost.id) - const vmsStats = await this._getVmsStats(vms, 'minutes') - const vmsAverages = computeRessourcesAverageWithWeight( - computeRessourcesAverage(vms, vmsStats, EXECUTION_DELAY), - computeRessourcesAverage(vms, vmsStats, MINUTES_OF_HISTORICAL_DATA), - 0.75 - ) - - // Compute real CPU usage. Virtuals cpus to reals cpus. - setRealCpuAverageOfVms(vms, vmsAverages) - - // Sort vms by cpu usage. (higher to lower) - vms.sort((a, b) => - vmsAverages[b.id].cpu - vmsAverages[a.id].cpu - ) - - const exceededAverages = hostsAverages[exceededHost.id] - const promises = [] - - const xapiSrc = this.xo.getXapi(exceededHost) - - for (const vm of vms) { - // Search host with lower cpu usage. - const destination = searchObject(hosts, (a, b) => - hostsAverages[b.id].cpu - hostsAverages[a.id].cpu - ) - const destinationAverages = hostsAverages[destination.id] - const vmAverages = vmsAverages[vm.id] - - // Unable to move the vm. 
- if ( - exceededAverages.cpu - vmAverages.cpu < destinationAverages.cpu + vmAverages.cpu || - destinationAverages.memoryFree < vmAverages.memory - ) { - continue - } - - exceededAverages.cpu -= vmAverages.cpu - destinationAverages.cpu += vmAverages.cpu - - exceededAverages.memoryFree += vmAverages.memory - destinationAverages.memoryFree -= vmAverages.memory - - debug(`Migrate VM (${vm.id}) to Host (${destination.id}) from Host (${exceededHost.id})`) - - // promises.push( - // xapiSrc.migrateVm(vm._xapiId, this.xo.getXapi(destination), destination._xapiId) - // ) - } - - return - } - - // ================================================================= - // Density mode. - // ================================================================= - - async _executeInDensityMode () { - throw new Error('not yet implemented') - } - - // ================================================================= - // Check ressources. - // ================================================================= - - _checkRessourcesThresholds (objects, averages, mode = PERFORMANCE_MODE) { - if (mode === PERFORMANCE_MODE) { - return filter(objects, object => { - const objectAverages = averages[object.id] - - return ( - objectAverages.cpu >= this._thresholds.cpu.high || - objectAverages.memoryFree <= this._thresholds.memoryFree.high - ) - }) - } else if (mode === DENSITY_MODE) { - return filter(objects, object => { - const objectAverages = averages[object.id] - - return ( - objectAverages.cpu < this._thresholds.cpu.low || - objectAverages.memoryFree > this._thresholds.memoryFree.low - ) - }) - } else { - throw new Error('Unsupported load balancing mode.') - } + _checkRessourcesThresholds () { + throw new Error('Not implemented') } // =================================================================== @@ -451,6 +330,145 @@ class Plan { } } +// =================================================================== + +class PerformancePlan extends Plan { + constructor (xo, name, poolIds, options) { 
+ super(xo, name, poolIds, options) + } + + _checkRessourcesThresholds (objects, averages) { + return filter(objects, object => { + const objectAverages = averages[object.id] + + return ( + objectAverages.cpu >= this._thresholds.cpu.high || + objectAverages.memoryFree <= this._thresholds.memoryFree.high + ) + }) + } + + async execute () { + const { + averages, + hosts, + toOptimize + } = await this._findHostsToOptimize() + + if (toOptimize.length === 0) { + return + } + + const exceededHost = searchObject(toOptimize, (a, b) => { + a = averages[a.id] + b = averages[b.id] + + return (b.cpu - a.cpu) || (a.memoryFree - b.memoryFree) + }) + + // 3. Search bests combinations for the worst host. + await this._optimize({ + exceededHost, + hosts: filter(hosts, host => host.id !== exceededHost.id), + hostsAverages: averages + }) + } + + async _optimize ({ exceededHost, hosts, hostsAverages }) { + const vms = await this._getVms(exceededHost.id) + const vmsStats = await this._getVmsStats(vms, 'minutes') + const vmsAverages = computeRessourcesAverageWithWeight( + computeRessourcesAverage(vms, vmsStats, EXECUTION_DELAY), + computeRessourcesAverage(vms, vmsStats, MINUTES_OF_HISTORICAL_DATA), + 0.75 + ) + + // Compute real CPU usage. Virtuals cpus to reals cpus. + setRealCpuAverageOfVms(vms, vmsAverages) + + // Sort vms by cpu usage. (higher to lower) + vms.sort((a, b) => + vmsAverages[b.id].cpu - vmsAverages[a.id].cpu + ) + + const exceededAverages = hostsAverages[exceededHost.id] + const promises = [] + + const xapiSrc = this.xo.getXapi(exceededHost) + + for (const vm of vms) { + // Search host with lower cpu usage. + const destination = searchObject(hosts, (a, b) => + hostsAverages[b.id].cpu - hostsAverages[a.id].cpu + ) + const destinationAverages = hostsAverages[destination.id] + const vmAverages = vmsAverages[vm.id] + + // Unable to move the vm. 
+ if ( + exceededAverages.cpu - vmAverages.cpu < destinationAverages.cpu + vmAverages.cpu || + destinationAverages.memoryFree > vmAverages.memory + ) { + continue + } + + exceededAverages.cpu -= vmAverages.cpu + destinationAverages.cpu += vmAverages.cpu + + exceededAverages.memoryFree += vmAverages.memory + destinationAverages.memoryFree -= vmAverages.memory + + debug(`Migrate VM (${vm.id}) to Host (${destination.id}) from Host (${exceededHost.id})`) + + // promises.push( + // xapiSrc.migrateVm(vm._xapiId, this.xo.getXapi(destination), destination._xapiId) + // ) + } + + await Promise.all(promises) + + return + } +} + +// =================================================================== + +class DensityPlan extends Plan { + constructor (xo, name, poolIds, options) { + super(xo, name, poolIds, options) + } + + _checkRessourcesThresholds (objects, averages) { + return filter(objects, object => { + const objectAverages = averages[object.id] + + return ( + objectAverages.cpu < this._thresholds.cpu.low || + objectAverages.memoryFree > this._thresholds.memoryFree.low + ) + }) + } + + async execute () { + throw new Error('Not implemented') + + const hosts = this._getHosts() + const hostsStats = await this._getHostsStats(hosts, 'minutes') + + // 1. Check if a ressource's utilization is under lower threshold. + const avgNow = computeRessourcesAverage(hosts, hostsStats, EXECUTION_DELAY) + let exceededHosts = this._checkRessourcesThresholds(hosts, avgNow, DENSITY_MODE) + + // No ressource's utilization problem. + if (exceededHosts.length === 0) { + debug('No optimization found.') + return + } + + // TODO + } +} + // =================================================================== // =================================================================== @@ -479,22 +497,13 @@ class LoadBalancerPlugin { if (plans) { for (const plan of plans) { - const mode = plan.mode.performance - ? 
PERFORMANCE_MODE - : DENSITY_MODE - - const { behavior: planBehavior } = plan - let behavior - - if (planBehavior.low) { - behavior = LOW_BEHAVIOR - } else if (planBehavior.normal) { - behavior = NORMAL_BEHAVIOR - } else { - behavior = AGGRESSIVE_BEHAVIOR - } - - this._addPlan({ name: plan.name, mode, behavior, poolIds: plan.pools }) + this._addPlan({ + name: plan.name, + mode: plan.mode.performance + ? PERFORMANCE_MODE + : DENSITY_MODE, + poolIds: plan.pools + }) } } @@ -511,15 +520,19 @@ class LoadBalancerPlugin { this._job.cron.stop() } - _addPlan (plan) { - const poolIds = plan.poolIds = uniq(plan.poolIds) + _addPlan ({ name, mode, poolIds }) { + poolIds = uniq(poolIds) // Check already used pools. - if (intersection(poolIds, this._poolIds) > 0) { + if (intersection(poolIds, this._poolIds).length > 0) { throw new Error(`Pool(s) already included in an other plan: ${poolIds}`) } - this._plans.push(new Plan(this.xo, plan)) + this._poolIds = this._poolIds.concat(poolIds) + this._plans.push(mode === PERFORMANCE_MODE + ? new PerformancePlan(this.xo, name, poolIds) + : new DensityPlan(this.xo, name, poolIds) + ) } _executePlans () { From 9c0967170a1e99b59d711761e237251df64426de Mon Sep 17 00:00:00 2001 From: wescoeur Date: Wed, 9 Mar 2016 12:23:02 +0100 Subject: [PATCH 19/50] Schema update. 
--- packages/xo-server-load-balancer/src/index.js | 38 +++++++------------ 1 file changed, 14 insertions(+), 24 deletions(-) diff --git a/packages/xo-server-load-balancer/src/index.js b/packages/xo-server-load-balancer/src/index.js index 75a4ad7ca..29c4ce872 100644 --- a/packages/xo-server-load-balancer/src/index.js +++ b/packages/xo-server-load-balancer/src/index.js @@ -52,49 +52,39 @@ export const configurationSchema = { plans: { type: 'array', description: 'an array of plans', + title: 'Plans', items: { type: 'object', - title: 'plan', + title: 'Plan', properties: { name: { - type: 'string' + type: 'string', + title: 'Name' }, mode: { - type: 'object', - - properties: { - performance: { type: 'boolean' }, - density: { type: 'boolean' } - }, - - oneOf: [ - { required: ['performance'] }, - { required: ['density'] } - ] + type: 'boolean', + title: 'Mode', + description: 'performance mode if enabled, else density mode' }, pools: { type: 'array', - description: 'list of pools id where to apply the policy', - - items: { - type: 'string', - $objectType: 'pool' - }, - - minItems: 1, - uniqueItems: true + $type: 'Pool', + description: 'list of pools where to apply the policy' } - } + }, + + required: [ 'name', 'mode', 'pools' ] }, minItems: 1 } }, - additionalProperties: false + additionalProperties: false, + required: [ 'plans' ] } // =================================================================== From e25d58d70a925c8f0c59233bf23c9445801fc7c9 Mon Sep 17 00:00:00 2001 From: wescoeur Date: Wed, 9 Mar 2016 15:34:57 +0100 Subject: [PATCH 20/50] Fixes. --- packages/xo-server-load-balancer/src/index.js | 71 ++++++++++++------- 1 file changed, 44 insertions(+), 27 deletions(-) diff --git a/packages/xo-server-load-balancer/src/index.js b/packages/xo-server-load-balancer/src/index.js index 29c4ce872..697f73fdd 100644 --- a/packages/xo-server-load-balancer/src/index.js +++ b/packages/xo-server-load-balancer/src/index.js @@ -268,6 +268,16 @@ class Plan { // Get objects. 
// =================================================================== + _getPlanPools () { + try { + return mapToArray(this._poolIds, poolId => this.xo.getObject(poolId)) + } catch (_) { + return [] + } + + // Not reached. + } + // Compute hosts for each pool. They can change over time. _getHosts () { return filter(this.xo.getObjects(), object => @@ -318,6 +328,15 @@ class Plan { return vmsStats } + + async _getVmsAverages (vms) { + const vmsStats = await this._getVmsStats(vms, 'minutes') + return computeRessourcesAverageWithWeight( + computeRessourcesAverage(vms, vmsStats, EXECUTION_DELAY), + computeRessourcesAverage(vms, vmsStats, MINUTES_OF_HISTORICAL_DATA), + 0.75 + ) + } } // =================================================================== @@ -366,12 +385,7 @@ class PerformancePlan extends Plan { async _optimize ({ exceededHost, hosts, hostsAverages }) { const vms = await this._getVms(exceededHost.id) - const vmsStats = await this._getVmsStats(vms, 'minutes') - const vmsAverages = computeRessourcesAverageWithWeight( - computeRessourcesAverage(vms, vmsStats, EXECUTION_DELAY), - computeRessourcesAverage(vms, vmsStats, MINUTES_OF_HISTORICAL_DATA), - 0.75 - ) + const vmsAverages = this._getVmsAverages // Compute real CPU usage. Virtuals cpus to reals cpus. 
setRealCpuAverageOfVms(vms, vmsAverages) @@ -425,37 +439,40 @@ class PerformancePlan extends Plan { class DensityPlan extends Plan { constructor (xo, name, poolIds, options) { + throw new Error('not yet implemented') // TMP super(xo, name, poolIds, options) } _checkRessourcesThresholds (objects, averages) { - return filter(objects, object => { - const objectAverages = averages[object.id] - - return ( - objectAverages.cpu < this._thresholds.cpu.low || - objectAverages.memoryFree > this._thresholds.memoryFree.low - ) - }) + return filter(objects, object => + averages[object.id].cpu < this._thresholds.cpu.high + ) } async execute () { - throw new Error('Not implemented') + const [ + { + averages, + hosts, + toOptimize + }, + pools + ] = await Promise.all(mapToArray( + this._findHostsToOptimize(), + this._getPlanPools() + )) - const hosts = this._getHosts() - const hostsStats = await this._getHostsStats(hosts, 'minutes') + // Optimize master. + console.log(hosts) - // 1. Check if a ressource's utilization is under lower threshold. - const avgNow = computeRessourcesAverage(hosts, hostsStats, EXECUTION_DELAY) - let exceededHosts = this._checkRessourcesThresholds(hosts, avgNow, DENSITY_MODE) - - // No ressource's utilization problem. - if (exceededHosts.length === 0) { - debug('No optimization found.') + if (toOptimize.length === 0) { return } + } + + async _optimizeMaster (master, hosts) { + - // TODO } } @@ -474,7 +491,7 @@ class LoadBalancerPlugin { const enabled = job.isEnabled() if (enabled) { - job.stop() + job.cron.stop() } // Wait until all old plans stopped running. @@ -498,7 +515,7 @@ class LoadBalancerPlugin { } if (enabled) { - job.start() + job.cron.start() } } From eaad41fe5591ab0f0a540079efb9c659ade9fe02 Mon Sep 17 00:00:00 2001 From: wescoeur Date: Wed, 9 Mar 2016 16:06:06 +0100 Subject: [PATCH 21/50] Load balancer config use a checkbox. 
--- packages/xo-server-load-balancer/src/index.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/xo-server-load-balancer/src/index.js b/packages/xo-server-load-balancer/src/index.js index 697f73fdd..4f806f8e5 100644 --- a/packages/xo-server-load-balancer/src/index.js +++ b/packages/xo-server-load-balancer/src/index.js @@ -506,7 +506,7 @@ class LoadBalancerPlugin { for (const plan of plans) { this._addPlan({ name: plan.name, - mode: plan.mode.performance + mode: !plan.mode ? PERFORMANCE_MODE : DENSITY_MODE, poolIds: plan.pools From 021cea0b34a484dff9717135429ebb87f34ce3e0 Mon Sep 17 00:00:00 2001 From: wescoeur Date: Wed, 9 Mar 2016 16:59:36 +0100 Subject: [PATCH 22/50] Thresholds are exposed in configuration. --- packages/xo-server-load-balancer/src/index.js | 56 +++++++++++++------ 1 file changed, 40 insertions(+), 16 deletions(-) diff --git a/packages/xo-server-load-balancer/src/index.js b/packages/xo-server-load-balancer/src/index.js index 4f806f8e5..6d4bad633 100644 --- a/packages/xo-server-load-balancer/src/index.js +++ b/packages/xo-server-load-balancer/src/index.js @@ -74,11 +74,30 @@ export const configurationSchema = { type: 'array', $type: 'Pool', description: 'list of pools where to apply the policy' + }, + + thresholds: { + type: 'object', + title: 'Critical thresholds', + + properties: { + cpu: { + type: 'integer', + title: 'CPU (%)', + description: 'default: 90%' + }, + memoryFree: { + type: 'integer', + title: 'RAM, Free memory (MB)', + description: 'default: 64MB' + } + } } }, required: [ 'name', 'mode', 'pools' ] }, + minItems: 1 } }, @@ -208,7 +227,7 @@ class Plan { critical: thresholds.cpu || DEFAULT_CRITICAL_THRESHOLD_CPU }, memoryFree: { - critical: thresholds.memoryFree || DEFAULT_CRITICAL_THRESHOLD_MEMORY_FREE * 1024 * 1024 + critical: (thresholds.memoryFree || DEFAULT_CRITICAL_THRESHOLD_MEMORY_FREE) * 1024 } } @@ -358,15 +377,17 @@ class PerformancePlan extends Plan { } async execute () { + const data = await 
this._findHostsToOptimize() + + if (!hosts) { + return + } + const { averages, hosts, toOptimize - } = await this._findHostsToOptimize() - - if (toOptimize.length === 0) { - return - } + } = data const exceededHost = searchObject(toOptimize, (a, b) => { a = averages[a.id] @@ -385,7 +406,7 @@ class PerformancePlan extends Plan { async _optimize ({ exceededHost, hosts, hostsAverages }) { const vms = await this._getVms(exceededHost.id) - const vmsAverages = this._getVmsAverages + const vmsAverages = await this._getVmsAverages(vms) // Compute real CPU usage. Virtuals cpus to reals cpus. setRealCpuAverageOfVms(vms, vmsAverages) @@ -451,17 +472,19 @@ class DensityPlan extends Plan { async execute () { const [ - { - averages, - hosts, - toOptimize - }, + data, pools ] = await Promise.all(mapToArray( this._findHostsToOptimize(), this._getPlanPools() )) + const { + averages, + hosts, + toOptimize + } = data + // Optimize master. console.log(hosts) @@ -509,7 +532,8 @@ class LoadBalancerPlugin { mode: !plan.mode ? PERFORMANCE_MODE : DENSITY_MODE, - poolIds: plan.pools + poolIds: plan.pools, + thresholds: plan.thresholds }) } } @@ -527,7 +551,7 @@ class LoadBalancerPlugin { this._job.cron.stop() } - _addPlan ({ name, mode, poolIds }) { + _addPlan ({ name, mode, poolIds, thresholds }) { poolIds = uniq(poolIds) // Check already used pools. @@ -537,8 +561,8 @@ class LoadBalancerPlugin { this._poolIds = this._poolIds.concat(poolIds) this._plans.push(mode === PERFORMANCE_MODE - ? new PerformancePlan(this.xo, name, poolIds) - : new DensityPlan(this.xo, name, poolIds) + ? 
new PerformancePlan(this.xo, name, poolIds, { thresholds }) + : new DensityPlan(this.xo, name, poolIds, { thresholds }) ) } From 39b0ea381bb8f9ee81e45c7af05e6feaafae012d Mon Sep 17 00:00:00 2001 From: wescoeur Date: Wed, 9 Mar 2016 17:24:57 +0100 Subject: [PATCH 23/50] Debug exec plans --- packages/xo-server-load-balancer/src/index.js | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/packages/xo-server-load-balancer/src/index.js b/packages/xo-server-load-balancer/src/index.js index 6d4bad633..29ff1fd33 100644 --- a/packages/xo-server-load-balancer/src/index.js +++ b/packages/xo-server-load-balancer/src/index.js @@ -529,7 +529,7 @@ class LoadBalancerPlugin { for (const plan of plans) { this._addPlan({ name: plan.name, - mode: !plan.mode + mode: plan.mode ? PERFORMANCE_MODE : DENSITY_MODE, poolIds: plan.pools, @@ -567,6 +567,8 @@ class LoadBalancerPlugin { } _executePlans () { + debug('Execute plans !') + return Promise.all( mapToArray(this._plans, plan => plan.execute()) ) From aa117a0ee393014d53f912432fca3c73c1cc96f0 Mon Sep 17 00:00:00 2001 From: wescoeur Date: Wed, 9 Mar 2016 17:29:57 +0100 Subject: [PATCH 24/50] Fixes hosts top optimize. --- packages/xo-server-load-balancer/src/index.js | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/packages/xo-server-load-balancer/src/index.js b/packages/xo-server-load-balancer/src/index.js index 29ff1fd33..bc3527532 100644 --- a/packages/xo-server-load-balancer/src/index.js +++ b/packages/xo-server-load-balancer/src/index.js @@ -265,6 +265,7 @@ class Plan { // No ressource's utilization problem. 
if (toOptimize.length === 0) { + debug('No hosts to optimize.') return } @@ -379,7 +380,8 @@ class PerformancePlan extends Plan { async execute () { const data = await this._findHostsToOptimize() - if (!hosts) { + if (!data) { + debug('No hosts to optimize.') return } From 96190c21d627f2e8d29b6f8cd26a3de209c60e07 Mon Sep 17 00:00:00 2001 From: wescoeur Date: Wed, 9 Mar 2016 17:53:38 +0100 Subject: [PATCH 25/50] threshold 0 can by used with cpu. --- packages/xo-server-load-balancer/src/index.js | 25 +++++++++++++++---- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/packages/xo-server-load-balancer/src/index.js b/packages/xo-server-load-balancer/src/index.js index bc3527532..d04e4adaa 100644 --- a/packages/xo-server-load-balancer/src/index.js +++ b/packages/xo-server-load-balancer/src/index.js @@ -224,7 +224,15 @@ class Plan { this._poolIds = poolIds this._thresholds = { cpu: { - critical: thresholds.cpu || DEFAULT_CRITICAL_THRESHOLD_CPU + critical: (() => { + const { cpu } = thresholds + + if (cpu >= 0) { + return cpu + } + + return DEFAULT_CRITICAL_THRESHOLD_CPU + })() }, memoryFree: { critical: (thresholds.memoryFree || DEFAULT_CRITICAL_THRESHOLD_MEMORY_FREE) * 1024 @@ -261,11 +269,11 @@ class Plan { // 1. Check if a ressource's utilization exceeds threshold. const avgNow = computeRessourcesAverage(hosts, hostsStats, EXECUTION_DELAY) - const toOptimize = this._checkRessourcesThresholds(hosts, avgNow) + let toOptimize = this._checkRessourcesThresholds(hosts, avgNow) // No ressource's utilization problem. if (toOptimize.length === 0) { - debug('No hosts to optimize.') + debug('No hosts to opdeededtizzzzzzzzzmize.') return } @@ -273,8 +281,16 @@ class Plan { const avgBefore = computeRessourcesAverage(hosts, hostsStats, MINUTES_OF_HISTORICAL_DATA) const avgWithRatio = computeRessourcesAverageWithWeight(avgNow, avgBefore, 0.75) + toOptimize = this._checkRessourcesThresholds(toOptimize, avgWithRatio) + + // No ressource's utilization problem. 
+ if (toOptimize.length === 0) { + debug('No hosts to opddedzssssstizzzzzzzzzmize.') + return + } + return { - toOptimize: this._checkRessourcesThresholds(toOptimize, avgWithRatio), + toOptimize, averages: avgWithRatio, hosts } @@ -381,7 +397,6 @@ class PerformancePlan extends Plan { const data = await this._findHostsToOptimize() if (!data) { - debug('No hosts to optimize.') return } From 9129bfa284b7cd6baaba47a8a14684e031254695 Mon Sep 17 00:00:00 2001 From: wescoeur Date: Fri, 11 Mar 2016 11:32:37 +0100 Subject: [PATCH 26/50] Excluded hosts in options. --- packages/xo-server-load-balancer/package.json | 4 +- packages/xo-server-load-balancer/src/index.js | 95 ++++++++++--------- 2 files changed, 50 insertions(+), 49 deletions(-) diff --git a/packages/xo-server-load-balancer/package.json b/packages/xo-server-load-balancer/package.json index d8b0197a5..08915043c 100644 --- a/packages/xo-server-load-balancer/package.json +++ b/packages/xo-server-load-balancer/package.json @@ -33,12 +33,12 @@ "babel-runtime": "^5.8.34", "cron": "^1.1.0", "event-to-promise": "^0.6.0", + "lodash.differenceby": "^4.2.1", "lodash.filter": "^4.2.0", "lodash.includes": "^4.1.0", "lodash.intersection": "^4.1.0", "lodash.map": "^4.2.0", - "lodash.uniq": "^4.2.0", - "node-xmpp-client": "^3.0.0" + "lodash.uniq": "^4.2.0" }, "devDependencies": { "babel": "^5.8.34", diff --git a/packages/xo-server-load-balancer/src/index.js b/packages/xo-server-load-balancer/src/index.js index d04e4adaa..15168b8ec 100644 --- a/packages/xo-server-load-balancer/src/index.js +++ b/packages/xo-server-load-balancer/src/index.js @@ -1,5 +1,6 @@ import EventEmitter from 'events' +import differenceBy from 'lodash.differenceby' import eventToPromise from 'event-to-promise' import filter from 'lodash.filter' import includes from 'lodash.includes' @@ -92,6 +93,13 @@ export const configurationSchema = { description: 'default: 64MB' } } + }, + + excludedHosts: { + type: 'array', + title: 'Excluded hosts', + $type: 'Host', + 
description: 'list of hosts that are not affected by the plan' } }, @@ -102,8 +110,7 @@ export const configurationSchema = { } }, - additionalProperties: false, - required: [ 'plans' ] + additionalProperties: false } // =================================================================== @@ -215,27 +222,23 @@ function setRealCpuAverageOfVms (vms, vmsAverages) { // =================================================================== +const numberOrDefault = (value, def) => (value >= 0) ? value : def + class Plan { constructor (xo, name, poolIds, { - thresholds = {} + excludedHosts, + thresholds } = {}) { this.xo = xo this._name = name this._poolIds = poolIds + this._excludedHosts = excludedHosts this._thresholds = { cpu: { - critical: (() => { - const { cpu } = thresholds - - if (cpu >= 0) { - return cpu - } - - return DEFAULT_CRITICAL_THRESHOLD_CPU - })() + critical: numberOrDefault(thresholds.cpu, DEFAULT_CRITICAL_THRESHOLD_CPU) }, memoryFree: { - critical: (thresholds.memoryFree || DEFAULT_CRITICAL_THRESHOLD_MEMORY_FREE) * 1024 + critical: numberOrDefault(thresholds.memoryFree, DEFAULT_CRITICAL_THRESHOLD_MEMORY_FREE) * 1024 } } @@ -246,12 +249,10 @@ class Plan { if (key === 'memoryFree') { attr.high = critical * HIGH_THRESHOLD_MEMORY_FREE_FACTOR attr.low = critical * LOW_THRESHOLD_MEMORY_FREE_FACTOR - - continue + } else { + attr.high = critical * HIGH_THRESHOLD_FACTOR + attr.low = critical * LOW_THRESHOLD_FACTOR } - - attr.high = critical * HIGH_THRESHOLD_FACTOR - attr.low = critical * LOW_THRESHOLD_FACTOR } } @@ -267,17 +268,17 @@ class Plan { const hosts = this._getHosts() const hostsStats = await this._getHostsStats(hosts, 'minutes') - // 1. Check if a ressource's utilization exceeds threshold. + // Check if a ressource's utilization exceeds threshold. const avgNow = computeRessourcesAverage(hosts, hostsStats, EXECUTION_DELAY) let toOptimize = this._checkRessourcesThresholds(hosts, avgNow) // No ressource's utilization problem. 
if (toOptimize.length === 0) { - debug('No hosts to opdeededtizzzzzzzzzmize.') + debug('No hosts to optimize.') return } - // 2. Check in the last 30 min interval with ratio. + // Check in the last 30 min interval with ratio. const avgBefore = computeRessourcesAverage(hosts, hostsStats, MINUTES_OF_HISTORICAL_DATA) const avgWithRatio = computeRessourcesAverageWithWeight(avgNow, avgBefore, 0.75) @@ -285,7 +286,7 @@ class Plan { // No ressource's utilization problem. if (toOptimize.length === 0) { - debug('No hosts to opddedzssssstizzzzzzzzzmize.') + debug('No hosts to optimize.') return } @@ -316,8 +317,12 @@ class Plan { // Compute hosts for each pool. They can change over time. _getHosts () { - return filter(this.xo.getObjects(), object => - object.type === 'host' && includes(this._poolIds, object.$poolId) + return differenceBy( + filter(this.xo.getObjects(), object => + object.type === 'host' && includes(this._poolIds, object.$poolId) + ), + this._excludedHosts, + val => val.id || val ) } @@ -367,11 +372,16 @@ class Plan { async _getVmsAverages (vms) { const vmsStats = await this._getVmsStats(vms, 'minutes') - return computeRessourcesAverageWithWeight( + const vmsAverages = computeRessourcesAverageWithWeight( computeRessourcesAverage(vms, vmsStats, EXECUTION_DELAY), computeRessourcesAverage(vms, vmsStats, MINUTES_OF_HISTORICAL_DATA), 0.75 ) + + // Compute real CPU usage. Virtuals cpus to reals cpus. 
+ setRealCpuAverageOfVms(vms, vmsAverages) + + return vmsAverages } } @@ -394,9 +404,9 @@ class PerformancePlan extends Plan { } async execute () { - const data = await this._findHostsToOptimize() + const results = await this._findHostsToOptimize() - if (!data) { + if (!results) { return } @@ -404,7 +414,7 @@ class PerformancePlan extends Plan { averages, hosts, toOptimize - } = data + } = results const exceededHost = searchObject(toOptimize, (a, b) => { a = averages[a.id] @@ -413,7 +423,7 @@ class PerformancePlan extends Plan { return (b.cpu - a.cpu) || (a.memoryFree - b.memoryFree) }) - // 3. Search bests combinations for the worst host. + // Search bests combinations for the worst host. await this._optimize({ exceededHost, hosts: filter(hosts, host => host.id !== exceededHost.id), @@ -425,9 +435,6 @@ class PerformancePlan extends Plan { const vms = await this._getVms(exceededHost.id) const vmsAverages = await this._getVmsAverages(vms) - // Compute real CPU usage. Virtuals cpus to reals cpus. - setRealCpuAverageOfVms(vms, vmsAverages) - // Sort vms by cpu usage. (higher to lower) vms.sort((a, b) => vmsAverages[b.id].cpu - vmsAverages[a.id].cpu @@ -544,14 +551,8 @@ class LoadBalancerPlugin { if (plans) { for (const plan of plans) { - this._addPlan({ - name: plan.name, - mode: plan.mode - ? PERFORMANCE_MODE - : DENSITY_MODE, - poolIds: plan.pools, - thresholds: plan.thresholds - }) + plan.mode = plan.mode ? PERFORMANCE_MODE : DENSITY_MODE + this._addPlan(plan) } } @@ -568,18 +569,18 @@ class LoadBalancerPlugin { this._job.cron.stop() } - _addPlan ({ name, mode, poolIds, thresholds }) { - poolIds = uniq(poolIds) + _addPlan ({ name, mode, pools, ...options }) { + pools = uniq(pools) // Check already used pools. 
- if (intersection(poolIds, this._poolIds).length > 0) { - throw new Error(`Pool(s) already included in an other plan: ${poolIds}`) + if (intersection(pools, this._poolIds).length > 0) { + throw new Error(`Pool(s) already included in an other plan: ${pools}`) } - this._poolIds = this._poolIds.concat(poolIds) + this._poolIds = this._poolIds.concat(pools) this._plans.push(mode === PERFORMANCE_MODE - ? new PerformancePlan(this.xo, name, poolIds, { thresholds }) - : new DensityPlan(this.xo, name, poolIds, { thresholds }) + ? new PerformancePlan(this.xo, name, pools, options) + : new DensityPlan(this.xo, name, pools, options) ) } From 5dacf9c3f536f08e9a9d1b1f489dc293edb4e4a7 Mon Sep 17 00:00:00 2001 From: wescoeur Date: Fri, 11 Mar 2016 14:42:40 +0100 Subject: [PATCH 27/50] The configuration object must not be modified. --- packages/xo-server-load-balancer/src/index.js | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/packages/xo-server-load-balancer/src/index.js b/packages/xo-server-load-balancer/src/index.js index 15168b8ec..f1682e429 100644 --- a/packages/xo-server-load-balancer/src/index.js +++ b/packages/xo-server-load-balancer/src/index.js @@ -551,8 +551,7 @@ class LoadBalancerPlugin { if (plans) { for (const plan of plans) { - plan.mode = plan.mode ? PERFORMANCE_MODE : DENSITY_MODE - this._addPlan(plan) + this._addPlan(plan.mode ? PERFORMANCE_MODE : DENSITY_MODE, plan) } } @@ -569,7 +568,7 @@ class LoadBalancerPlugin { this._job.cron.stop() } - _addPlan ({ name, mode, pools, ...options }) { + _addPlan (mode, { name, pools, ...options }) { pools = uniq(pools) // Check already used pools. From ae22adc920464e3db2838f4381dec2e96546876c Mon Sep 17 00:00:00 2001 From: wescoeur Date: Fri, 11 Mar 2016 15:45:23 +0100 Subject: [PATCH 28/50] Density mode in progress. 
--- packages/xo-server-load-balancer/src/index.js | 56 +++++++++++++------ 1 file changed, 39 insertions(+), 17 deletions(-) diff --git a/packages/xo-server-load-balancer/src/index.js b/packages/xo-server-load-balancer/src/index.js index f1682e429..a0a3fd3c2 100644 --- a/packages/xo-server-load-balancer/src/index.js +++ b/packages/xo-server-load-balancer/src/index.js @@ -158,6 +158,23 @@ function searchObject (objects, fun) { return object } +function sortHostsByPool (pools, hosts) { + const struct = {} + + for (const host of hosts) { + const poolId = host.$poolId + let pool = struct[poolId] + + if (pool === undefined) { + pool = struct[poolId] = [] + } + + pool.push(host) + } + + return struct +} + // =================================================================== // Averages. // =================================================================== @@ -484,7 +501,6 @@ class PerformancePlan extends Plan { class DensityPlan extends Plan { constructor (xo, name, poolIds, options) { - throw new Error('not yet implemented') // TMP super(xo, name, poolIds, options) } @@ -495,31 +511,37 @@ class DensityPlan extends Plan { } async execute () { - const [ - data, - pools - ] = await Promise.all(mapToArray( - this._findHostsToOptimize(), - this._getPlanPools() - )) + const results = await this._findHostsToOptimize() + + if (!results) { + return + } const { - averages, + averages: hostsAverages, hosts, toOptimize - } = data + } = results + + const pools = await this._getPlanPools() + const hostsByPool = sortHostsByPool(pools, hosts) + + // TODO: Remove masters from toOptimize and hosts. + + // Optimize all masters. + await Promise.all( + mapToArray(hostsByPool, hosts => + this._optimizeMaster({ toOptimize, hosts, hostsAverages }) + ) + ) // Optimize master. 
console.log(hosts) - - if (toOptimize.length === 0) { - return - } } - async _optimizeMaster (master, hosts) { - - + async _optimizeMaster ({ toOptimize, hosts, hostsAverages }) { + // TODO + throw new Error('Not yet implemented') } } From 3b6c5898fe9ada73821cb097a212358921a9fd96 Mon Sep 17 00:00:00 2001 From: wescoeur Date: Fri, 11 Mar 2016 15:57:29 +0100 Subject: [PATCH 29/50] Plugin can use enum for modes now. --- packages/xo-server-load-balancer/src/index.js | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/packages/xo-server-load-balancer/src/index.js b/packages/xo-server-load-balancer/src/index.js index a0a3fd3c2..07ee7733c 100644 --- a/packages/xo-server-load-balancer/src/index.js +++ b/packages/xo-server-load-balancer/src/index.js @@ -66,9 +66,8 @@ export const configurationSchema = { }, mode: { - type: 'boolean', - title: 'Mode', - description: 'performance mode if enabled, else density mode' + enum: [ 'Performance mode', 'Density mode' ], + title: 'Mode' }, pools: { @@ -573,7 +572,7 @@ class LoadBalancerPlugin { if (plans) { for (const plan of plans) { - this._addPlan(plan.mode ? PERFORMANCE_MODE : DENSITY_MODE, plan) + this._addPlan(plan.mode === 'Performance mode' ? PERFORMANCE_MODE : DENSITY_MODE, plan) } } From d7ed9ab64e7a28f77471c278adf183aa9ede5f81 Mon Sep 17 00:00:00 2001 From: wescoeur Date: Fri, 11 Mar 2016 16:48:36 +0100 Subject: [PATCH 30/50] Remove masters from toOptimize and hosts. 
--- packages/xo-server-load-balancer/src/index.js | 35 ++++++++++++------- 1 file changed, 23 insertions(+), 12 deletions(-) diff --git a/packages/xo-server-load-balancer/src/index.js b/packages/xo-server-load-balancer/src/index.js index 07ee7733c..51dc4c077 100644 --- a/packages/xo-server-load-balancer/src/index.js +++ b/packages/xo-server-load-balancer/src/index.js @@ -157,7 +157,7 @@ function searchObject (objects, fun) { return object } -function sortHostsByPool (pools, hosts) { +function sortHostsByPool (hosts) { const struct = {} for (const host of hosts) { @@ -322,13 +322,17 @@ class Plan { // =================================================================== _getPlanPools () { + const pools = {} + try { - return mapToArray(this._poolIds, poolId => this.xo.getObject(poolId)) + for (const poolId of this._poolIds) { + pools[poolId] = this.xo.getObject(poolId) + } } catch (_) { - return [] + return {} } - // Not reached. + return pools } // Compute hosts for each pool. They can change over time. @@ -518,27 +522,34 @@ class DensityPlan extends Plan { const { averages: hostsAverages, - hosts, - toOptimize + hosts } = results + let { toOptimize } = results const pools = await this._getPlanPools() - const hostsByPool = sortHostsByPool(pools, hosts) + const hostsByPool = sortHostsByPool(hosts) - // TODO: Remove masters from toOptimize and hosts. + // Remove masters from toOptimize and hosts. + for (const poolId in hostsByPool) { + const pool = hostsByPool[poolId] + hostsByPool[poolId] = filter(pool, host => host.id !== pool.master) + } + toOptimize = differenceBy(toOptimize, pools, object => { + object.type === 'host' ? object.id : object.master + }) // Optimize all masters. await Promise.all( - mapToArray(hostsByPool, hosts => - this._optimizeMaster({ toOptimize, hosts, hostsAverages }) - ) + mapToArray(hostsByPool, (hosts, poolId) => { + this._optimizeMaster({ toOptimize, pool: pools[poolId], hosts, hostsAverages }) + }) ) // Optimize master. 
console.log(hosts) } - async _optimizeMaster ({ toOptimize, hosts, hostsAverages }) { + async _optimizeMaster ({ toOptimize, pool, hosts, hostsAverages }) { // TODO throw new Error('Not yet implemented') } From da0c1cec2291c53cfdfed438e26e1e05446f8089 Mon Sep 17 00:00:00 2001 From: wescoeur Date: Thu, 17 Mar 2016 11:01:12 +0100 Subject: [PATCH 31/50] Fix cpu average. Use the cpu number of host and vm. --- packages/xo-server-load-balancer/src/index.js | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/packages/xo-server-load-balancer/src/index.js b/packages/xo-server-load-balancer/src/index.js index 51dc4c077..36e75f218 100644 --- a/packages/xo-server-load-balancer/src/index.js +++ b/packages/xo-server-load-balancer/src/index.js @@ -208,6 +208,7 @@ function computeRessourcesAverage (objects, objectsStats, nPoints) { objectAverages.cpu = computeAverage( mapToArray(stats.cpus, cpu => computeAverage(cpu, nPoints)) ) + objectAverages.nCpus = stats.cpus.length objectAverages.memoryFree = computeAverage(stats.memoryFree, nPoints) objectAverages.memory = computeAverage(stats.memory, nPoints) @@ -230,9 +231,10 @@ function computeRessourcesAverageWithWeight (averages1, averages2, ratio) { return averages } -function setRealCpuAverageOfVms (vms, vmsAverages) { +function setRealCpuAverageOfVms (vms, vmsAverages, nCpus) { for (const vm of vms) { - vmsAverages[vm.id].cpu /= vm.CPUs.number + const averages = vmsAverages[vm.id] + averages.cpu *= averages.nCpus / nCpus } } @@ -390,7 +392,7 @@ class Plan { return vmsStats } - async _getVmsAverages (vms) { + async _getVmsAverages (vms, host) { const vmsStats = await this._getVmsStats(vms, 'minutes') const vmsAverages = computeRessourcesAverageWithWeight( computeRessourcesAverage(vms, vmsStats, EXECUTION_DELAY), @@ -399,7 +401,7 @@ class Plan { ) // Compute real CPU usage. Virtuals cpus to reals cpus. 
- setRealCpuAverageOfVms(vms, vmsAverages) + setRealCpuAverageOfVms(vms, vmsAverages, host.CPUs.cpu_count) return vmsAverages } @@ -453,7 +455,7 @@ class PerformancePlan extends Plan { async _optimize ({ exceededHost, hosts, hostsAverages }) { const vms = await this._getVms(exceededHost.id) - const vmsAverages = await this._getVmsAverages(vms) + const vmsAverages = await this._getVmsAverages(vms, exceededHost) // Sort vms by cpu usage. (higher to lower) vms.sort((a, b) => From 12e07597112d74b6f9d3e87bda44183abe0b1b57 Mon Sep 17 00:00:00 2001 From: wescoeur Date: Thu, 17 Mar 2016 11:21:15 +0100 Subject: [PATCH 32/50] Optimize hosts in order priority. --- packages/xo-server-load-balancer/src/index.js | 27 ++++++++++++------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/packages/xo-server-load-balancer/src/index.js b/packages/xo-server-load-balancer/src/index.js index 36e75f218..aa4fef906 100644 --- a/packages/xo-server-load-balancer/src/index.js +++ b/packages/xo-server-load-balancer/src/index.js @@ -438,19 +438,25 @@ class PerformancePlan extends Plan { toOptimize } = results - const exceededHost = searchObject(toOptimize, (a, b) => { + toOptimize.sort((a, b) => { a = averages[a.id] b = averages[b.id] return (b.cpu - a.cpu) || (a.memoryFree - b.memoryFree) }) - // Search bests combinations for the worst host. - await this._optimize({ - exceededHost, - hosts: filter(hosts, host => host.id !== exceededHost.id), - hostsAverages: averages - }) + for (const exceededHost of toOptimize) { + const { id } = exceededHost + + debug(`Try to optimize Host (${exceededHost.id}).`) + + // Search bests combinations for the worst host. 
+ await this._optimize({ + exceededHost, + hosts: filter(hosts, host => host.id !== id), + hostsAverages: averages + }) + } } async _optimize ({ exceededHost, hosts, hostsAverages }) { @@ -466,6 +472,7 @@ class PerformancePlan extends Plan { const promises = [] const xapiSrc = this.xo.getXapi(exceededHost) + let optimizationsCount = 0 for (const vm of vms) { // Search host with lower cpu usage. @@ -489,7 +496,8 @@ class PerformancePlan extends Plan { exceededAverages.memoryFree += vmAverages.memory destinationAverages.memoryFree -= vmAverages.memory - debug(`Migrate VM (${vm.id}) to Host (${destination.id}) from Host (${exceededHost.id})`) + debug(`Migrate VM (${vm.id}) to Host (${destination.id}) from Host (${exceededHost.id}).`) + optimizationsCount++ // promises.push( // xapiSrc.migrateVm(vm._xapiId, this.xo.getXapi(destination), destination._xapiId) @@ -497,6 +505,7 @@ class PerformancePlan extends Plan { } await Promise.all(promises) + debug(`${optimizationsCount} optimizations for Host (${exceededHost.id}).`) return } @@ -618,7 +627,7 @@ class LoadBalancerPlugin { } _executePlans () { - debug('Execute plans !') + debug('Execute plans!') return Promise.all( mapToArray(this._plans, plan => plan.execute()) From 99e046ddea9e6fa1ccd3ac89ea0f6050c18f3bb0 Mon Sep 17 00:00:00 2001 From: wescoeur Date: Thu, 17 Mar 2016 12:11:34 +0100 Subject: [PATCH 33/50] Performance mode: The worst host is removed of hosts array after optimization. 
--- packages/xo-server-load-balancer/src/index.js | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/packages/xo-server-load-balancer/src/index.js b/packages/xo-server-load-balancer/src/index.js index aa4fef906..bb76a46ec 100644 --- a/packages/xo-server-load-balancer/src/index.js +++ b/packages/xo-server-load-balancer/src/index.js @@ -434,9 +434,9 @@ class PerformancePlan extends Plan { const { averages, - hosts, toOptimize } = results + let { hosts } = results toOptimize.sort((a, b) => { a = averages[a.id] @@ -449,11 +449,12 @@ class PerformancePlan extends Plan { const { id } = exceededHost debug(`Try to optimize Host (${exceededHost.id}).`) + hosts = filter(hosts, host => host.id !== id) // Search bests combinations for the worst host. await this._optimize({ exceededHost, - hosts: filter(hosts, host => host.id !== id), + hosts, hostsAverages: averages }) } @@ -463,7 +464,7 @@ class PerformancePlan extends Plan { const vms = await this._getVms(exceededHost.id) const vmsAverages = await this._getVmsAverages(vms, exceededHost) - // Sort vms by cpu usage. (higher to lower) + // Sort vms by cpu usage. (lower to higher) vms.sort((a, b) => vmsAverages[b.id].cpu - vmsAverages[a.id].cpu ) From 5a825bd4598f7f97b549f873903f7d882ea7f52b Mon Sep 17 00:00:00 2001 From: wescoeur Date: Wed, 23 Mar 2016 16:52:05 +0100 Subject: [PATCH 34/50] Density mode test. 
--- packages/xo-server-load-balancer/package.json | 1 + packages/xo-server-load-balancer/src/index.js | 210 ++++++++++++++---- 2 files changed, 163 insertions(+), 48 deletions(-) diff --git a/packages/xo-server-load-balancer/package.json b/packages/xo-server-load-balancer/package.json index 08915043c..e987f0f16 100644 --- a/packages/xo-server-load-balancer/package.json +++ b/packages/xo-server-load-balancer/package.json @@ -33,6 +33,7 @@ "babel-runtime": "^5.8.34", "cron": "^1.1.0", "event-to-promise": "^0.6.0", + "lodash.clonedeep": "^4.3.1", "lodash.differenceby": "^4.2.1", "lodash.filter": "^4.2.0", "lodash.includes": "^4.1.0", diff --git a/packages/xo-server-load-balancer/src/index.js b/packages/xo-server-load-balancer/src/index.js index bb76a46ec..deadd6a40 100644 --- a/packages/xo-server-load-balancer/src/index.js +++ b/packages/xo-server-load-balancer/src/index.js @@ -1,5 +1,6 @@ import EventEmitter from 'events' +import clone from 'lodash.clonedeep' import differenceBy from 'lodash.differenceby' import eventToPromise from 'event-to-promise' import filter from 'lodash.filter' @@ -157,23 +158,6 @@ function searchObject (objects, fun) { return object } -function sortHostsByPool (hosts) { - const struct = {} - - for (const host of hosts) { - const poolId = host.$poolId - let pool = struct[poolId] - - if (pool === undefined) { - pool = struct[poolId] = [] - } - - pool.push(host) - } - - return struct -} - // =================================================================== // Averages. 
// =================================================================== @@ -203,15 +187,15 @@ function computeRessourcesAverage (objects, objectsStats, nPoints) { for (const object of objects) { const { id } = object const { stats } = objectsStats[id] - const objectAverages = averages[id] = {} - objectAverages.cpu = computeAverage( - mapToArray(stats.cpus, cpu => computeAverage(cpu, nPoints)) - ) - objectAverages.nCpus = stats.cpus.length - - objectAverages.memoryFree = computeAverage(stats.memoryFree, nPoints) - objectAverages.memory = computeAverage(stats.memory, nPoints) + averages[id] = { + cpu: computeAverage( + mapToArray(stats.cpus, cpu => computeAverage(cpu, nPoints)) + ), + nCpus: stats.cpus.length, + memoryFree: computeAverage(stats.memoryFree, nPoints), + memory: computeAverage(stats.memory, nPoints) + } } return averages @@ -521,7 +505,7 @@ class DensityPlan extends Plan { _checkRessourcesThresholds (objects, averages) { return filter(objects, object => - averages[object.id].cpu < this._thresholds.cpu.high + averages[object.id].memoryFree > this._thresholds.memoryFree.low ) } @@ -533,37 +517,167 @@ class DensityPlan extends Plan { } const { - averages: hostsAverages, - hosts + hosts, + toOptimize + } = results + + let { + averages: hostsAverages } = results - let { toOptimize } = results const pools = await this._getPlanPools() - const hostsByPool = sortHostsByPool(hosts) - // Remove masters from toOptimize and hosts. - for (const poolId in hostsByPool) { - const pool = hostsByPool[poolId] - hostsByPool[poolId] = filter(pool, host => host.id !== pool.master) - } - toOptimize = differenceBy(toOptimize, pools, object => { - object.type === 'host' ? object.id : object.master - }) + for (const hostToOptimize of toOptimize) { + const { + id: hostId, + $poolId: poolId + } = hostToOptimize - // Optimize all masters. 
- await Promise.all( - mapToArray(hostsByPool, (hosts, poolId) => { - this._optimizeMaster({ toOptimize, pool: pools[poolId], hosts, hostsAverages }) + const { + master: masterId + } = pools[poolId] + + // Avoid master optimization. + if (masterId === hostId) { + continue + } + + let poolMaster // Pool master. + const poolHosts = [] // Without master. + const masters = [] // Without the master of this loop. + const otherHosts = [] + + for (const dest of hosts) { + const { + id: destId, + $poolId: destPoolId + } = dest + + // Destination host != Host to optimize! + if (destId === hostId) { + continue + } + + if (destPoolId === poolId) { + if (destId === masterId) { + poolMaster = dest + } else { + poolHosts.push(dest) + } + } else if (destId === pools[destPoolId].master) { + masters.push(dest) + } else { + otherHosts.push(dest) + } + } + + const simulResults = await this._simulate({ + host: hostToOptimize, + destinations: [ + [ poolMaster ], + poolHosts, + masters, + otherHosts + ], + hostsAverages: clone(hostsAverages) }) - ) - // Optimize master. - console.log(hosts) + if (simulResults) { + // Update stats. + hostsAverages = simulResults.hostsAverages + + // Migrate. + await this._migrate(simulResults.moves) + } + } } - async _optimizeMaster ({ toOptimize, pool, hosts, hostsAverages }) { - // TODO - throw new Error('Not yet implemented') + async _simulate ({ host, destinations, hostsAverages }) { + const { id: hostId } = host + + debug(`Try to optimize Host (${hostId}).`) + + const vms = await this._getVms(hostId) + const vmsAverages = await this._getVmsAverages(vms, host) + + // Sort vms by amount of memory. (+ -> -) + vms.sort((a, b) => + vmsAverages[b.id].memory - vmsAverages[a.id].memory + ) + + const simulResults = { + hostsAverages, + moves: [] + } + + // Try to find a destination for each VM. + for (const vm of vms) { + let move + + // Simulate the VM move on a destinations set. 
+ for (const subDestinations of destinations) { + move = this._testMigration({ + vm, + destinations: subDestinations, + hostsAverages, + vmsAverages + }) + + // Destination found. + if (move) { + simulResults.moves.push(move) + break + } + } + + // Unable to move a VM. + if (!move) { + return + } + } + + // Done. + return simulResults + } + + // Test if a VM migration on a destination (of a destinations set) is possible. + _testMigration ({ vm, destinations, hostsAverages, vmsAverages }) { + const { + _thresholds: { + critical: criticalThreshold + } + } = this + + // Sort the destinations by available memory. (- -> +) + destinations.sort((a, b) => + hostsAverages[a.id].memoryFree - hostsAverages[b.id].memoryFree + ) + + for (const destination of destinations) { + const destinationAverages = hostsAverages[destination.id] + const vmAverages = vmsAverages[vm.id] + + // Unable to move the VM. + if ( + destinationAverages.cpu + vmAverages.cpu >= criticalThreshold || + destinationAverages.memoryFree - vmAverages.memory <= criticalThreshold + ) { + continue + } + + destinationAverages.cpu += vmAverages.cpu + destinationAverages.memoryFree -= vmAverages.memory + + // Available movement. + return { + vm, + destination + } + } + } + + async _migrate (moves) { + console.log(moves) } } From 3ddb4d2b23e52b53561b8e5bc904c3fbf2b6f138 Mon Sep 17 00:00:00 2001 From: wescoeur Date: Wed, 23 Mar 2016 17:37:41 +0100 Subject: [PATCH 35/50] Log VM migrations (density mode). 
--- packages/xo-server-load-balancer/src/index.js | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/packages/xo-server-load-balancer/src/index.js b/packages/xo-server-load-balancer/src/index.js index deadd6a40..19bb826a1 100644 --- a/packages/xo-server-load-balancer/src/index.js +++ b/packages/xo-server-load-balancer/src/index.js @@ -490,7 +490,7 @@ class PerformancePlan extends Plan { } await Promise.all(promises) - debug(`${optimizationsCount} optimizations for Host (${exceededHost.id}).`) + debug(`Performance mode: ${optimizationsCount} optimizations for Host (${exceededHost.id}).`) return } @@ -526,6 +526,7 @@ class DensityPlan extends Plan { } = results const pools = await this._getPlanPools() + let optimizationsCount = 0 for (const hostToOptimize of toOptimize) { const { @@ -588,8 +589,11 @@ class DensityPlan extends Plan { // Migrate. await this._migrate(simulResults.moves) + optimizationsCount++ } } + + debug(`Density mode: ${optimizationsCount} optimizations.`) } async _simulate ({ host, destinations, hostsAverages }) { @@ -677,7 +681,17 @@ class DensityPlan extends Plan { } async _migrate (moves) { - console.log(moves) + await Promise.all( + mapToArray(moves, move => { + const { + vm, + destination + } = move + const xapiSrc = this.xo.getXapi(destination) + debug(`Migrate VM (${vm.id}) to Host (${destination.id}) from Host (${vm.$container}).`) + // xapiSrc.migrateVm(vm._xapiId, this.xo.getXapi(destination), destination._xapiId) + }) + ) } } From 06f60b7d92920ed903f08d11493a73802615f7e4 Mon Sep 17 00:00:00 2001 From: wescoeur Date: Thu, 24 Mar 2016 09:46:14 +0100 Subject: [PATCH 36/50] Make code modular. 
--- .../src/density-plan.js | 207 ++++++ packages/xo-server-load-balancer/src/index.js | 590 +----------------- .../src/performance-plan.js | 112 ++++ packages/xo-server-load-balancer/src/plan.js | 257 ++++++++ packages/xo-server-load-balancer/src/utils.js | 26 + 5 files changed, 609 insertions(+), 583 deletions(-) create mode 100644 packages/xo-server-load-balancer/src/density-plan.js create mode 100644 packages/xo-server-load-balancer/src/performance-plan.js create mode 100644 packages/xo-server-load-balancer/src/plan.js create mode 100644 packages/xo-server-load-balancer/src/utils.js diff --git a/packages/xo-server-load-balancer/src/density-plan.js b/packages/xo-server-load-balancer/src/density-plan.js new file mode 100644 index 000000000..068d01202 --- /dev/null +++ b/packages/xo-server-load-balancer/src/density-plan.js @@ -0,0 +1,207 @@ +import filter from 'lodash.filter' +import clone from 'lodash.clonedeep' +import { default as mapToArray } from 'lodash.map' + +import Plan from './plan' +import { + debug +} from './utils' + +// =================================================================== + +export default class DensityPlan extends Plan { + constructor (xo, name, poolIds, options) { + super(xo, name, poolIds, options) + } + + _checkRessourcesThresholds (objects, averages) { + return filter(objects, object => + averages[object.id].memoryFree > this._thresholds.memoryFree.low + ) + } + + async execute () { + const results = await this._findHostsToOptimize() + + if (!results) { + return + } + + const { + hosts, + toOptimize + } = results + + let { + averages: hostsAverages + } = results + + const pools = await this._getPlanPools() + let optimizationsCount = 0 + + for (const hostToOptimize of toOptimize) { + const { + id: hostId, + $poolId: poolId + } = hostToOptimize + + const { + master: masterId + } = pools[poolId] + + // Avoid master optimization. + if (masterId === hostId) { + continue + } + + let poolMaster // Pool master. 
+ const poolHosts = [] // Without master. + const masters = [] // Without the master of this loop. + const otherHosts = [] + + for (const dest of hosts) { + const { + id: destId, + $poolId: destPoolId + } = dest + + // Destination host != Host to optimize! + if (destId === hostId) { + continue + } + + if (destPoolId === poolId) { + if (destId === masterId) { + poolMaster = dest + } else { + poolHosts.push(dest) + } + } else if (destId === pools[destPoolId].master) { + masters.push(dest) + } else { + otherHosts.push(dest) + } + } + + const simulResults = await this._simulate({ + host: hostToOptimize, + destinations: [ + [ poolMaster ], + poolHosts, + masters, + otherHosts + ], + hostsAverages: clone(hostsAverages) + }) + + if (simulResults) { + // Update stats. + hostsAverages = simulResults.hostsAverages + + // Migrate. + await this._migrate(simulResults.moves) + optimizationsCount++ + } + } + + debug(`Density mode: ${optimizationsCount} optimizations.`) + } + + async _simulate ({ host, destinations, hostsAverages }) { + const { id: hostId } = host + + debug(`Try to optimize Host (${hostId}).`) + + const vms = await this._getVms(hostId) + const vmsAverages = await this._getVmsAverages(vms, host) + + // Sort vms by amount of memory. (+ -> -) + vms.sort((a, b) => + vmsAverages[b.id].memory - vmsAverages[a.id].memory + ) + + const simulResults = { + hostsAverages, + moves: [] + } + + // Try to find a destination for each VM. + for (const vm of vms) { + let move + + // Simulate the VM move on a destinations set. + for (const subDestinations of destinations) { + move = this._testMigration({ + vm, + destinations: subDestinations, + hostsAverages, + vmsAverages + }) + + // Destination found. + if (move) { + simulResults.moves.push(move) + break + } + } + + // Unable to move a VM. + if (!move) { + return + } + } + + // Done. + return simulResults + } + + // Test if a VM migration on a destination (of a destinations set) is possible. 
+ _testMigration ({ vm, destinations, hostsAverages, vmsAverages }) { + const { + _thresholds: { + critical: criticalThreshold + } + } = this + + // Sort the destinations by available memory. (- -> +) + destinations.sort((a, b) => + hostsAverages[a.id].memoryFree - hostsAverages[b.id].memoryFree + ) + + for (const destination of destinations) { + const destinationAverages = hostsAverages[destination.id] + const vmAverages = vmsAverages[vm.id] + + // Unable to move the VM. + if ( + destinationAverages.cpu + vmAverages.cpu >= criticalThreshold || + destinationAverages.memoryFree - vmAverages.memory <= criticalThreshold + ) { + continue + } + + destinationAverages.cpu += vmAverages.cpu + destinationAverages.memoryFree -= vmAverages.memory + + // Available movement. + return { + vm, + destination + } + } + } + + async _migrate (moves) { + await Promise.all( + mapToArray(moves, move => { + const { + vm, + destination + } = move + const xapiSrc = this.xo.getXapi(destination) + debug(`Migrate VM (${vm.id}) to Host (${destination.id}) from Host (${vm.$container}).`) + // xapiSrc.migrateVm(vm._xapiId, this.xo.getXapi(destination), destination._xapiId) + }) + ) + } +} diff --git a/packages/xo-server-load-balancer/src/index.js b/packages/xo-server-load-balancer/src/index.js index 19bb826a1..70a78ff76 100644 --- a/packages/xo-server-load-balancer/src/index.js +++ b/packages/xo-server-load-balancer/src/index.js @@ -1,50 +1,24 @@ import EventEmitter from 'events' - -import clone from 'lodash.clonedeep' -import differenceBy from 'lodash.differenceby' import eventToPromise from 'event-to-promise' -import filter from 'lodash.filter' -import includes from 'lodash.includes' import intersection from 'lodash.intersection' import uniq from 'lodash.uniq' - import { CronJob } from 'cron' import { default as mapToArray } from 'lodash.map' +import DensityPlan from './density-plan' +import PerformancePlan from './performance-plan' +import { + EXECUTION_DELAY, + debug +} from './utils' + 
class Emitter extends EventEmitter {} // =================================================================== -const noop = () => {} - -const LOAD_BALANCER_DEBUG = 1 -const debug = LOAD_BALANCER_DEBUG - ? str => console.log(`[load-balancer]${str}`) - : noop - -// =================================================================== - const PERFORMANCE_MODE = 0 const DENSITY_MODE = 1 -// Delay between each ressources evaluation in minutes. -// Must be less than MINUTES_OF_HISTORICAL_DATA. -const EXECUTION_DELAY = 1 -const MINUTES_OF_HISTORICAL_DATA = 30 - -// CPU threshold in percent. -const DEFAULT_CRITICAL_THRESHOLD_CPU = 90.0 - -// Memory threshold in MB. -const DEFAULT_CRITICAL_THRESHOLD_MEMORY_FREE = 64.0 - -// Thresholds factors. -const HIGH_THRESHOLD_FACTOR = 0.85 -const LOW_THRESHOLD_FACTOR = 0.25 - -const HIGH_THRESHOLD_MEMORY_FREE_FACTOR = 1.25 -const LOW_THRESHOLD_MEMORY_FREE_FACTOR = 20.0 - // =================================================================== export const configurationSchema = { @@ -145,556 +119,6 @@ const makeJob = (cronPattern, fn) => { return job } -// Compare a list of objects and give the best. -function searchObject (objects, fun) { - let object = objects[0] - - for (let i = 1; i < objects.length; i++) { - if (fun(object, objects[i]) > 0) { - object = objects[i] - } - } - - return object -} - -// =================================================================== -// Averages. 
-// =================================================================== - -function computeAverage (values, nPoints = values.length) { - let sum = 0 - let tot = 0 - - const { length } = values - - for (let i = length - nPoints; i < length; i++) { - const value = values[i] - - sum += value || 0 - - if (value) { - tot += 1 - } - } - - return sum / tot -} - -function computeRessourcesAverage (objects, objectsStats, nPoints) { - const averages = {} - - for (const object of objects) { - const { id } = object - const { stats } = objectsStats[id] - - averages[id] = { - cpu: computeAverage( - mapToArray(stats.cpus, cpu => computeAverage(cpu, nPoints)) - ), - nCpus: stats.cpus.length, - memoryFree: computeAverage(stats.memoryFree, nPoints), - memory: computeAverage(stats.memory, nPoints) - } - } - - return averages -} - -function computeRessourcesAverageWithWeight (averages1, averages2, ratio) { - const averages = {} - - for (const id in averages1) { - const objectAverages = averages[id] = {} - - for (const averageName in averages1[id]) { - objectAverages[averageName] = averages1[id][averageName] * ratio + averages2[id][averageName] * (1 - ratio) - } - } - - return averages -} - -function setRealCpuAverageOfVms (vms, vmsAverages, nCpus) { - for (const vm of vms) { - const averages = vmsAverages[vm.id] - averages.cpu *= averages.nCpus / nCpus - } -} - -// =================================================================== - -const numberOrDefault = (value, def) => (value >= 0) ? 
value : def - -class Plan { - constructor (xo, name, poolIds, { - excludedHosts, - thresholds - } = {}) { - this.xo = xo - this._name = name - this._poolIds = poolIds - this._excludedHosts = excludedHosts - this._thresholds = { - cpu: { - critical: numberOrDefault(thresholds.cpu, DEFAULT_CRITICAL_THRESHOLD_CPU) - }, - memoryFree: { - critical: numberOrDefault(thresholds.memoryFree, DEFAULT_CRITICAL_THRESHOLD_MEMORY_FREE) * 1024 - } - } - - for (const key in this._thresholds) { - const attr = this._thresholds[key] - const { critical } = attr - - if (key === 'memoryFree') { - attr.high = critical * HIGH_THRESHOLD_MEMORY_FREE_FACTOR - attr.low = critical * LOW_THRESHOLD_MEMORY_FREE_FACTOR - } else { - attr.high = critical * HIGH_THRESHOLD_FACTOR - attr.low = critical * LOW_THRESHOLD_FACTOR - } - } - } - - execute () { - throw new Error('Not implemented') - } - - // =================================================================== - // Get hosts to optimize. - // =================================================================== - - async _findHostsToOptimize () { - const hosts = this._getHosts() - const hostsStats = await this._getHostsStats(hosts, 'minutes') - - // Check if a ressource's utilization exceeds threshold. - const avgNow = computeRessourcesAverage(hosts, hostsStats, EXECUTION_DELAY) - let toOptimize = this._checkRessourcesThresholds(hosts, avgNow) - - // No ressource's utilization problem. - if (toOptimize.length === 0) { - debug('No hosts to optimize.') - return - } - - // Check in the last 30 min interval with ratio. - const avgBefore = computeRessourcesAverage(hosts, hostsStats, MINUTES_OF_HISTORICAL_DATA) - const avgWithRatio = computeRessourcesAverageWithWeight(avgNow, avgBefore, 0.75) - - toOptimize = this._checkRessourcesThresholds(toOptimize, avgWithRatio) - - // No ressource's utilization problem. 
- if (toOptimize.length === 0) { - debug('No hosts to optimize.') - return - } - - return { - toOptimize, - averages: avgWithRatio, - hosts - } - } - - _checkRessourcesThresholds () { - throw new Error('Not implemented') - } - - // =================================================================== - // Get objects. - // =================================================================== - - _getPlanPools () { - const pools = {} - - try { - for (const poolId of this._poolIds) { - pools[poolId] = this.xo.getObject(poolId) - } - } catch (_) { - return {} - } - - return pools - } - - // Compute hosts for each pool. They can change over time. - _getHosts () { - return differenceBy( - filter(this.xo.getObjects(), object => - object.type === 'host' && includes(this._poolIds, object.$poolId) - ), - this._excludedHosts, - val => val.id || val - ) - } - - async _getVms (hostId) { - return filter(this.xo.getObjects(), object => - object.type === 'VM' && - object.power_state === 'Running' && - object.$container === hostId - ) - } - - // =================================================================== - // Get stats. 
- // =================================================================== - - async _getHostsStats (hosts, granularity) { - const hostsStats = {} - - await Promise.all(mapToArray(hosts, host => - this.xo.getXapiHostStats(host, granularity).then(hostStats => { - hostsStats[host.id] = { - nPoints: hostStats.stats.cpus[0].length, - stats: hostStats.stats, - averages: {} - } - }) - )) - - return hostsStats - } - - async _getVmsStats (vms, granularity) { - const vmsStats = {} - - await Promise.all(mapToArray(vms, vm => - this.xo.getXapiVmStats(vm, granularity).then(vmStats => { - vmsStats[vm.id] = { - nPoints: vmStats.stats.cpus[0].length, - stats: vmStats.stats, - averages: {} - } - }) - )) - - return vmsStats - } - - async _getVmsAverages (vms, host) { - const vmsStats = await this._getVmsStats(vms, 'minutes') - const vmsAverages = computeRessourcesAverageWithWeight( - computeRessourcesAverage(vms, vmsStats, EXECUTION_DELAY), - computeRessourcesAverage(vms, vmsStats, MINUTES_OF_HISTORICAL_DATA), - 0.75 - ) - - // Compute real CPU usage. Virtuals cpus to reals cpus. 
- setRealCpuAverageOfVms(vms, vmsAverages, host.CPUs.cpu_count) - - return vmsAverages - } -} - -// =================================================================== - -class PerformancePlan extends Plan { - constructor (xo, name, poolIds, options) { - super(xo, name, poolIds, options) - } - - _checkRessourcesThresholds (objects, averages) { - return filter(objects, object => { - const objectAverages = averages[object.id] - - return ( - objectAverages.cpu >= this._thresholds.cpu.high || - objectAverages.memoryFree <= this._thresholds.memoryFree.high - ) - }) - } - - async execute () { - const results = await this._findHostsToOptimize() - - if (!results) { - return - } - - const { - averages, - toOptimize - } = results - let { hosts } = results - - toOptimize.sort((a, b) => { - a = averages[a.id] - b = averages[b.id] - - return (b.cpu - a.cpu) || (a.memoryFree - b.memoryFree) - }) - - for (const exceededHost of toOptimize) { - const { id } = exceededHost - - debug(`Try to optimize Host (${exceededHost.id}).`) - hosts = filter(hosts, host => host.id !== id) - - // Search bests combinations for the worst host. - await this._optimize({ - exceededHost, - hosts, - hostsAverages: averages - }) - } - } - - async _optimize ({ exceededHost, hosts, hostsAverages }) { - const vms = await this._getVms(exceededHost.id) - const vmsAverages = await this._getVmsAverages(vms, exceededHost) - - // Sort vms by cpu usage. (lower to higher) - vms.sort((a, b) => - vmsAverages[b.id].cpu - vmsAverages[a.id].cpu - ) - - const exceededAverages = hostsAverages[exceededHost.id] - const promises = [] - - const xapiSrc = this.xo.getXapi(exceededHost) - let optimizationsCount = 0 - - for (const vm of vms) { - // Search host with lower cpu usage. - const destination = searchObject(hosts, (a, b) => - hostsAverages[b.id].cpu - hostsAverages[a.id].cpu - ) - const destinationAverages = hostsAverages[destination.id] - const vmAverages = vmsAverages[vm.id] - - // Unable to move the vm. 
- if ( - exceededAverages.cpu - vmAverages.cpu < destinationAverages.cpu + vmAverages.cpu || - destinationAverages.memoryFree > vmAverages.memory - ) { - continue - } - - exceededAverages.cpu -= vmAverages.cpu - destinationAverages.cpu += vmAverages.cpu - - exceededAverages.memoryFree += vmAverages.memory - destinationAverages.memoryFree -= vmAverages.memory - - debug(`Migrate VM (${vm.id}) to Host (${destination.id}) from Host (${exceededHost.id}).`) - optimizationsCount++ - - // promises.push( - // xapiSrc.migrateVm(vm._xapiId, this.xo.getXapi(destination), destination._xapiId) - // ) - } - - await Promise.all(promises) - debug(`Performance mode: ${optimizationsCount} optimizations for Host (${exceededHost.id}).`) - - return - } -} - -// =================================================================== - -class DensityPlan extends Plan { - constructor (xo, name, poolIds, options) { - super(xo, name, poolIds, options) - } - - _checkRessourcesThresholds (objects, averages) { - return filter(objects, object => - averages[object.id].memoryFree > this._thresholds.memoryFree.low - ) - } - - async execute () { - const results = await this._findHostsToOptimize() - - if (!results) { - return - } - - const { - hosts, - toOptimize - } = results - - let { - averages: hostsAverages - } = results - - const pools = await this._getPlanPools() - let optimizationsCount = 0 - - for (const hostToOptimize of toOptimize) { - const { - id: hostId, - $poolId: poolId - } = hostToOptimize - - const { - master: masterId - } = pools[poolId] - - // Avoid master optimization. - if (masterId === hostId) { - continue - } - - let poolMaster // Pool master. - const poolHosts = [] // Without master. - const masters = [] // Without the master of this loop. - const otherHosts = [] - - for (const dest of hosts) { - const { - id: destId, - $poolId: destPoolId - } = dest - - // Destination host != Host to optimize! 
- if (destId === hostId) { - continue - } - - if (destPoolId === poolId) { - if (destId === masterId) { - poolMaster = dest - } else { - poolHosts.push(dest) - } - } else if (destId === pools[destPoolId].master) { - masters.push(dest) - } else { - otherHosts.push(dest) - } - } - - const simulResults = await this._simulate({ - host: hostToOptimize, - destinations: [ - [ poolMaster ], - poolHosts, - masters, - otherHosts - ], - hostsAverages: clone(hostsAverages) - }) - - if (simulResults) { - // Update stats. - hostsAverages = simulResults.hostsAverages - - // Migrate. - await this._migrate(simulResults.moves) - optimizationsCount++ - } - } - - debug(`Density mode: ${optimizationsCount} optimizations.`) - } - - async _simulate ({ host, destinations, hostsAverages }) { - const { id: hostId } = host - - debug(`Try to optimize Host (${hostId}).`) - - const vms = await this._getVms(hostId) - const vmsAverages = await this._getVmsAverages(vms, host) - - // Sort vms by amount of memory. (+ -> -) - vms.sort((a, b) => - vmsAverages[b.id].memory - vmsAverages[a.id].memory - ) - - const simulResults = { - hostsAverages, - moves: [] - } - - // Try to find a destination for each VM. - for (const vm of vms) { - let move - - // Simulate the VM move on a destinations set. - for (const subDestinations of destinations) { - move = this._testMigration({ - vm, - destinations: subDestinations, - hostsAverages, - vmsAverages - }) - - // Destination found. - if (move) { - simulResults.moves.push(move) - break - } - } - - // Unable to move a VM. - if (!move) { - return - } - } - - // Done. - return simulResults - } - - // Test if a VM migration on a destination (of a destinations set) is possible. - _testMigration ({ vm, destinations, hostsAverages, vmsAverages }) { - const { - _thresholds: { - critical: criticalThreshold - } - } = this - - // Sort the destinations by available memory. 
(- -> +) - destinations.sort((a, b) => - hostsAverages[a.id].memoryFree - hostsAverages[b.id].memoryFree - ) - - for (const destination of destinations) { - const destinationAverages = hostsAverages[destination.id] - const vmAverages = vmsAverages[vm.id] - - // Unable to move the VM. - if ( - destinationAverages.cpu + vmAverages.cpu >= criticalThreshold || - destinationAverages.memoryFree - vmAverages.memory <= criticalThreshold - ) { - continue - } - - destinationAverages.cpu += vmAverages.cpu - destinationAverages.memoryFree -= vmAverages.memory - - // Available movement. - return { - vm, - destination - } - } - } - - async _migrate (moves) { - await Promise.all( - mapToArray(moves, move => { - const { - vm, - destination - } = move - const xapiSrc = this.xo.getXapi(destination) - debug(`Migrate VM (${vm.id}) to Host (${destination.id}) from Host (${vm.$container}).`) - // xapiSrc.migrateVm(vm._xapiId, this.xo.getXapi(destination), destination._xapiId) - }) - ) - } -} - // =================================================================== // =================================================================== diff --git a/packages/xo-server-load-balancer/src/performance-plan.js b/packages/xo-server-load-balancer/src/performance-plan.js new file mode 100644 index 000000000..18d3eef11 --- /dev/null +++ b/packages/xo-server-load-balancer/src/performance-plan.js @@ -0,0 +1,112 @@ +import filter from 'lodash.filter' + +import Plan from './plan' +import { + debug, + searchObject +} from './utils' + +// =================================================================== + +export default class PerformancePlan extends Plan { + constructor (xo, name, poolIds, options) { + super(xo, name, poolIds, options) + } + + _checkRessourcesThresholds (objects, averages) { + return filter(objects, object => { + const objectAverages = averages[object.id] + + return ( + objectAverages.cpu >= this._thresholds.cpu.high || + objectAverages.memoryFree <= this._thresholds.memoryFree.high + 
) + }) + } + + async execute () { + const results = await this._findHostsToOptimize() + + if (!results) { + return + } + + const { + averages, + toOptimize + } = results + let { hosts } = results + + toOptimize.sort((a, b) => { + a = averages[a.id] + b = averages[b.id] + + return (b.cpu - a.cpu) || (a.memoryFree - b.memoryFree) + }) + + for (const exceededHost of toOptimize) { + const { id } = exceededHost + + debug(`Try to optimize Host (${exceededHost.id}).`) + hosts = filter(hosts, host => host.id !== id) + + // Search bests combinations for the worst host. + await this._optimize({ + exceededHost, + hosts, + hostsAverages: averages + }) + } + } + + async _optimize ({ exceededHost, hosts, hostsAverages }) { + const vms = await this._getVms(exceededHost.id) + const vmsAverages = await this._getVmsAverages(vms, exceededHost) + + // Sort vms by cpu usage. (lower to higher) + vms.sort((a, b) => + vmsAverages[b.id].cpu - vmsAverages[a.id].cpu + ) + + const exceededAverages = hostsAverages[exceededHost.id] + const promises = [] + + const xapiSrc = this.xo.getXapi(exceededHost) + let optimizationsCount = 0 + + for (const vm of vms) { + // Search host with lower cpu usage. + const destination = searchObject(hosts, (a, b) => + hostsAverages[b.id].cpu - hostsAverages[a.id].cpu + ) + const destinationAverages = hostsAverages[destination.id] + const vmAverages = vmsAverages[vm.id] + + // Unable to move the vm. 
+ if ( + exceededAverages.cpu - vmAverages.cpu < destinationAverages.cpu + vmAverages.cpu || + destinationAverages.memoryFree > vmAverages.memory + ) { + continue + } + + exceededAverages.cpu -= vmAverages.cpu + destinationAverages.cpu += vmAverages.cpu + + exceededAverages.memoryFree += vmAverages.memory + destinationAverages.memoryFree -= vmAverages.memory + + debug(`Migrate VM (${vm.id}) to Host (${destination.id}) from Host (${exceededHost.id}).`) + optimizationsCount++ + + // promises.push( + // xapiSrc.migrateVm(vm._xapiId, this.xo.getXapi(destination), destination._xapiId) + // ) + } + + await Promise.all(promises) + debug(`Performance mode: ${optimizationsCount} optimizations for Host (${exceededHost.id}).`) + + return + } +} diff --git a/packages/xo-server-load-balancer/src/plan.js b/packages/xo-server-load-balancer/src/plan.js new file mode 100644 index 000000000..96c9cf52d --- /dev/null +++ b/packages/xo-server-load-balancer/src/plan.js @@ -0,0 +1,257 @@ +import differenceBy from 'lodash.differenceby' +import filter from 'lodash.filter' +import includes from 'lodash.includes' +import { default as mapToArray } from 'lodash.map' + +import { + EXECUTION_DELAY, + debug +} from './utils' + +const MINUTES_OF_HISTORICAL_DATA = 30 + +// CPU threshold in percent. +const DEFAULT_CRITICAL_THRESHOLD_CPU = 90.0 + +// Memory threshold in MB. +const DEFAULT_CRITICAL_THRESHOLD_MEMORY_FREE = 64.0 + +// Thresholds factors. +const HIGH_THRESHOLD_FACTOR = 0.85 +const LOW_THRESHOLD_FACTOR = 0.25 + +const HIGH_THRESHOLD_MEMORY_FREE_FACTOR = 1.25 +const LOW_THRESHOLD_MEMORY_FREE_FACTOR = 20.0 + +const numberOrDefault = (value, def) => (value >= 0) ? value : def + +// =================================================================== +// Averages. 
+// =================================================================== + +function computeAverage (values, nPoints = values.length) { + let sum = 0 + let tot = 0 + + const { length } = values + + for (let i = length - nPoints; i < length; i++) { + const value = values[i] + + sum += value || 0 + + if (value) { + tot += 1 + } + } + + return sum / tot +} + +function computeRessourcesAverage (objects, objectsStats, nPoints) { + const averages = {} + + for (const object of objects) { + const { id } = object + const { stats } = objectsStats[id] + + averages[id] = { + cpu: computeAverage( + mapToArray(stats.cpus, cpu => computeAverage(cpu, nPoints)) + ), + nCpus: stats.cpus.length, + memoryFree: computeAverage(stats.memoryFree, nPoints), + memory: computeAverage(stats.memory, nPoints) + } + } + + return averages +} + +function computeRessourcesAverageWithWeight (averages1, averages2, ratio) { + const averages = {} + + for (const id in averages1) { + const objectAverages = averages[id] = {} + + for (const averageName in averages1[id]) { + objectAverages[averageName] = averages1[id][averageName] * ratio + averages2[id][averageName] * (1 - ratio) + } + } + + return averages +} + +function setRealCpuAverageOfVms (vms, vmsAverages, nCpus) { + for (const vm of vms) { + const averages = vmsAverages[vm.id] + averages.cpu *= averages.nCpus / nCpus + } +} + +// =================================================================== + +export default class Plan { + constructor (xo, name, poolIds, { + excludedHosts, + thresholds + } = {}) { + this.xo = xo + this._name = name + this._poolIds = poolIds + this._excludedHosts = excludedHosts + this._thresholds = { + cpu: { + critical: numberOrDefault(thresholds.cpu, DEFAULT_CRITICAL_THRESHOLD_CPU) + }, + memoryFree: { + critical: numberOrDefault(thresholds.memoryFree, DEFAULT_CRITICAL_THRESHOLD_MEMORY_FREE) * 1024 + } + } + + for (const key in this._thresholds) { + const attr = this._thresholds[key] + const { critical } = attr + + if (key 
=== 'memoryFree') { + attr.high = critical * HIGH_THRESHOLD_MEMORY_FREE_FACTOR + attr.low = critical * LOW_THRESHOLD_MEMORY_FREE_FACTOR + } else { + attr.high = critical * HIGH_THRESHOLD_FACTOR + attr.low = critical * LOW_THRESHOLD_FACTOR + } + } + } + + execute () { + throw new Error('Not implemented') + } + + // =================================================================== + // Get hosts to optimize. + // =================================================================== + + async _findHostsToOptimize () { + const hosts = this._getHosts() + const hostsStats = await this._getHostsStats(hosts, 'minutes') + + // Check if a ressource's utilization exceeds threshold. + const avgNow = computeRessourcesAverage(hosts, hostsStats, EXECUTION_DELAY) + let toOptimize = this._checkRessourcesThresholds(hosts, avgNow) + + // No ressource's utilization problem. + if (toOptimize.length === 0) { + debug('No hosts to optimize.') + return + } + + // Check in the last 30 min interval with ratio. + const avgBefore = computeRessourcesAverage(hosts, hostsStats, MINUTES_OF_HISTORICAL_DATA) + const avgWithRatio = computeRessourcesAverageWithWeight(avgNow, avgBefore, 0.75) + + toOptimize = this._checkRessourcesThresholds(toOptimize, avgWithRatio) + + // No ressource's utilization problem. + if (toOptimize.length === 0) { + debug('No hosts to optimize.') + return + } + + return { + toOptimize, + averages: avgWithRatio, + hosts + } + } + + _checkRessourcesThresholds () { + throw new Error('Not implemented') + } + + // =================================================================== + // Get objects. + // =================================================================== + + _getPlanPools () { + const pools = {} + + try { + for (const poolId of this._poolIds) { + pools[poolId] = this.xo.getObject(poolId) + } + } catch (_) { + return {} + } + + return pools + } + + // Compute hosts for each pool. They can change over time. 
+ _getHosts () { + return differenceBy( + filter(this.xo.getObjects(), object => + object.type === 'host' && includes(this._poolIds, object.$poolId) + ), + this._excludedHosts, + val => val.id || val + ) + } + + async _getVms (hostId) { + return filter(this.xo.getObjects(), object => + object.type === 'VM' && + object.power_state === 'Running' && + object.$container === hostId + ) + } + + // =================================================================== + // Get stats. + // =================================================================== + + async _getHostsStats (hosts, granularity) { + const hostsStats = {} + + await Promise.all(mapToArray(hosts, host => + this.xo.getXapiHostStats(host, granularity).then(hostStats => { + hostsStats[host.id] = { + nPoints: hostStats.stats.cpus[0].length, + stats: hostStats.stats, + averages: {} + } + }) + )) + + return hostsStats + } + + async _getVmsStats (vms, granularity) { + const vmsStats = {} + + await Promise.all(mapToArray(vms, vm => + this.xo.getXapiVmStats(vm, granularity).then(vmStats => { + vmsStats[vm.id] = { + nPoints: vmStats.stats.cpus[0].length, + stats: vmStats.stats, + averages: {} + } + }) + )) + + return vmsStats + } + + async _getVmsAverages (vms, host) { + const vmsStats = await this._getVmsStats(vms, 'minutes') + const vmsAverages = computeRessourcesAverageWithWeight( + computeRessourcesAverage(vms, vmsStats, EXECUTION_DELAY), + computeRessourcesAverage(vms, vmsStats, MINUTES_OF_HISTORICAL_DATA), + 0.75 + ) + + // Compute real CPU usage. Virtuals cpus to reals cpus. 
+ setRealCpuAverageOfVms(vms, vmsAverages, host.CPUs.cpu_count) + + return vmsAverages + } +} diff --git a/packages/xo-server-load-balancer/src/utils.js b/packages/xo-server-load-balancer/src/utils.js new file mode 100644 index 000000000..d2eca97e1 --- /dev/null +++ b/packages/xo-server-load-balancer/src/utils.js @@ -0,0 +1,26 @@ +const noop = () => {} + +const LOAD_BALANCER_DEBUG = 1 + +// Delay between each ressources evaluation in minutes. +// Must be less than MINUTES_OF_HISTORICAL_DATA. +export const EXECUTION_DELAY = 1 + +// =================================================================== + +export const debug = LOAD_BALANCER_DEBUG + ? str => console.log(`[load-balancer]${str}`) + : noop + +// Compare a list of objects and give the best. +export function searchObject (objects, fun) { + let object = objects[0] + + for (let i = 1; i < objects.length; i++) { + if (fun(object, objects[i]) > 0) { + object = objects[i] + } + } + + return object +} From 42cef0da8815591c8584b053956f4d0db83e7f02 Mon Sep 17 00:00:00 2001 From: wescoeur Date: Thu, 24 Mar 2016 11:44:23 +0100 Subject: [PATCH 37/50] Performance and density mode work. --- .../src/density-plan.js | 34 ++++++++++++++++--- .../src/performance-plan.js | 6 ++-- packages/xo-server-load-balancer/src/plan.js | 8 +++-- 3 files changed, 38 insertions(+), 10 deletions(-) diff --git a/packages/xo-server-load-balancer/src/density-plan.js b/packages/xo-server-load-balancer/src/density-plan.js index 068d01202..25019c1c0 100644 --- a/packages/xo-server-load-balancer/src/density-plan.js +++ b/packages/xo-server-load-balancer/src/density-plan.js @@ -54,6 +54,12 @@ export default class DensityPlan extends Plan { continue } + // A host to optimize needs the ability to be restarted. + if (hostToOptimize.powerOnMode === '') { + debug(`Host (${hostId}) does not have a power on mode.`) + continue + } + let poolMaster // Pool master. const poolHosts = [] // Without master. const masters = [] // Without the master of this loop. 
@@ -99,7 +105,7 @@ export default class DensityPlan extends Plan { hostsAverages = simulResults.hostsAverages // Migrate. - await this._migrate(simulResults.moves) + await this._migrate(hostId, simulResults.moves) optimizationsCount++ } } @@ -115,6 +121,13 @@ export default class DensityPlan extends Plan { const vms = await this._getVms(hostId) const vmsAverages = await this._getVmsAverages(vms, host) + for (const vm of vms) { + if (!vm.xenTools) { + debug(`VM (${vm.id}) of Host (${hostId}) does not support pool migration.`) + return + } + } + // Sort vms by amount of memory. (+ -> -) vms.sort((a, b) => vmsAverages[b.id].memory - vmsAverages[a.id].memory @@ -180,6 +193,7 @@ export default class DensityPlan extends Plan { continue } + // Move ok. Update stats. destinationAverages.cpu += vmAverages.cpu destinationAverages.memoryFree -= vmAverages.memory @@ -191,17 +205,29 @@ export default class DensityPlan extends Plan { } } - async _migrate (moves) { + // Migrate the VMs of one host. + // Try to shutdown the VMs host. 
+ async _migrate (hostId, moves) { + const xapiSrc = this.xo.getXapi(hostId) + await Promise.all( mapToArray(moves, move => { const { vm, destination } = move - const xapiSrc = this.xo.getXapi(destination) + const xapiDest = this.xo.getXapi(destination) debug(`Migrate VM (${vm.id}) to Host (${destination.id}) from Host (${vm.$container}).`) - // xapiSrc.migrateVm(vm._xapiId, this.xo.getXapi(destination), destination._xapiId) + return xapiDest.migrateVm(vm._xapiId, this.xo.getXapi(destination), destination._xapiId) }) ) + + debug(`Shutdown Host (${hostId}).`) + + try { + await xapiSrc.shutdownHost(hostId) + } catch (error) { + debug(`Unable to shutdown Host (${hostId}).`, error) + } } } diff --git a/packages/xo-server-load-balancer/src/performance-plan.js b/packages/xo-server-load-balancer/src/performance-plan.js index 18d3eef11..bbf4103b5 100644 --- a/packages/xo-server-load-balancer/src/performance-plan.js +++ b/packages/xo-server-load-balancer/src/performance-plan.js @@ -99,9 +99,9 @@ export default class PerformancePlan extends Plan { debug(`Migrate VM (${vm.id}) to Host (${destination.id}) from Host (${exceededHost.id}).`) optimizationsCount++ - // promises.push( - // xapiSrc.migrateVm(vm._xapiId, this.xo.getXapi(destination), destination._xapiId) - // ) + promises.push( + xapiSrc.migrateVm(vm._xapiId, this.xo.getXapi(destination), destination._xapiId) + ) } await Promise.all(promises) diff --git a/packages/xo-server-load-balancer/src/plan.js b/packages/xo-server-load-balancer/src/plan.js index 96c9cf52d..d4b240fe1 100644 --- a/packages/xo-server-load-balancer/src/plan.js +++ b/packages/xo-server-load-balancer/src/plan.js @@ -189,9 +189,11 @@ export default class Plan { // Compute hosts for each pool. They can change over time. 
_getHosts () { return differenceBy( - filter(this.xo.getObjects(), object => - object.type === 'host' && includes(this._poolIds, object.$poolId) - ), + filter(this.xo.getObjects(), object => { + object.type === 'host' && + includes(this._poolIds, object.$poolId) && + object.power_state !== 'Halted' + }), this._excludedHosts, val => val.id || val ) From 627227f2f9f1790f6807b8ce26f03b0cba26768f Mon Sep 17 00:00:00 2001 From: wescoeur Date: Thu, 24 Mar 2016 11:48:49 +0100 Subject: [PATCH 38/50] 0.1.0 --- packages/xo-server-load-balancer/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/xo-server-load-balancer/package.json b/packages/xo-server-load-balancer/package.json index e987f0f16..204213c22 100644 --- a/packages/xo-server-load-balancer/package.json +++ b/packages/xo-server-load-balancer/package.json @@ -1,6 +1,6 @@ { "name": "xo-server-load-balancer", - "version": "0.0.1", + "version": "0.1.0", "license": "AGPL-3.0", "description": "Load balancer for XO-Server", "keywords": [ From 3e285d6131b640078fae1517843804f3edc8618e Mon Sep 17 00:00:00 2001 From: wescoeur Date: Wed, 6 Apr 2016 15:24:34 +0200 Subject: [PATCH 39/50] Search destination host per pool. 
(Performance mode) --- packages/xo-server-load-balancer/package.json | 2 +- .../src/performance-plan.js | 32 ++++++++++++++----- packages/xo-server-load-balancer/src/plan.js | 16 ++++------ packages/xo-server-load-balancer/src/utils.js | 13 -------- 4 files changed, 31 insertions(+), 32 deletions(-) diff --git a/packages/xo-server-load-balancer/package.json b/packages/xo-server-load-balancer/package.json index 204213c22..0e5091f7e 100644 --- a/packages/xo-server-load-balancer/package.json +++ b/packages/xo-server-load-balancer/package.json @@ -34,8 +34,8 @@ "cron": "^1.1.0", "event-to-promise": "^0.6.0", "lodash.clonedeep": "^4.3.1", - "lodash.differenceby": "^4.2.1", "lodash.filter": "^4.2.0", + "lodash.find": "^4.3.0", "lodash.includes": "^4.1.0", "lodash.intersection": "^4.1.0", "lodash.map": "^4.2.0", diff --git a/packages/xo-server-load-balancer/src/performance-plan.js b/packages/xo-server-load-balancer/src/performance-plan.js index bbf4103b5..981a2e627 100644 --- a/packages/xo-server-load-balancer/src/performance-plan.js +++ b/packages/xo-server-load-balancer/src/performance-plan.js @@ -1,10 +1,21 @@ import filter from 'lodash.filter' +import find from 'lodash.find' import Plan from './plan' -import { - debug, - searchObject -} from './utils' +import { debug } from './utils' + +// Compare a list of objects and give the best. +function searchBestObject (objects, fun) { + let object = objects[0] + + for (let i = 1; i < objects.length; i++) { + if (fun(object, objects[i]) > 0) { + object = objects[i] + } + } + + return object +} // =================================================================== @@ -74,11 +85,16 @@ export default class PerformancePlan extends Plan { const xapiSrc = this.xo.getXapi(exceededHost) let optimizationsCount = 0 + const searchFunction = (a, b) => hostsAverages[b.id].cpu - hostsAverages[a.id].cpu + for (const vm of vms) { - // Search host with lower cpu usage. 
- const destination = searchObject(hosts, (a, b) => - hostsAverages[b.id].cpu - hostsAverages[a.id].cpu - ) + // Search host with lower cpu usage in the same pool first. In other pool if necessary. + let destination = searchBestObject(find(hosts, host => host.$poolId === vm.$poolId), searchFunction) + + if (!destination) { + destination = searchBestObject(hosts, searchFunction) + } + const destinationAverages = hostsAverages[destination.id] const vmAverages = vmsAverages[vm.id] diff --git a/packages/xo-server-load-balancer/src/plan.js b/packages/xo-server-load-balancer/src/plan.js index d4b240fe1..2f24b0ce9 100644 --- a/packages/xo-server-load-balancer/src/plan.js +++ b/packages/xo-server-load-balancer/src/plan.js @@ -1,4 +1,3 @@ -import differenceBy from 'lodash.differenceby' import filter from 'lodash.filter' import includes from 'lodash.includes' import { default as mapToArray } from 'lodash.map' @@ -188,15 +187,12 @@ export default class Plan { // Compute hosts for each pool. They can change over time. _getHosts () { - return differenceBy( - filter(this.xo.getObjects(), object => { - object.type === 'host' && - includes(this._poolIds, object.$poolId) && - object.power_state !== 'Halted' - }), - this._excludedHosts, - val => val.id || val - ) + return filter(this.xo.getObjects(), object => ( + object.type === 'host' && + includes(this._poolIds, object.$poolId) && + object.power_state !== 'Halted' && + !includes(this._excludedHosts, object.id) + )) } async _getVms (hostId) { diff --git a/packages/xo-server-load-balancer/src/utils.js b/packages/xo-server-load-balancer/src/utils.js index d2eca97e1..f7894a639 100644 --- a/packages/xo-server-load-balancer/src/utils.js +++ b/packages/xo-server-load-balancer/src/utils.js @@ -11,16 +11,3 @@ export const EXECUTION_DELAY = 1 export const debug = LOAD_BALANCER_DEBUG ? str => console.log(`[load-balancer]${str}`) : noop - -// Compare a list of objects and give the best. 
-export function searchObject (objects, fun) { - let object = objects[0] - - for (let i = 1; i < objects.length; i++) { - if (fun(object, objects[i]) > 0) { - object = objects[i] - } - } - - return object -} From f0c28c74d8b352820481e516b02a611f65254e0e Mon Sep 17 00:00:00 2001 From: wescoeur Date: Wed, 6 Apr 2016 16:00:01 +0200 Subject: [PATCH 40/50] Try to power on a hosts set. (Performance mode) --- .../xo-server-load-balancer/src/density-plan.js | 4 +--- .../src/performance-plan.js | 16 ++++++++++++++++ packages/xo-server-load-balancer/src/plan.js | 4 ++-- 3 files changed, 19 insertions(+), 5 deletions(-) diff --git a/packages/xo-server-load-balancer/src/density-plan.js b/packages/xo-server-load-balancer/src/density-plan.js index 25019c1c0..c3064ec76 100644 --- a/packages/xo-server-load-balancer/src/density-plan.js +++ b/packages/xo-server-load-balancer/src/density-plan.js @@ -3,9 +3,7 @@ import clone from 'lodash.clonedeep' import { default as mapToArray } from 'lodash.map' import Plan from './plan' -import { - debug -} from './utils' +import { debug } from './utils' // =================================================================== diff --git a/packages/xo-server-load-balancer/src/performance-plan.js b/packages/xo-server-load-balancer/src/performance-plan.js index 981a2e627..f372c2d21 100644 --- a/packages/xo-server-load-balancer/src/performance-plan.js +++ b/packages/xo-server-load-balancer/src/performance-plan.js @@ -1,5 +1,6 @@ import filter from 'lodash.filter' import find from 'lodash.find' +import { default as mapToArray } from 'lodash.map' import Plan from './plan' import { debug } from './utils' @@ -36,6 +37,21 @@ export default class PerformancePlan extends Plan { } async execute () { + // Try to power on a hosts set. 
+ try { + await Promise.all( + mapToArray( + filter(this._getHosts({ powerState: 'Halted' }), host => host.powerOnMode !== ''), + host => { + const { id } = host + return this.xo.getXapi(id).powerOnHost(id) + } + ) + ) + } catch (error) { + console.error(error) + } + const results = await this._findHostsToOptimize() if (!results) { diff --git a/packages/xo-server-load-balancer/src/plan.js b/packages/xo-server-load-balancer/src/plan.js index 2f24b0ce9..274368355 100644 --- a/packages/xo-server-load-balancer/src/plan.js +++ b/packages/xo-server-load-balancer/src/plan.js @@ -186,11 +186,11 @@ export default class Plan { } // Compute hosts for each pool. They can change over time. - _getHosts () { + _getHosts ({ powerState = 'Running' } = {}) { return filter(this.xo.getObjects(), object => ( object.type === 'host' && includes(this._poolIds, object.$poolId) && - object.power_state !== 'Halted' && + object.power_state === powerState && !includes(this._excludedHosts, object.id) )) } From aa54ab6e51fb60a99fdb349017f622608face4f1 Mon Sep 17 00:00:00 2001 From: wescoeur Date: Wed, 6 Apr 2016 16:01:24 +0200 Subject: [PATCH 41/50] 0.2.0 --- packages/xo-server-load-balancer/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/xo-server-load-balancer/package.json b/packages/xo-server-load-balancer/package.json index 0e5091f7e..50a72acd7 100644 --- a/packages/xo-server-load-balancer/package.json +++ b/packages/xo-server-load-balancer/package.json @@ -1,6 +1,6 @@ { "name": "xo-server-load-balancer", - "version": "0.1.0", + "version": "0.2.0", "license": "AGPL-3.0", "description": "Load balancer for XO-Server", "keywords": [ From 42ea76eb2ab1e526bee7530cf4add4e9d7d61a91 Mon Sep 17 00:00:00 2001 From: wescoeur Date: Thu, 30 Jun 2016 14:10:59 +0200 Subject: [PATCH 42/50] Supports ui schema builder of xo-web. 
--- packages/xo-server-load-balancer/src/index.js | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/packages/xo-server-load-balancer/src/index.js b/packages/xo-server-load-balancer/src/index.js index 70a78ff76..f6983f913 100644 --- a/packages/xo-server-load-balancer/src/index.js +++ b/packages/xo-server-load-balancer/src/index.js @@ -47,8 +47,12 @@ export const configurationSchema = { pools: { type: 'array', - $type: 'Pool', - description: 'list of pools where to apply the policy' + description: 'list of pools where to apply the policy', + + items: { + type: 'string', + $type: 'Pool' + } }, thresholds: { @@ -72,8 +76,12 @@ export const configurationSchema = { excludedHosts: { type: 'array', title: 'Excluded hosts', - $type: 'Host', - description: 'list of hosts that are not affected by the plan' + description: 'list of hosts that are not affected by the plan', + + items: { + type: 'string', + $type: 'Host' + } } }, From ffc155c34189b246f899dad68d99c02d34959b84 Mon Sep 17 00:00:00 2001 From: wescoeur Date: Thu, 30 Jun 2016 14:12:19 +0200 Subject: [PATCH 43/50] 0.2.1 --- packages/xo-server-load-balancer/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/xo-server-load-balancer/package.json b/packages/xo-server-load-balancer/package.json index 50a72acd7..1788deae4 100644 --- a/packages/xo-server-load-balancer/package.json +++ b/packages/xo-server-load-balancer/package.json @@ -1,6 +1,6 @@ { "name": "xo-server-load-balancer", - "version": "0.2.0", + "version": "0.2.1", "license": "AGPL-3.0", "description": "Load balancer for XO-Server", "keywords": [ From 10030c495956ba0239f25533dc375a7a4b2c6a69 Mon Sep 17 00:00:00 2001 From: wescoeur Date: Thu, 7 Jul 2016 11:58:12 +0200 Subject: [PATCH 44/50] Use default value on cpu/memoryFree. 
--- packages/xo-server-load-balancer/src/index.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/xo-server-load-balancer/src/index.js b/packages/xo-server-load-balancer/src/index.js index f6983f913..e95f95803 100644 --- a/packages/xo-server-load-balancer/src/index.js +++ b/packages/xo-server-load-balancer/src/index.js @@ -63,12 +63,12 @@ export const configurationSchema = { cpu: { type: 'integer', title: 'CPU (%)', - description: 'default: 90%' + default: 90 }, memoryFree: { type: 'integer', title: 'RAM, Free memory (MB)', - description: 'default: 64MB' + default: 64 } } }, From cd8a92c30b3d48e6875c6c62054728d1dc8754a4 Mon Sep 17 00:00:00 2001 From: wescoeur Date: Thu, 7 Jul 2016 12:00:24 +0200 Subject: [PATCH 45/50] 0.3.0 --- packages/xo-server-load-balancer/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/xo-server-load-balancer/package.json b/packages/xo-server-load-balancer/package.json index 1788deae4..aff4d6c47 100644 --- a/packages/xo-server-load-balancer/package.json +++ b/packages/xo-server-load-balancer/package.json @@ -1,6 +1,6 @@ { "name": "xo-server-load-balancer", - "version": "0.2.1", + "version": "0.3.0", "license": "AGPL-3.0", "description": "Load balancer for XO-Server", "keywords": [ From 2b8996e9656a56feb58d718aee63b1fd985e93f2 Mon Sep 17 00:00:00 2001 From: Julien Fontanet Date: Thu, 29 Sep 2016 14:00:25 +0200 Subject: [PATCH 46/50] chore(package): update all dependencies --- packages/xo-server-load-balancer/.babelrc | 11 ---- .../xo-server-load-balancer/.editorconfig | 2 +- packages/xo-server-load-balancer/.gitignore | 6 +- packages/xo-server-load-balancer/.travis.yml | 2 - packages/xo-server-load-balancer/README.md | 11 +--- packages/xo-server-load-balancer/package.json | 57 ++++++++++++------- .../src/density-plan.js | 8 +-- packages/xo-server-load-balancer/src/index.js | 4 +- .../xo-server-load-balancer/src/index.spec.js | 8 ++- .../src/performance-plan.js | 8 +-- 
packages/xo-server-load-balancer/src/plan.js | 4 +- 11 files changed, 50 insertions(+), 71 deletions(-) delete mode 100644 packages/xo-server-load-balancer/.babelrc diff --git a/packages/xo-server-load-balancer/.babelrc b/packages/xo-server-load-balancer/.babelrc deleted file mode 100644 index bc055f47f..000000000 --- a/packages/xo-server-load-balancer/.babelrc +++ /dev/null @@ -1,11 +0,0 @@ -{ - "comments": false, - "compact": true, - "optional": [ - "es7.asyncFunctions", - "es7.decorators", - "es7.exportExtensions", - "es7.functionBind", - "runtime" - ] -} diff --git a/packages/xo-server-load-balancer/.editorconfig b/packages/xo-server-load-balancer/.editorconfig index da21ef4c5..b6db0112a 100644 --- a/packages/xo-server-load-balancer/.editorconfig +++ b/packages/xo-server-load-balancer/.editorconfig @@ -41,7 +41,7 @@ indent_style = space # # Two spaces seems to be the standard most common style, at least in # Node.js (http://nodeguide.com/style.html#tabs-vs-spaces). -[*.js] +[*.{js,jsx,ts,tsx}] indent_size = 2 indent_style = space diff --git a/packages/xo-server-load-balancer/.gitignore b/packages/xo-server-load-balancer/.gitignore index 6959be1cf..fa69f37f1 100644 --- a/packages/xo-server-load-balancer/.gitignore +++ b/packages/xo-server-load-balancer/.gitignore @@ -1,9 +1,5 @@ -/.nyc_output/ -/bower_components/ /dist/ +/node_modules/ npm-debug.log npm-debug.log.* - -!node_modules/* -node_modules/*/ diff --git a/packages/xo-server-load-balancer/.travis.yml b/packages/xo-server-load-balancer/.travis.yml index 502095fce..fb5d91729 100644 --- a/packages/xo-server-load-balancer/.travis.yml +++ b/packages/xo-server-load-balancer/.travis.yml @@ -2,8 +2,6 @@ language: node_js node_js: - 'stable' - '4' - - '0.12' - - '0.10' # Use containers. 
# http://docs.travis-ci.com/user/workers/container-based-infrastructure/ diff --git a/packages/xo-server-load-balancer/README.md b/packages/xo-server-load-balancer/README.md index 53c7d2b94..89d9e1fd7 100644 --- a/packages/xo-server-load-balancer/README.md +++ b/packages/xo-server-load-balancer/README.md @@ -7,18 +7,13 @@ XO-Server plugin that allows load balancing. Go inside your `xo-server` folder and install it: ``` -> npm install xo-server-load-balancer +> npm install --global xo-server-load-balancer ``` ## Usage -Edit your `xo-server` configuration and add the plugin name in the `plugins` section. - -```yaml -plugins: - - xo-server-load-balancer: -``` +Like all other xo-server plugins, it can be configured directly via +the web interface, see [the plugin documentation](https://xen-orchestra.com/docs/plugins.html). ## Development diff --git a/packages/xo-server-load-balancer/package.json b/packages/xo-server-load-balancer/package.json index aff4d6c47..b4b4847c4 100644 --- a/packages/xo-server-load-balancer/package.json +++ b/packages/xo-server-load-balancer/package.json @@ -27,46 +27,59 @@ "dist/" ], "engines": { - "node": ">=0.12" + "node": ">=4" }, "dependencies": { - "babel-runtime": "^5.8.34", + "babel-runtime": "^6.11.6", "cron": "^1.1.0", - "event-to-promise": "^0.6.0", - "lodash.clonedeep": "^4.3.1", - "lodash.filter": "^4.2.0", - "lodash.find": "^4.3.0", - "lodash.includes": "^4.1.0", - "lodash.intersection": "^4.1.0", - "lodash.map": "^4.2.0", - "lodash.uniq": "^4.2.0" + "event-to-promise": "^0.7.0", + "lodash": "^4.16.2" }, "devDependencies": { - "babel": "^5.8.34", - "babel-eslint": "^4.1.6", - "clarify": "^1.0.5", + "babel-cli": "^6.16.0", + "babel-eslint": "^7.0.0", + "babel-plugin-lodash": "^3.2.9", + "babel-plugin-transform-runtime": "^6.15.0", + "babel-preset-es2015": "^6.16.0", + "babel-preset-stage-0": "^6.16.0", + "clarify": "^2.0.0", "dependency-check": "^2.5.1", - "mocha": "^2.3.4", + "ghooks": "^1.3.2", + "mocha": "^3.1.0", "must": 
"^0.13.1", - "nyc": "^3.2.2", - "source-map-support": "^0.3.3", - "standard": "^5.4.1", + "source-map-support": "^0.4.3", + "standard": "^8.2.0", "trace": "^2.0.1" }, "scripts": { - "build": "babel --source-maps --out-dir=dist/ src/", - "dev": "babel --watch --source-maps --out-dir=dist/ src/", + "build": "NODE_ENV=production babel --source-maps --out-dir=dist/ src/", + "depcheck": "dependency-check ./package.json", + "dev": "NODE_DEV=development babel --watch --source-maps --out-dir=dist/ src/", "dev-test": "mocha --opts .mocha.opts --watch --reporter=min \"dist/**/*.spec.js\"", "lint": "standard", - "depcheck": "dependency-check ./package.json", "posttest": "npm run lint && npm run depcheck", "prepublish": "npm run build", - "test": "nyc mocha --opts .mocha.opts \"dist/**/*.spec.js\"" + "test": "mocha --opts .mocha.opts \"dist/**/*.spec.js\"" + }, + "babel": { + "plugins": [ + "transform-runtime", + "lodash" + ], + "presets": [ + "es2015", + "stage-0" + ] }, "standard": { "ignore": [ - "dist/**" + "dist" ], "parser": "babel-eslint" + }, + "config": { + "ghooks": { + "commit-msg": "npm test" + } } } diff --git a/packages/xo-server-load-balancer/src/density-plan.js b/packages/xo-server-load-balancer/src/density-plan.js index c3064ec76..b407d7547 100644 --- a/packages/xo-server-load-balancer/src/density-plan.js +++ b/packages/xo-server-load-balancer/src/density-plan.js @@ -1,6 +1,4 @@ -import filter from 'lodash.filter' -import clone from 'lodash.clonedeep' -import { default as mapToArray } from 'lodash.map' +import { clone, filter, map as mapToArray } from 'lodash' import Plan from './plan' import { debug } from './utils' @@ -8,10 +6,6 @@ import { debug } from './utils' // =================================================================== export default class DensityPlan extends Plan { - constructor (xo, name, poolIds, options) { - super(xo, name, poolIds, options) - } - _checkRessourcesThresholds (objects, averages) { return filter(objects, object => 
averages[object.id].memoryFree > this._thresholds.memoryFree.low diff --git a/packages/xo-server-load-balancer/src/index.js b/packages/xo-server-load-balancer/src/index.js index e95f95803..5f0924254 100644 --- a/packages/xo-server-load-balancer/src/index.js +++ b/packages/xo-server-load-balancer/src/index.js @@ -1,9 +1,7 @@ import EventEmitter from 'events' import eventToPromise from 'event-to-promise' -import intersection from 'lodash.intersection' -import uniq from 'lodash.uniq' import { CronJob } from 'cron' -import { default as mapToArray } from 'lodash.map' +import { intersection, map as mapToArray, uniq } from 'lodash' import DensityPlan from './density-plan' import PerformancePlan from './performance-plan' diff --git a/packages/xo-server-load-balancer/src/index.spec.js b/packages/xo-server-load-balancer/src/index.spec.js index 6e9c776d2..2319bd7d5 100644 --- a/packages/xo-server-load-balancer/src/index.spec.js +++ b/packages/xo-server-load-balancer/src/index.spec.js @@ -8,6 +8,10 @@ import myLib from './' // =================================================================== -describe('myLib', () => { - // TODO +describe.skip('myLib', () => { + it('does something', () => { + // TODO: some real tests. 
+ + expect(myLib).to.exists() + }) }) diff --git a/packages/xo-server-load-balancer/src/performance-plan.js b/packages/xo-server-load-balancer/src/performance-plan.js index f372c2d21..fc0b3e6c2 100644 --- a/packages/xo-server-load-balancer/src/performance-plan.js +++ b/packages/xo-server-load-balancer/src/performance-plan.js @@ -1,6 +1,4 @@ -import filter from 'lodash.filter' -import find from 'lodash.find' -import { default as mapToArray } from 'lodash.map' +import { filter, find, map as mapToArray } from 'lodash' import Plan from './plan' import { debug } from './utils' @@ -21,10 +19,6 @@ function searchBestObject (objects, fun) { // =================================================================== export default class PerformancePlan extends Plan { - constructor (xo, name, poolIds, options) { - super(xo, name, poolIds, options) - } - _checkRessourcesThresholds (objects, averages) { return filter(objects, object => { const objectAverages = averages[object.id] diff --git a/packages/xo-server-load-balancer/src/plan.js b/packages/xo-server-load-balancer/src/plan.js index 274368355..72974a0dd 100644 --- a/packages/xo-server-load-balancer/src/plan.js +++ b/packages/xo-server-load-balancer/src/plan.js @@ -1,6 +1,4 @@ -import filter from 'lodash.filter' -import includes from 'lodash.includes' -import { default as mapToArray } from 'lodash.map' +import { filter, includes, map as mapToArray } from 'lodash' import { EXECUTION_DELAY, From 2a316b1ffa936ab615d03cf6e324b3c7b8a6aaee Mon Sep 17 00:00:00 2001 From: Julien Fontanet Date: Tue, 18 Oct 2016 17:31:46 +0200 Subject: [PATCH 47/50] chore(package): use constants in configuration schema --- packages/xo-server-load-balancer/src/index.js | 8 ++++++-- packages/xo-server-load-balancer/src/plan.js | 4 ++-- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/packages/xo-server-load-balancer/src/index.js b/packages/xo-server-load-balancer/src/index.js index 5f0924254..d6218d738 100644 --- 
a/packages/xo-server-load-balancer/src/index.js +++ b/packages/xo-server-load-balancer/src/index.js @@ -5,6 +5,10 @@ import { intersection, map as mapToArray, uniq } from 'lodash' import DensityPlan from './density-plan' import PerformancePlan from './performance-plan' +import { + DEFAULT_CRITICAL_THRESHOLD_CPU, + DEFAULT_CRITICAL_THRESHOLD_MEMORY_FREE +} from './plan' import { EXECUTION_DELAY, debug @@ -61,12 +65,12 @@ export const configurationSchema = { cpu: { type: 'integer', title: 'CPU (%)', - default: 90 + default: DEFAULT_CRITICAL_THRESHOLD_CPU }, memoryFree: { type: 'integer', title: 'RAM, Free memory (MB)', - default: 64 + default: DEFAULT_CRITICAL_THRESHOLD_MEMORY_FREE } } }, diff --git a/packages/xo-server-load-balancer/src/plan.js b/packages/xo-server-load-balancer/src/plan.js index 72974a0dd..e7d02f7a1 100644 --- a/packages/xo-server-load-balancer/src/plan.js +++ b/packages/xo-server-load-balancer/src/plan.js @@ -8,10 +8,10 @@ import { const MINUTES_OF_HISTORICAL_DATA = 30 // CPU threshold in percent. -const DEFAULT_CRITICAL_THRESHOLD_CPU = 90.0 +export const DEFAULT_CRITICAL_THRESHOLD_CPU = 90.0 // Memory threshold in MB. -const DEFAULT_CRITICAL_THRESHOLD_MEMORY_FREE = 64.0 +export const DEFAULT_CRITICAL_THRESHOLD_MEMORY_FREE = 64.0 // Thresholds factors. 
const HIGH_THRESHOLD_FACTOR = 0.85 From 0317d6a8624876f3513a228de6b13a0c5dedacb5 Mon Sep 17 00:00:00 2001 From: Julien Fontanet Date: Tue, 18 Oct 2016 17:33:29 +0200 Subject: [PATCH 48/50] chore(package): remove unused test env --- packages/xo-server-load-balancer/.mocha.js | 5 ----- packages/xo-server-load-balancer/.mocha.opts | 1 - packages/xo-server-load-balancer/package.json | 11 ++--------- .../xo-server-load-balancer/src/index.spec.js | 17 ----------------- 4 files changed, 2 insertions(+), 32 deletions(-) delete mode 100644 packages/xo-server-load-balancer/.mocha.js delete mode 100644 packages/xo-server-load-balancer/.mocha.opts delete mode 100644 packages/xo-server-load-balancer/src/index.spec.js diff --git a/packages/xo-server-load-balancer/.mocha.js b/packages/xo-server-load-balancer/.mocha.js deleted file mode 100644 index e6d84e403..000000000 --- a/packages/xo-server-load-balancer/.mocha.js +++ /dev/null @@ -1,5 +0,0 @@ -Error.stackTraceLimit = 100 - -try { require('trace') } catch (_) {} -try { require('clarify') } catch (_) {} -try { require('source-map-support/register') } catch (_) {} diff --git a/packages/xo-server-load-balancer/.mocha.opts b/packages/xo-server-load-balancer/.mocha.opts deleted file mode 100644 index 6cfd94898..000000000 --- a/packages/xo-server-load-balancer/.mocha.opts +++ /dev/null @@ -1 +0,0 @@ ---require ./.mocha.js diff --git a/packages/xo-server-load-balancer/package.json b/packages/xo-server-load-balancer/package.json index b4b4847c4..e0754373f 100644 --- a/packages/xo-server-load-balancer/package.json +++ b/packages/xo-server-load-balancer/package.json @@ -42,24 +42,17 @@ "babel-plugin-transform-runtime": "^6.15.0", "babel-preset-es2015": "^6.16.0", "babel-preset-stage-0": "^6.16.0", - "clarify": "^2.0.0", "dependency-check": "^2.5.1", "ghooks": "^1.3.2", - "mocha": "^3.1.0", - "must": "^0.13.1", - "source-map-support": "^0.4.3", - "standard": "^8.2.0", - "trace": "^2.0.1" + "standard": "^8.2.0" }, "scripts": { "build": 
"NODE_ENV=production babel --source-maps --out-dir=dist/ src/", "depcheck": "dependency-check ./package.json", "dev": "NODE_DEV=development babel --watch --source-maps --out-dir=dist/ src/", - "dev-test": "mocha --opts .mocha.opts --watch --reporter=min \"dist/**/*.spec.js\"", "lint": "standard", "posttest": "npm run lint && npm run depcheck", - "prepublish": "npm run build", - "test": "mocha --opts .mocha.opts \"dist/**/*.spec.js\"" + "prepublish": "npm run build" }, "babel": { "plugins": [ diff --git a/packages/xo-server-load-balancer/src/index.spec.js b/packages/xo-server-load-balancer/src/index.spec.js deleted file mode 100644 index 2319bd7d5..000000000 --- a/packages/xo-server-load-balancer/src/index.spec.js +++ /dev/null @@ -1,17 +0,0 @@ -/* eslint-env mocha */ - -import expect from 'must' - -// =================================================================== - -import myLib from './' - -// =================================================================== - -describe.skip('myLib', () => { - it('does something', () => { - // TODO: some real tests. 
- - expect(myLib).to.exists() - }) -}) From 01686b8e60b7c6a95a3247e73a5e2f44416f3c45 Mon Sep 17 00:00:00 2001 From: Julien Fontanet Date: Tue, 18 Oct 2016 17:34:06 +0200 Subject: [PATCH 49/50] fix(package): behave with missing thresholds object --- packages/xo-server-load-balancer/src/plan.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/xo-server-load-balancer/src/plan.js b/packages/xo-server-load-balancer/src/plan.js index e7d02f7a1..4ac09c057 100644 --- a/packages/xo-server-load-balancer/src/plan.js +++ b/packages/xo-server-load-balancer/src/plan.js @@ -99,10 +99,10 @@ export default class Plan { this._excludedHosts = excludedHosts this._thresholds = { cpu: { - critical: numberOrDefault(thresholds.cpu, DEFAULT_CRITICAL_THRESHOLD_CPU) + critical: numberOrDefault(thresholds && thresholds.cpu, DEFAULT_CRITICAL_THRESHOLD_CPU) }, memoryFree: { - critical: numberOrDefault(thresholds.memoryFree, DEFAULT_CRITICAL_THRESHOLD_MEMORY_FREE) * 1024 + critical: numberOrDefault(thresholds && thresholds.memoryFree, DEFAULT_CRITICAL_THRESHOLD_MEMORY_FREE) * 1024 } } From 52774c7d6dea4f6ffcc8fd880ad9881f1918fbc0 Mon Sep 17 00:00:00 2001 From: Julien Fontanet Date: Wed, 19 Oct 2016 13:17:10 +0200 Subject: [PATCH 50/50] 0.3.1 --- packages/xo-server-load-balancer/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/xo-server-load-balancer/package.json b/packages/xo-server-load-balancer/package.json index e0754373f..ceb2aa763 100644 --- a/packages/xo-server-load-balancer/package.json +++ b/packages/xo-server-load-balancer/package.json @@ -1,6 +1,6 @@ { "name": "xo-server-load-balancer", - "version": "0.3.0", + "version": "0.3.1", "license": "AGPL-3.0", "description": "Load balancer for XO-Server", "keywords": [