Compare commits
271 Commits
fix-ips-ba...pierre-fix
| Author | SHA1 | Date | |
|---|---|---|---|
| | 67f7ce40da | | |
| | a00e3e6f41 | | |
| | 82ba02b4f3 | | |
| | d70ae6ebe3 | | |
| | f6c411a261 | | |
| | b606eaf9ee | | |
| | 516edd1b09 | | |
| | e31c3b1f27 | | |
| | 619818f968 | | |
| | 79a80a1adf | | |
| | 7cef48b995 | | |
| | 7d3d1b1544 | | |
| | 3f935f271d | | |
| | 89935a1517 | | |
| | c67af4fb2f | | |
| | 0b4adc36a0 | | |
| | 44776b795f | | |
| | bec73a1c43 | | |
| | 6ce35fdfa8 | | |
| | dabc2d0442 | | |
| | 0527d3bc2b | | |
| | a7cfb71070 | | |
| | 52003bedb4 | | |
| | a02fb8e739 | | |
| | 60fad187a2 | | |
| | e8cd1e070f | | |
| | de6620be12 | | |
| | 72dee73faa | | |
| | d8ce27907d | | |
| | 3d8891d518 | | |
| | 97742ccdc2 | | |
| | 82fec86179 | | |
| | be83b53875 | | |
| | 85fda0c18b | | |
| | a89f8fbd9c | | |
| | efdfa1f2f7 | | |
| | 5bd61e3fb0 | | |
| | a45f83b646 | | |
| | 16135b8e37 | | |
| | b011e8656f | | |
| | 215432be6c | | |
| | d373760412 | | |
| | a1de04e285 | | |
| | 23e16732fd | | |
| | 5efac84b8b | | |
| | 2cbc7b7d7d | | |
| | b1acbaecc2 | | |
| | 6d61e8efff | | |
| | 482e6b3cb3 | | |
| | 445b13ec29 | | |
| | 116af372dc | | |
| | 970952783c | | |
| | e59cf13456 | | |
| | d0cfddce19 | | |
| | 30b2a8dd8d | | |
| | b811ee7e7e | | |
| | ebe7f6784a | | |
| | e40792378f | | |
| | cc9c8fb891 | | |
| | ca06c4d403 | | |
| | c8aa058ede | | |
| | 34169d685e | | |
| | d5a9d36815 | | |
| | c7aaeca530 | | |
| | 863e4f0c19 | | |
| | 0226e0553d | | |
| | 02995d278f | | |
| | 78a2104bcc | | |
| | 4e9d143996 | | |
| | 0811e5c765 | | |
| | b2cf2edd43 | | |
| | db493f6887 | | |
| | 2cd0dec480 | | |
| | 29024888fb | | |
| | dbcaab2bc1 | | |
| | 28d445ae1c | | |
| | 530360f859 | | |
| | 738c55bad0 | | |
| | 4b09bc85f5 | | |
| | 5bc67d3570 | | |
| | f7ae6222b7 | | |
| | 1e50dab093 | | |
| | d1935bf778 | | |
| | 70a346d11e | | |
| | fd39a2063d | | |
| | 682512fffe | | |
| | b13f91ec8d | | |
| | a140fc09ac | | |
| | f403a7e753 | | |
| | dfe5f412eb | | |
| | 033d784c52 | | |
| | 62c3fa13ca | | |
| | ce338cb6ca | | |
| | 003eadc8fd | | |
| | 8782151c5d | | |
| | b22c74c5a8 | | |
| | 254fa36c01 | | |
| | a3e4253005 | | |
| | 2388593b8a | | |
| | cdced63c1b | | |
| | 45e1d1ecef | | |
| | f44447ce71 | | |
| | 238e9cd8cc | | |
| | e171d8ed0e | | |
| | bd3399e04b | | |
| | 2b4443f333 | | |
| | ab6548122f | | |
| | f81573d999 | | |
| | 84ccebb858 | | |
| | 530bc50e7c | | |
| | 57e490fc23 | | |
| | 61e902c094 | | |
| | 8378ba77d6 | | |
| | c9e30b74e2 | | |
| | af944fd2e3 | | |
| | bcc0e76f1d | | |
| | 95078d250a | | |
| | 4b16a2c0c5 | | |
| | b8524732ce | | |
| | 814fee4f47 | | |
| | d641d35d5c | | |
| | 7464d95b57 | | |
| | 8924a64622 | | |
| | 3d6aa667fe | | |
| | 147c3d2e7b | | |
| | ac298c3be3 | | |
| | e88848c44a | | |
| | cd518e3e4c | | |
| | 114d521636 | | |
| | 24d4fad394 | | |
| | 6d8785e689 | | |
| | 508cbf0a82 | | |
| | c83f56166d | | |
| | 7199e1a214 | | |
| | 85d55e97e7 | | |
| | cc2c71c076 | | |
| | 9ca273b2c4 | | |
| | b85c2f35b6 | | |
| | abad2944fb | | |
| | 2827544409 | | |
| | db0a399da1 | | |
| | 87d2096ed7 | | |
| | d47f66548d | | |
| | fb9425e503 | | |
| | d75580e11d | | |
| | a64960ddd0 | | |
| | 876850a7a7 | | |
| | 0b689d99fa | | |
| | cd0064d19c | | |
| | b4baa6cd7b | | |
| | 1ab2cdeed3 | | |
| | 83c0281a33 | | |
| | 437b0b0240 | | |
| | 5c48697eda | | |
| | 0feea5b7a6 | | |
| | 9eb27fdd5e | | |
| | 6e4a64232a | | |
| | 4bbedeeea9 | | |
| | b5c004e870 | | |
| | a0ef1ab4f4 | | |
| | c9172a11a8 | | |
| | a0feee912e | | |
| | 8e42b7b891 | | |
| | 147d7e773f | | |
| | 759ab1c5ee | | |
| | 4c1581d845 | | |
| | e1c6e4347a | | |
| | 256f117bbf | | |
| | 3b0acf82c7 | | |
| | 3a12f3d6c7 | | |
| | 335ac5a595 | | |
| | d0e2e97007 | | |
| | 85e1baa2dc | | |
| | 0c66c39211 | | |
| | 250afa38ca | | |
| | b7e58eeb3f | | |
| | 6f024d78a6 | | |
| | 1e48096f36 | | |
| | ccf6a1bedb | | |
| | 3639edb4db | | |
| | d3bbe0b3b6 | | |
| | e8ab101993 | | |
| | ef98b10063 | | |
| | 84943e7fe6 | | |
| | d0fa5ff385 | | |
| | 3609559ced | | |
| | 950c780122 | | |
| | 32b510ef40 | | |
| | 4cc33ed29b | | |
| | d72906a6ba | | |
| | d577b51a86 | | |
| | 63d4865427 | | |
| | 1355477e37 | | |
| | d50e1b4e02 | | |
| | 606ae41698 | | |
| | b6ee5ae779 | | |
| | aeb1b2c30f | | |
| | 35ace281cc | | |
| | 6cd056eee5 | | |
| | 6c664bfaa7 | | |
| | 8890d445dc | | |
| | 7a7db1ea08 | | |
| | e585a3e5c4 | | |
| | 7336032009 | | |
| | d29bc63b24 | | |
| | 2a9bd1d4cb | | |
| | 6578c14292 | | |
| | ceee93883f | | |
| | dae8fd2370 | | |
| | 48f8322390 | | |
| | 7df833bd9f | | |
| | 2d639e191a | | |
| | db758c6806 | | |
| | 6822e4ac0c | | |
| | 14b1b07ecd | | |
| | 3c71a20bb2 | | |
| | 8f73619ba1 | | |
| | 0ee6e5a35f | | |
| | 22692757e6 | | |
| | ed9584270d | | |
| | 5a5c35a1c9 | | |
| | 1f842e4fe4 | | |
| | 9275c4a6d6 | | |
| | 9c7e61cbf3 | | |
| | 69a6066fd8 | | |
| | 47d2d09e50 | | |
| | da648e0a78 | | |
| | 9e1c526d51 | | |
| | d81998f91c | | |
| | a717d9b8f3 | | |
| | 31d1243a14 | | |
| | 2424222964 | | |
| | 370b245d65 | | |
| | c4dfcc27e3 | | |
| | dfa870a777 | | |
| | 572375fff4 | | |
| | ed1caee9f8 | | |
| | 6f7757c81b | | |
| | 4c92965313 | | |
| | bbce96eb67 | | |
| | e3cb7bd4c7 | | |
| | 79599bf831 | | |
| | 1ab67bc225 | | |
| | 37df213771 | | |
| | d48ffdb14f | | |
| | 766cdc9f59 | | |
| | 21a40c9d14 | | |
| | 9275e9d006 | | |
| | ef9fe025e0 | | |
| | 05694a8cda | | |
| | e6304cb028 | | |
| | b2d00784a4 | | |
| | ae31ebdc33 | | |
| | a2d50b380f | | |
| | 654e8fd13f | | |
| | bcd44e4b2d | | |
| | 5200793744 | | |
| | abcb29391c | | |
| | 6a682dc143 | | |
| | d93d30537f | | |
| | 377e88ff36 | | |
| | 1733290c02 | | |
| | e702ccc48a | | |
| | ba729c493b | | |
| | 1c55950b7e | | |
| | 18c8282bac | | |
| | 1d20456853 | | |
| | 7e32d0ae10 | | |
| | 5d33e45eae | | |
| | 1590930ef9 | | |
| | 8186d34f4e | | |
@@ -35,6 +35,9 @@ module.exports = {
},
},
rules: {
// disabled because XAPI objects are using camel case
camelcase: ['off'],

'no-console': ['error', { allow: ['warn', 'error'] }],
'no-var': 'error',
'node/no-extraneous-import': 'error',

@@ -16,7 +16,7 @@
},
"dependencies": {
"golike-defer": "^0.4.1",
"xen-api": "^0.25.1"
"xen-api": "^0.27.1"
},
"scripts": {
"postversion": "npm publish"

@@ -1,6 +1,6 @@
{
"name": "@xen-orchestra/fs",
"version": "0.9.0",
"version": "0.10.1",
"license": "AGPL-3.0",
"description": "The File System for Xen Orchestra backups.",
"keywords": [],
@@ -28,8 +28,9 @@
"execa": "^1.0.0",
"fs-extra": "^8.0.1",
"get-stream": "^4.0.0",
"limit-concurrency-decorator": "^0.4.0",
"lodash": "^4.17.4",
"promise-toolbox": "^0.12.1",
"promise-toolbox": "^0.13.0",
"readable-stream": "^3.0.6",
"through2": "^3.0.0",
"tmp": "^0.1.0",
@@ -40,6 +41,7 @@
"@babel/core": "^7.0.0",
"@babel/plugin-proposal-decorators": "^7.1.6",
"@babel/plugin-proposal-function-bind": "^7.0.0",
"@babel/plugin-proposal-nullish-coalescing-operator": "^7.4.4",
"@babel/preset-env": "^7.0.0",
"@babel/preset-flow": "^7.0.0",
"async-iterator-to-stream": "^1.1.0",

@@ -4,6 +4,7 @@
import getStream from 'get-stream'

import asyncMap from '@xen-orchestra/async-map'
import limit from 'limit-concurrency-decorator'
import path from 'path'
import synchronized from 'decorator-synchronized'
import { fromCallback, fromEvent, ignoreErrors, timeout } from 'promise-toolbox'
@@ -31,6 +32,7 @@ const computeRate = (hrtime: number[], size: number) => {
}

const DEFAULT_TIMEOUT = 6e5 // 10 min
const DEFAULT_MAX_PARALLEL_OPERATIONS = 10

const ignoreEnoent = error => {
if (error == null || error.code !== 'ENOENT') {
@@ -83,6 +85,25 @@ export default class RemoteHandlerAbstract {
}
}
;({ timeout: this._timeout = DEFAULT_TIMEOUT } = options)

const sharedLimit = limit(
options.maxParallelOperations ?? DEFAULT_MAX_PARALLEL_OPERATIONS
)
this.closeFile = sharedLimit(this.closeFile)
this.getInfo = sharedLimit(this.getInfo)
this.getSize = sharedLimit(this.getSize)
this.list = sharedLimit(this.list)
this.mkdir = sharedLimit(this.mkdir)
this.openFile = sharedLimit(this.openFile)
this.outputFile = sharedLimit(this.outputFile)
this.read = sharedLimit(this.read)
this.readFile = sharedLimit(this.readFile)
this.rename = sharedLimit(this.rename)
this.rmdir = sharedLimit(this.rmdir)
this.truncate = sharedLimit(this.truncate)
this.unlink = sharedLimit(this.unlink)
this.write = sharedLimit(this.write)
this.writeFile = sharedLimit(this.writeFile)
}

// Public members
@@ -24,6 +24,19 @@ log.info('this information is relevant to the user')
log.warn('something went wrong but did not prevent current action')
log.error('something went wrong')
log.fatal('service/app is going down')

// you can add contextual info
log.debug('new API request', {
method: 'foo',
params: [ 'bar', 'baz' ],
user: 'qux'
})

// by convention, errors go into the `error` field
log.error('could not join server', {
error,
server: 'example.org',
})
```

Then, at application level, configure how the logs are handled:
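The configuration snippet that follows this sentence in the README is outside the hunk shown above. Purely as an illustration, here is a minimal sketch of such an application-level setup; it assumes a `configure` helper and a console transport factory consistent with the `transports/console` and `transports/syslog` modules touched later in this diff (the import paths and option names are assumptions, not taken from this changeset):

```js
// Hypothetical wiring — module paths and options are assumptions.
import { configure } from '@xen-orchestra/log/configure'
import transportConsole from '@xen-orchestra/log/transports/console'

configure([
  {
    filter: 'xo:*', // only keep namespaces matching this pattern
    level: 'warn', // and only warnings or worse
    transport: transportConsole(),
  },
])
```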
@@ -31,7 +31,7 @@
},
"dependencies": {
"lodash": "^4.17.4",
"promise-toolbox": "^0.12.1"
"promise-toolbox": "^0.13.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",

@@ -1,10 +1,12 @@
import LEVELS, { NAMES } from '../levels'

// Bind console methods (necessary for browsers)
/* eslint-disable no-console */
const debugConsole = console.log.bind(console)
const infoConsole = console.info.bind(console)
const warnConsole = console.warn.bind(console)
const errorConsole = console.error.bind(console)
/* eslint-enable no-console */

const { ERROR, INFO, WARN } = LEVELS

@@ -1,7 +1,6 @@
import fromCallback from 'promise-toolbox/fromCallback'
import splitHost from 'split-host' // eslint-disable-line node/no-extraneous-import node/no-missing-import
import startsWith from 'lodash/startsWith'
import { createClient, Facility, Severity, Transport } from 'syslog-client' // eslint-disable-line node/no-extraneous-import node/no-missing-import
import splitHost from 'split-host'
import { createClient, Facility, Severity, Transport } from 'syslog-client'

import LEVELS from '../levels'

@@ -19,10 +18,10 @@ const facility = Facility.User
export default target => {
const opts = {}
if (target !== undefined) {
if (startsWith(target, 'tcp://')) {
if (target.startsWith('tcp://')) {
target = target.slice(6)
opts.transport = Transport.Tcp
} else if (startsWith(target, 'udp://')) {
} else if (target.startsWith('udp://')) {
target = target.slice(6)
opts.transport = Transport.Udp
}
99  CHANGELOG.md
@@ -1,22 +1,115 @@
# ChangeLog

## **next** (2019-05-14)
## **next**

### Enhancements

- [Stats] Ability to display last day stats [#4160](https://github.com/vatesfr/xen-orchestra/issues/4160) (PR [#4168](https://github.com/vatesfr/xen-orchestra/pull/4168))
- [Settings/servers] Display servers connection issues [#4300](https://github.com/vatesfr/xen-orchestra/issues/4300) (PR [#4310](https://github.com/vatesfr/xen-orchestra/pull/4310))
- [VM] Permission to revert to any snapshot for VM operators [#3928](https://github.com/vatesfr/xen-orchestra/issues/3928) (PR [#4247](https://github.com/vatesfr/xen-orchestra/pull/4247))
- [VM] Show current operations and progress [#3811](https://github.com/vatesfr/xen-orchestra/issues/3811) (PR [#3982](https://github.com/vatesfr/xen-orchestra/pull/3982))
- [SR/General] Improve SR usage graph [#3608](https://github.com/vatesfr/xen-orchestra/issues/3608) (PR [#3830](https://github.com/vatesfr/xen-orchestra/pull/3830))
- [Backup NG/New] Generate default schedule if no schedule is specified [#4036](https://github.com/vatesfr/xen-orchestra/issues/4036) (PR [#4183](https://github.com/vatesfr/xen-orchestra/pull/4183))
- [Host/Advanced] Ability to edit iSCSI IQN [#4048](https://github.com/vatesfr/xen-orchestra/issues/4048) (PR [#4208](https://github.com/vatesfr/xen-orchestra/pull/4208))
- [Backup NG] Ability to bypass unhealthy VDI chains check [#4324](https://github.com/vatesfr/xen-orchestra/issues/4324) (PR [#4340](https://github.com/vatesfr/xen-orchestra/pull/4340))
- [Pool] Ability to add multiple hosts on the pool [#2402](https://github.com/vatesfr/xen-orchestra/issues/2402) (PR [#3716](https://github.com/vatesfr/xen-orchestra/pull/3716))
- [VM/console] Multiline copy/pasting [#4261](https://github.com/vatesfr/xen-orchestra/issues/4261) (PR [#4341](https://github.com/vatesfr/xen-orchestra/pull/4341))
- [VM,host] Improved state icons/pills (colors and tooltips) (PR [#4363](https://github.com/vatesfr/xen-orchestra/pull/4363))

### Bug fixes

- [Settings/Servers] Fix read-only setting toggling
- [SDN Controller] Do not choose physical PIF without IP configuration for tunnels. (PR [#4319](https://github.com/vatesfr/xen-orchestra/pull/4319))
- [Xen servers] Fix `no connection found for object` error if pool master is reinstalled [#4299](https://github.com/vatesfr/xen-orchestra/issues/4299) (PR [#4302](https://github.com/vatesfr/xen-orchestra/pull/4302))
- [Backup-ng/restore] Display correct size for full VM backup [#4316](https://github.com/vatesfr/xen-orchestra/issues/4316) (PR [#4332](https://github.com/vatesfr/xen-orchestra/pull/4332))
- [VM/tab-advanced] Fix CPU limits edition (PR [#4337](https://github.com/vatesfr/xen-orchestra/pull/4337))
- [Remotes] Fix `EIO` errors due to massive parallel fs operations [#4323](https://github.com/vatesfr/xen-orchestra/issues/4323) (PR [#4330](https://github.com/vatesfr/xen-orchestra/pull/4330))
- [VM/Advanced] Fix virtualization mode switch (PV/HVM) (PR [#4349](https://github.com/vatesfr/xen-orchestra/pull/4349))
- [Task] fix hidden notification by search field [#3874](https://github.com/vatesfr/xen-orchestra/issues/3874) (PR [#4305](https://github.com/vatesfr/xen-orchestra/pull/4305))
- [VM] Fail to change affinity (PR [#4361](https://github.com/vatesfr/xen-orchestra/pull/4361))
- [VM] Number of CPUs not correctly changed on running VMs (PR [#4360](https://github.com/vatesfr/xen-orchestra/pull/4360))

### Released packages

- @xen-orchestra/fs v0.10.1
- xo-server-sdn-controller v0.1.1
- xen-api v0.27.1
- xo-server v5.46.0
- xo-web v5.46.0

## **5.36.0** (2019-06-27)



### Highlights

- [SR/new] Create ZFS storage [#4260](https://github.com/vatesfr/xen-orchestra/issues/4260) (PR [#4266](https://github.com/vatesfr/xen-orchestra/pull/4266))
- [Host/advanced] Fix host CPU hyperthreading detection [#4262](https://github.com/vatesfr/xen-orchestra/issues/4262) (PR [#4285](https://github.com/vatesfr/xen-orchestra/pull/4285))
- [VM/Advanced] Ability to use UEFI instead of BIOS [#4264](https://github.com/vatesfr/xen-orchestra/issues/4264) (PR [#4268](https://github.com/vatesfr/xen-orchestra/pull/4268))
- [Backup-ng/restore] Display size for full VM backup [#4009](https://github.com/vatesfr/xen-orchestra/issues/4009) (PR [#4245](https://github.com/vatesfr/xen-orchestra/pull/4245))
- [Sr/new] Ability to select NFS version when creating NFS storage [#3951](https://github.com/vatesfr/xen-orchestra/issues/3951) (PR [#4277](https://github.com/vatesfr/xen-orchestra/pull/4277))
- [Host/storages, SR/hosts] Display PBD details [#4264](https://github.com/vatesfr/xen-orchestra/issues/4161) (PR [#4268](https://github.com/vatesfr/xen-orchestra/pull/4284))
- [auth-saml] Improve compatibility with Microsoft Azure Active Directory (PR [#4294](https://github.com/vatesfr/xen-orchestra/pull/4294))

### Enhancements

- [Host] Display warning when "Citrix Hypervisor" license has restrictions [#4251](https://github.com/vatesfr/xen-orchestra/issues/4164) (PR [#4235](https://github.com/vatesfr/xen-orchestra/pull/4279))
- [VM/Backup] Create backup bulk action [#2573](https://github.com/vatesfr/xen-orchestra/issues/2573) (PR [#4257](https://github.com/vatesfr/xen-orchestra/pull/4257))
- [Host] Display warning when host's time differs too much from XOA's time [#4113](https://github.com/vatesfr/xen-orchestra/issues/4113) (PR [#4173](https://github.com/vatesfr/xen-orchestra/pull/4173))
- [VM/network] Display and set bandwidth rate-limit of a VIF [#4215](https://github.com/vatesfr/xen-orchestra/issues/4215) (PR [#4293](https://github.com/vatesfr/xen-orchestra/pull/4293))
- [SDN Controller] New plugin which enables creating pool-wide private networks [xcp-ng/xcp#175](https://github.com/xcp-ng/xcp/issues/175) (PR [#4269](https://github.com/vatesfr/xen-orchestra/pull/4269))

### Bug fixes

- [XOA] Don't require editing the _email_ field in case of re-registration (PR [#4259](https://github.com/vatesfr/xen-orchestra/pull/4259))
- [Metadata backup] Missing XAPIs should trigger a failure job [#4281](https://github.com/vatesfr/xen-orchestra/issues/4281) (PR [#4283](https://github.com/vatesfr/xen-orchestra/pull/4283))
- [iSCSI] Fix fibre channel paths display [#4291](https://github.com/vatesfr/xen-orchestra/issues/4291) (PR [#4303](https://github.com/vatesfr/xen-orchestra/pull/4303))
- [New VM] Fix tooltips not displayed on disabled elements in some browsers (e.g. Google Chrome) [#4304](https://github.com/vatesfr/xen-orchestra/issues/4304) (PR [#4309](https://github.com/vatesfr/xen-orchestra/pull/4309))

### Released packages

- xo-server-auth-ldap v0.6.5
- xen-api v0.26.0
- xo-server-sdn-controller v0.1
- xo-server-auth-saml v0.6.0
- xo-server-backup-reports v0.16.2
- xo-server v5.44.0
- xo-web v5.44.0

## **5.35.0** (2019-05-29)



### Enhancements

- [VM/general] Display 'Started... ago' instead of 'Halted... ago' for paused state [#3750](https://github.com/vatesfr/xen-orchestra/issues/3750) (PR [#4170](https://github.com/vatesfr/xen-orchestra/pull/4170))
- [Metadata backup] Ability to define when the backup report will be sent (PR [#4149](https://github.com/vatesfr/xen-orchestra/pull/4149))
- [XOA/Update] Ability to select release channel [#4200](https://github.com/vatesfr/xen-orchestra/issues/4200) (PR [#4202](https://github.com/vatesfr/xen-orchestra/pull/4202))
- [User] Forget connection tokens on password change or on demand [#4214](https://github.com/vatesfr/xen-orchestra/issues/4214) (PR [#4224](https://github.com/vatesfr/xen-orchestra/pull/4224))
- [Settings/Logs] LICENCE_RESTRICTION errors: suggest XCP-ng as an Open Source alternative [#3876](https://github.com/vatesfr/xen-orchestra/issues/3876) (PR [#4238](https://github.com/vatesfr/xen-orchestra/pull/4238))
- [VM/Migrate] Display VDI size on migrate modal [#2534](https://github.com/vatesfr/xen-orchestra/issues/2534) (PR [#4250](https://github.com/vatesfr/xen-orchestra/pull/4250))
- [Host] Display hyperthreading status on advanced tab [#4262](https://github.com/vatesfr/xen-orchestra/issues/4262) (PR [#4263](https://github.com/vatesfr/xen-orchestra/pull/4263))

### Bug fixes

- [Pool/Patches] Fix "an error has occurred" in "Applied patches" [#4192](https://github.com/vatesfr/xen-orchestra/issues/4192) (PR [#4193](https://github.com/vatesfr/xen-orchestra/pull/4193))
- [Backup NG] Fix report sent even though "Never" is selected [#4092](https://github.com/vatesfr/xen-orchestra/issues/4092) (PR [#4178](https://github.com/vatesfr/xen-orchestra/pull/4178))
- [Remotes] Fix issues after a config import (PR [#4197](https://github.com/vatesfr/xen-orchestra/pull/4197))
- [Charts] Fixed the chart lines sometimes changing order/color (PR [#4221](https://github.com/vatesfr/xen-orchestra/pull/4221))
- Prevent non-admin users to access admin pages with URL (PR [#4220](https://github.com/vatesfr/xen-orchestra/pull/4220))
- [Upgrade] Fix alert before upgrade while running backup jobs [#4164](https://github.com/vatesfr/xen-orchestra/issues/4164) (PR [#4235](https://github.com/vatesfr/xen-orchestra/pull/4235))
- [Import] Fix import OVA files (PR [#4232](https://github.com/vatesfr/xen-orchestra/pull/4232))
- [VM/network] Fix duplicate IPv4 (PR [#4239](https://github.com/vatesfr/xen-orchestra/pull/4239))
- [Remotes] Fix disconnected remotes which may appear to work
- [Host] Fix incorrect hypervisor name [#4246](https://github.com/vatesfr/xen-orchestra/issues/4246) (PR [#4248](https://github.com/vatesfr/xen-orchestra/pull/4248))

### Released packages

- xo-server-backup-reports v0.16.1
- @xen-orchestra/fs v0.9.0
- vhd-lib v0.7.0
- xo-server v5.41.0
- xo-web v5.41.0
- xo-server v5.42.1
- xo-web v5.42.1

## **5.34.0** (2019-04-30)

@@ -1,25 +1,27 @@
> This file contains all changes that have not been released yet.
>
> Keep in mind the changelog is addressed to **users** and should be
> understandable by them.

### Enhancements

- [VM/general] Display 'Started... ago' instead of 'Halted... ago' for paused state [#3750](https://github.com/vatesfr/xen-orchestra/issues/3750) (PR [#4170](https://github.com/vatesfr/xen-orchestra/pull/4170))
- [Metadata backup] Ability to define when the backup report will be sent (PR [#4149](https://github.com/vatesfr/xen-orchestra/pull/4149))
- [XOA/Update] Ability to select release channel [#4200](https://github.com/vatesfr/xen-orchestra/issues/4200) (PR [#4202](https://github.com/vatesfr/xen-orchestra/pull/4202))
- [User] Forget connection tokens on password change or on demand [#4214](https://github.com/vatesfr/xen-orchestra/issues/4214) (PR [#4224](https://github.com/vatesfr/xen-orchestra/pull/4224))
- [Settings/Logs] LICENCE_RESTRICTION errors: suggest XCP-ng as an Open Source alternative [#3876](https://github.com/vatesfr/xen-orchestra/issues/3876) (PR [#4238](https://github.com/vatesfr/xen-orchestra/pull/4238))
- [VM/Migrate] Display VDI size on migrate modal [#2534](https://github.com/vatesfr/xen-orchestra/issues/2534) (PR [#4250](https://github.com/vatesfr/xen-orchestra/pull/4250))
> Users must be able to say: “Nice enhancement, I'm eager to test it”

- [VM/copy] Only show zstd option when it's supported [#3892](https://github.com/vatesfr/xen-orchestra/issues/3892) (PR [#4326](https://github.com/vatesfr/xen-orchestra/pull/4326))

### Bug fixes

- [Charts] Fixed the chart lines sometimes changing order/color (PR [#4221](https://github.com/vatesfr/xen-orchestra/pull/4221))
- Prevent non-admin users to access admin pages with URL
- [Upgrade] Fix alert before upgrade while running backup jobs (PR [#4235](https://github.com/vatesfr/xen-orchestra/pull/4235))
- [Import] Fix import OVA files (PR [#4232](https://github.com/vatesfr/xen-orchestra/pull/4232))
- [VM/network] Fix duplicate IPv4 (PR [#4239](https://github.com/vatesfr/xen-orchestra/pull/4239))
- [Remotes] Fix disconnected remotes which may appear to work
- [Host] Fix incorrect hypervisor name [#4246](https://github.com/vatesfr/xen-orchestra/issues/4246) (PR [#4248](https://github.com/vatesfr/xen-orchestra/pull/4248))
> Users must be able to say: “I had this issue, happy to know it's fixed”

- [SDN Controller] Better detect host shutting down to adapt network topology (PR [#4314](https://github.com/vatesfr/xen-orchestra/pull/4314))

### Released packages

- xo-server v5.42.0
- xo-web v5.42.0
> Packages will be released in the order they are here, therefore, they should
> be listed by inverse order of dependency.
>
> Rule of thumb: add packages on top.

- xo-server-sdn-controller v0.1.2
- xo-server v5.47.0
- xo-web v5.47.0

12  package.json
@@ -6,8 +6,8 @@
"babel-eslint": "^10.0.1",
"babel-jest": "^24.1.0",
"benchmark": "^2.1.4",
"eslint": "^5.1.0",
"eslint-config-prettier": "^4.1.0",
"eslint": "^6.0.1",
"eslint-config-prettier": "^6.0.0",
"eslint-config-standard": "12.0.0",
"eslint-config-standard-jsx": "^6.0.2",
"eslint-plugin-eslint-comments": "^3.1.1",
@@ -17,13 +17,13 @@
"eslint-plugin-react": "^7.6.1",
"eslint-plugin-standard": "^4.0.0",
"exec-promise": "^0.7.0",
"flow-bin": "^0.98.0",
"globby": "^9.0.0",
"husky": "^2.2.0",
"flow-bin": "^0.102.0",
"globby": "^10.0.0",
"husky": "^3.0.0",
"jest": "^24.1.0",
"lodash": "^4.17.4",
"prettier": "^1.10.2",
"promise-toolbox": "^0.12.1",
"promise-toolbox": "^0.13.0",
"sorted-object": "^2.0.1"
},
"engines": {

@@ -27,7 +27,7 @@
"node": ">=6"
},
"dependencies": {
"@xen-orchestra/fs": "^0.9.0",
"@xen-orchestra/fs": "^0.10.1",
"cli-progress": "^2.0.0",
"exec-promise": "^0.7.0",
"getopts": "^2.2.3",
@@ -40,9 +40,9 @@
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^5.1.3",
"execa": "^1.0.0",
"execa": "^2.0.2",
"index-modules": "^0.3.0",
"promise-toolbox": "^0.12.1",
"promise-toolbox": "^0.13.0",
"rimraf": "^2.6.1",
"tmp": "^0.1.0"
},

@@ -26,7 +26,7 @@
"from2": "^2.3.0",
"fs-extra": "^8.0.1",
"limit-concurrency-decorator": "^0.4.0",
"promise-toolbox": "^0.12.1",
"promise-toolbox": "^0.13.0",
"struct-fu": "^1.2.0",
"uuid": "^3.0.1"
},
@@ -35,12 +35,12 @@
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"@babel/preset-flow": "^7.0.0",
"@xen-orchestra/fs": "^0.9.0",
"@xen-orchestra/fs": "^0.10.1",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^5.1.3",
"execa": "^1.0.0",
"execa": "^2.0.2",
"fs-promise": "^2.0.0",
"get-stream": "^4.0.0",
"get-stream": "^5.1.0",
"index-modules": "^0.3.0",
"readable-stream": "^3.0.6",
"rimraf": "^2.6.2",

@@ -364,9 +364,7 @@ export default class Vhd {
const offset = blockAddr + this.sectorsOfBitmap + beginSectorId

debug(
`writeBlockSectors at ${offset} block=${
block.id
}, sectors=${beginSectorId}...${endSectorId}`
`writeBlockSectors at ${offset} block=${block.id}, sectors=${beginSectorId}...${endSectorId}`
)

for (let i = beginSectorId; i < endSectorId; ++i) {

@@ -41,7 +41,7 @@
"human-format": "^0.10.0",
"lodash": "^4.17.4",
"pw": "^0.0.4",
"xen-api": "^0.25.1"
"xen-api": "^0.27.1"
},
"devDependencies": {
"@babel/cli": "^7.1.5",
@@ -82,7 +82,7 @@ console.log(xapi.pool.$master.$resident_VMs[0].name_label)
A CLI is provided to help exploration and discovery of the XAPI.

```
> xen-api https://xen1.company.net root
> xen-api xen1.company.net root
Password: ******
root@xen1.company.net> xapi.status
'connected'
@@ -92,6 +92,14 @@ root@xen1.company.net> xapi.pool.$master.name_label
'xen1'
```

You can optionally prefix the address by a protocol: `https://` (default) or `http://`.

In case of error due to invalid or self-signed certificates you can use the `--allow-unauthorized` flag (or `--au`):

```
> xen-api --au xen1.company.net root
```

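For reference, the CLI session above can also be reproduced programmatically; this is only a sketch, assuming the `createClient` export of this package and an `allowUnauthorized` option mirroring the CLI's `--au` flag:

```js
// Sketch only — option names are assumptions mirroring the CLI flags above.
const { createClient } = require('xen-api')

const xapi = createClient({
  url: 'https://xen1.company.net',
  auth: { user: 'root', password: 'secret' },
  allowUnauthorized: true, // counterpart of --allow-unauthorized / --au
})

xapi.connect().then(() => {
  console.log(xapi.status) // 'connected'
})
```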
To ease searches, `find()` and `findAll()` functions are available:

```

@@ -1,6 +1,6 @@
{
"name": "xen-api",
"version": "0.25.1",
"version": "0.27.1",
"license": "ISC",
"description": "Connector to the Xen API",
"keywords": [
@@ -46,7 +46,7 @@
"make-error": "^1.3.0",
"minimist": "^1.2.0",
"ms": "^2.1.1",
"promise-toolbox": "^0.12.1",
"promise-toolbox": "^0.13.0",
"pw": "0.0.4",
"xmlrpc": "^1.3.2",
"xo-collection": "^0.4.1"
@@ -99,6 +99,9 @@ export class Xapi extends EventEmitter {
this._sessionId = undefined
this._status = DISCONNECTED

this._watchEventsError = undefined
this._lastEventFetchedTimestamp = undefined

this._debounce = opts.debounce ?? 200
this._objects = new Collection()
this._objectsByRef = { __proto__: null }
@@ -168,22 +171,6 @@ export class Xapi extends EventEmitter {
try {
await this._sessionOpen()

// Uses introspection to list available types.
const types = (this._types = (await this._interruptOnDisconnect(
this._call('system.listMethods')
))
.filter(isGetAllRecordsMethod)
.map(method => method.slice(0, method.indexOf('.'))))
this._lcToTypes = { __proto__: null }
types.forEach(type => {
const lcType = type.toLowerCase()
if (lcType !== type) {
this._lcToTypes[lcType] = type
}
})

this._pool = (await this.getAllRecords('pool'))[0]

debug('%s: connected', this._humanId)
this._status = CONNECTED
this._resolveConnected()
@@ -495,6 +482,14 @@ export class Xapi extends EventEmitter {
return this._objectsFetched
}

get lastEventFetchedTimestamp() {
return this._lastEventFetchedTimestamp
}

get watchEventsError() {
return this._watchEventsError
}

// ensure we have received all events up to this call
//
// optionally returns the up to date object for the given ref
@@ -739,6 +734,28 @@ export class Xapi extends EventEmitter {
},
}
)

const oldPoolRef = this._pool?.$ref
this._pool = (await this.getAllRecords('pool'))[0]

// if the pool ref has changed, it means that the XAPI has been restarted or
// it's not the same XAPI, we need to refetch the available types and reset
// the event loop in that case
if (this._pool.$ref !== oldPoolRef) {
// Uses introspection to list available types.
const types = (this._types = (await this._interruptOnDisconnect(
this._call('system.listMethods')
))
.filter(isGetAllRecordsMethod)
.map(method => method.slice(0, method.indexOf('.'))))
this._lcToTypes = { __proto__: null }
types.forEach(type => {
const lcType = type.toLowerCase()
if (lcType !== type) {
this._lcToTypes[lcType] = type
}
})
}
}

_setUrl(url) {
@@ -936,21 +953,28 @@ export class Xapi extends EventEmitter {

let result
try {
result = await this._sessionCall(
// don't use _sessionCall because a session failure should break the
// loop and trigger a complete refetch
result = await this._call(
'event.from',
[
this._sessionId,
types,
fromToken,
EVENT_TIMEOUT + 0.1, // must be float for XML-RPC transport
],
EVENT_TIMEOUT * 1e3 * 1.1
)
this._lastEventFetchedTimestamp = Date.now()
this._watchEventsError = undefined
} catch (error) {
if (error?.code === 'EVENTS_LOST') {
const code = error?.code
if (code === 'EVENTS_LOST' || code === 'SESSION_INVALID') {
// eslint-disable-next-line no-labels
continue mainLoop
}

this._watchEventsError = error
console.warn('_watchEvents', error)
await pDelay(this._eventPollDelay)
continue
@@ -1059,9 +1083,14 @@ export class Xapi extends EventEmitter {
}
}

props[`add_to_${field}`] = function(...values) {
props[`add_${field}`] = function(value) {
return xapi
.call(`${type}.add_${field}`, this.$ref, values)
.call(`${type}.add_${field}`, this.$ref, value)
.then(noop)
}
props[`remove_${field}`] = function(value) {
return xapi
.call(`${type}.remove_${field}`, this.$ref, value)
.then(noop)
}
} else if (value !== null && typeof value === 'object') {
@@ -43,7 +43,7 @@
"nice-pipe": "0.0.0",
"pretty-ms": "^4.0.0",
"progress-stream": "^2.0.0",
"promise-toolbox": "^0.12.1",
"promise-toolbox": "^0.13.0",
"pump": "^3.0.0",
"pw": "^0.0.4",
"strip-indent": "^2.0.0",

@@ -24,7 +24,6 @@ const nicePipe = require('nice-pipe')
const pairs = require('lodash/toPairs')
const pick = require('lodash/pick')
const pump = require('pump')
const startsWith = require('lodash/startsWith')
const prettyMs = require('pretty-ms')
const progressStream = require('progress-stream')
const pw = require('pw')
@@ -81,7 +80,7 @@ function parseParameters(args) {
const name = matches[1]
let value = matches[2]

if (startsWith(value, 'json:')) {
if (value.startsWith('json:')) {
value = JSON.parse(value.slice(5))
}

@@ -1,6 +1,5 @@
import JsonRpcWebSocketClient, { OPEN, CLOSED } from 'jsonrpc-websocket-client'
import { BaseError } from 'make-error'
import { startsWith } from 'lodash'

// ===================================================================

@@ -35,7 +34,7 @@ export default class Xo extends JsonRpcWebSocketClient {
}

call(method, args, i) {
if (startsWith(method, 'session.')) {
if (method.startsWith('session.')) {
return Promise.reject(
new XoError('session.*() methods are disabled from this interface')
)

@@ -1,6 +1,6 @@
{
"name": "xo-server-auth-ldap",
"version": "0.6.4",
"version": "0.6.5",
"license": "AGPL-3.0",
"description": "LDAP authentication plugin for XO-Server",
"keywords": [
@@ -39,7 +39,7 @@
"inquirer": "^6.0.0",
"ldapjs": "^1.0.1",
"lodash": "^4.17.4",
"promise-toolbox": "^0.12.1"
"promise-toolbox": "^0.13.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",

@@ -230,10 +230,9 @@ class AuthLdap {
logger(`attempting to bind as ${entry.objectName}`)
await bind(entry.objectName, password)
logger(
`successfully bound as ${
entry.objectName
} => ${username} authenticated`
`successfully bound as ${entry.objectName} => ${username} authenticated`
)
logger(JSON.stringify(entry, null, 2))
return { username }
} catch (error) {
logger(`failed to bind as ${entry.objectName}: ${error.message}`)

@@ -1,6 +1,6 @@
{
"name": "xo-server-auth-saml",
"version": "0.5.3",
"version": "0.6.0",
"license": "AGPL-3.0",
"description": "SAML authentication plugin for XO-Server",
"keywords": [
@@ -33,7 +33,7 @@
"node": ">=6"
},
"dependencies": {
"passport-saml": "^1.0.0"
"passport-saml": "^1.1.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",

@@ -24,7 +24,10 @@ export const configurationSchema = {
},
usernameField: {
title: 'Username field',
description: 'Field to use as the XO username',
description: `Field to use as the XO username

You should try \`http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddress\` if you are using Microsoft Azure Active Directory.
`,
type: 'string',
},
},

@@ -1,6 +1,6 @@
{
"name": "xo-server-backup-reports",
"version": "0.16.1",
"version": "0.16.2",
"license": "AGPL-3.0",
"description": "Backup reports plugin for XO-Server",
"keywords": [

@@ -142,12 +142,14 @@ const getErrorMarkdown = task => {

const MARKDOWN_BY_TYPE = {
pool(task, { formatDate }) {
const { pool, poolMaster = {} } = task.data
const { id, pool = {}, poolMaster = {} } = task.data
const name = pool.name_label || poolMaster.name_label || UNKNOWN_ITEM

return {
body: [
`- **UUID**: ${pool.uuid}`,
pool.uuid !== undefined
? `- **UUID**: ${pool.uuid}`
: `- **ID**: ${id}`,
...getTemporalDataMarkdown(task.end, task.start, formatDate),
getErrorMarkdown(task),
],
@@ -355,9 +357,7 @@ class BackupReportsXoPlugin {
nagiosStatus: log.status === 'success' ? 0 : 2,
nagiosMarkdown:
log.status === 'success'
? `[Xen Orchestra] [Success] Metadata backup report for ${
log.jobName
}`
? `[Xen Orchestra] [Success] Metadata backup report for ${log.jobName}`
: `[Xen Orchestra] [${log.status}] Metadata backup report for ${
log.jobName
} - ${nagiosText.join(' ')}`,
@@ -391,9 +391,7 @@ class BackupReportsXoPlugin {
} − Backup report for ${jobName} ${STATUS_ICON[log.status]}`,
markdown: toMarkdown(markdown),
nagiosStatus: 2,
nagiosMarkdown: `[Xen Orchestra] [${
log.status
}] Backup report for ${jobName} - Error : ${log.result.message}`,
nagiosMarkdown: `[Xen Orchestra] [${log.status}] Backup report for ${jobName} - Error : ${log.result.message}`,
})
}

@@ -711,9 +709,7 @@ class BackupReportsXoPlugin {
subject: `[Xen Orchestra] ${globalStatus} ${icon}`,
markdown,
nagiosStatus: 2,
nagiosMarkdown: `[Xen Orchestra] [${globalStatus}] Error : ${
error.message
}`,
nagiosMarkdown: `[Xen Orchestra] [${globalStatus}] Error : ${error.message}`,
})
}

@@ -189,9 +189,7 @@ export default class DensityPlan extends Plan {
const { vm, destination } = move
const xapiDest = this.xo.getXapi(destination)
debug(
`Migrate VM (${vm.id}) to Host (${destination.id}) from Host (${
vm.$container
}).`
`Migrate VM (${vm.id}) to Host (${destination.id}) from Host (${vm.$container}).`
)
return xapiDest.migrateVm(
vm._xapiId,

@@ -126,9 +126,7 @@ export default class PerformancePlan extends Plan {
destinationAverages.memoryFree -= vmAverages.memory

debug(
`Migrate VM (${vm.id}) to Host (${destination.id}) from Host (${
exceededHost.id
}).`
`Migrate VM (${vm.id}) to Host (${destination.id}) from Host (${exceededHost.id}).`
)
optimizationsCount++

@@ -143,9 +141,7 @@ export default class PerformancePlan extends Plan {

await Promise.all(promises)
debug(
`Performance mode: ${optimizationsCount} optimizations for Host (${
exceededHost.id
}).`
`Performance mode: ${optimizationsCount} optimizations for Host (${exceededHost.id}).`
)
}
}

@@ -183,9 +183,7 @@ export const configurationSchema = {
description: Object.keys(HOST_FUNCTIONS)
.map(
k =>
` * ${k} (${HOST_FUNCTIONS[k].unit}): ${
HOST_FUNCTIONS[k].description
}`
` * ${k} (${HOST_FUNCTIONS[k].unit}): ${HOST_FUNCTIONS[k].description}`
)
.join('\n'),
type: 'string',
@@ -233,9 +231,7 @@ export const configurationSchema = {
description: Object.keys(VM_FUNCTIONS)
.map(
k =>
` * ${k} (${VM_FUNCTIONS[k].unit}): ${
VM_FUNCTIONS[k].description
}`
` * ${k} (${VM_FUNCTIONS[k].unit}): ${VM_FUNCTIONS[k].description}`
)
.join('\n'),
type: 'string',
@@ -284,9 +280,7 @@ export const configurationSchema = {
description: Object.keys(SR_FUNCTIONS)
.map(
k =>
` * ${k} (${SR_FUNCTIONS[k].unit}): ${
SR_FUNCTIONS[k].description
}`
` * ${k} (${SR_FUNCTIONS[k].unit}): ${SR_FUNCTIONS[k].description}`
)
.join('\n'),
type: 'string',
@@ -414,9 +408,7 @@ ${monitorBodies.join('\n')}`
}

_parseDefinition(definition) {
const alarmId = `${definition.objectType}|${definition.variableName}|${
definition.alarmTriggerLevel
}`
const alarmId = `${definition.objectType}|${definition.variableName}|${definition.alarmTriggerLevel}`
const typeFunction =
TYPE_FUNCTION_MAP[definition.objectType][definition.variableName]
const parseData = (result, uuid) => {
@@ -468,9 +460,7 @@ ${monitorBodies.join('\n')}`
...definition,
alarmId,
vmFunction: typeFunction,
title: `${typeFunction.name} ${definition.comparator} ${
definition.alarmTriggerLevel
}${typeFunction.unit}`,
title: `${typeFunction.name} ${definition.comparator} ${definition.alarmTriggerLevel}${typeFunction.unit}`,
snapshot: async () => {
return Promise.all(
map(definition.uuids, async uuid => {
@@ -664,9 +654,7 @@ ${entry.listItem}
subject: `[Xen Orchestra] − Performance Alert ${subjectSuffix}`,
markdown:
markdownBody +
`\n\n\nSent from Xen Orchestra [perf-alert plugin](${
this._configuration.baseUrl
}#/settings/plugins)\n`,
`\n\n\nSent from Xen Orchestra [perf-alert plugin](${this._configuration.baseUrl}#/settings/plugins)\n`,
})
} else {
throw new Error('The email alert system has a configuration issue.')
3  packages/xo-server-sdn-controller/.babelrc.js  (Normal file)
@@ -0,0 +1,3 @@
module.exports = require('../../@xen-orchestra/babel-config')(
require('./package.json')
)

43  packages/xo-server-sdn-controller/README.md  (Normal file)
@@ -0,0 +1,43 @@
# xo-server-sdn-controller [](https://travis-ci.org/vatesfr/xen-orchestra)

XO Server plugin that allows the creation of pool-wide private networks.

## Install

For installing XO and the plugins from the sources, please take a look at [the documentation](https://xen-orchestra.com/docs/from_the_sources.html).

## Usage

### Network creation

In the network creation view, select a `pool` and `Private network`.
Create the network.

A choice is offered between `GRE` and `VxLAN`; if `VxLAN` is chosen, port 4789 must be open for UDP traffic.
The following line needs to be added, if not already present, in `/etc/sysconfig/iptables` of all the hosts where `VxLAN` is wanted:
`-A xapi-INPUT -p udp -m conntrack --ctstate NEW -m udp --dport 4789 -j ACCEPT`

### Configuration

Like all other xo-server plugins, it can be configured directly via
the web interface, see [the plugin documentation](https://xen-orchestra.com/docs/plugins.html).

The plugin's configuration contains:
- `cert-dir`: A path where to find the certificates to create SSL connections with the hosts.
If none is provided, the plugin will create its own self-signed certificates.
- `override-certs`: Whether or not to uninstall an already existing SDN controller CA certificate in order to replace it by the plugin's one.

## Contributions

Contributions are *very* welcome, either on the documentation or on
the code.

You may:

- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
you've encountered;
- fork and create a pull request.

## License

AGPL3 © [Vates SAS](http://vates.fr)
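For context, the private-network creation described in the README's Usage section goes through the `plugin.SDNController.createPrivateNetwork` API method registered in `src/index.js` below. The following is only an illustrative sketch of calling it from a JSON-RPC client; the `xo-lib` usage, URL, credentials, and pool UUID placeholder are all assumptions, while the parameter names come from `createPrivateNetwork.params` in this diff:

```js
// Sketch only — xo-lib usage and the concrete values are assumptions for illustration.
import Xo from 'xo-lib'

async function createPrivateNetwork() {
  const xo = new Xo({ url: 'https://xo.example.org' })
  await xo.open()
  await xo.signIn({ email: 'admin@example.org', password: 'secret' })

  // parameter names taken from createPrivateNetwork.params in src/index.js
  await xo.call('plugin.SDNController.createPrivateNetwork', {
    poolId: '<pool UUID>',
    networkName: 'private0',
    networkDescription: 'pool-wide private network',
    encapsulation: 'gre', // or 'vxlan'
  })
}

createPrivateNetwork().catch(console.error)
```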
36  packages/xo-server-sdn-controller/package.json  (Normal file)
@@ -0,0 +1,36 @@
{
"name": "xo-server-sdn-controller",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-server-sdn-controller",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "packages/xo-server-sdn-controller",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"main": "./dist",
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "rimraf dist/",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build"
},
"version": "0.1.1",
"engines": {
"node": ">=6"
},
"devDependencies": {
"@babel/cli": "^7.4.4",
"@babel/core": "^7.4.4",
"@babel/plugin-proposal-optional-chaining": "^7.2.0",
"@babel/preset-env": "^7.4.4",
"cross-env": "^5.2.0"
},
"dependencies": {
"@xen-orchestra/log": "^0.1.4",
"lodash": "^4.17.11",
"node-openssl-cert": "^0.0.84",
"promise-toolbox": "^0.13.0"
},
"private": true
}

862  packages/xo-server-sdn-controller/src/index.js  (Normal file)
@@ -0,0 +1,862 @@
|
||||
import assert from 'assert'
|
||||
import createLogger from '@xen-orchestra/log'
|
||||
import NodeOpenssl from 'node-openssl-cert'
|
||||
import { access, constants, readFile, writeFile } from 'fs'
|
||||
import { EventEmitter } from 'events'
|
||||
import { filter, find, forOwn, map } from 'lodash'
|
||||
import { fromCallback, fromEvent } from 'promise-toolbox'
|
||||
import { join } from 'path'
|
||||
|
||||
import { OvsdbClient } from './ovsdb-client'
|
||||
|
||||
// =============================================================================
|
||||
|
||||
const log = createLogger('xo:xo-server:sdn-controller')
|
||||
|
||||
const PROTOCOL = 'pssl'
|
||||
|
||||
const CA_CERT = 'ca-cert.pem'
|
||||
const CLIENT_KEY = 'client-key.pem'
|
||||
const CLIENT_CERT = 'client-cert.pem'
|
||||
|
||||
const SDN_CONTROLLER_CERT = 'sdn-controller-ca.pem'
|
||||
|
||||
const NB_DAYS = 9999
|
||||
|
||||
// =============================================================================
|
||||
|
||||
export const configurationSchema = {
|
||||
type: 'object',
|
||||
properties: {
|
||||
'cert-dir': {
|
||||
description: `Full path to a directory where to find: \`client-cert.pem\`,
|
||||
\`client-key.pem\` and \`ca-cert.pem\` to create ssl connections with hosts.
|
||||
If none is provided, the plugin will create its own self-signed certificates.`,
|
||||
|
||||
type: 'string',
|
||||
},
|
||||
'override-certs': {
|
||||
description: `Replace already existing SDN controller CA certificate`,
|
||||
|
||||
type: 'boolean',
|
||||
default: false,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// =============================================================================
|
||||
|
||||
async function fileWrite(path, data) {
|
||||
await fromCallback(writeFile, path, data)
|
||||
}
|
||||
|
||||
async function fileRead(path) {
|
||||
const result = await fromCallback(readFile, path)
|
||||
return result
|
||||
}
|
||||
|
||||
async function fileExists(path) {
|
||||
try {
|
||||
await fromCallback(access, path, constants.F_OK)
|
||||
} catch (error) {
|
||||
if (error.code === 'ENOENT') {
|
||||
return false
|
||||
}
|
||||
|
||||
throw error
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// =============================================================================
|
||||
|
||||
class SDNController extends EventEmitter {
|
||||
constructor({ xo, getDataDir }) {
|
||||
super()
|
||||
|
||||
this._xo = xo
|
||||
|
||||
this._getDataDir = getDataDir
|
||||
|
||||
this._clientKey = null
|
||||
this._clientCert = null
|
||||
this._caCert = null
|
||||
|
||||
this._poolNetworks = []
|
||||
this._ovsdbClients = []
|
||||
this._newHosts = []
|
||||
|
||||
this._networks = new Map()
|
||||
this._starCenters = new Map()
|
||||
|
||||
this._cleaners = []
|
||||
this._objectsAdded = this._objectsAdded.bind(this)
|
||||
this._objectsUpdated = this._objectsUpdated.bind(this)
|
||||
|
||||
this._overrideCerts = false
|
||||
|
||||
this._unsetApiMethod = null
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
async configure(configuration) {
|
||||
this._overrideCerts = configuration['override-certs']
|
||||
let certDirectory = configuration['cert-dir']
|
||||
|
||||
if (certDirectory == null) {
|
||||
log.debug(`No cert-dir provided, using default self-signed certificates`)
|
||||
certDirectory = await this._getDataDir()
|
||||
|
||||
if (!(await fileExists(join(certDirectory, CA_CERT)))) {
|
||||
// If one certificate doesn't exist, none should
|
||||
assert(
|
||||
!(await fileExists(join(certDirectory, CLIENT_KEY))),
|
||||
`${CLIENT_KEY} should not exist`
|
||||
)
|
||||
assert(
|
||||
!(await fileExists(join(certDirectory, CLIENT_CERT))),
|
||||
`${CLIENT_CERT} should not exist`
|
||||
)
|
||||
|
||||
log.debug(`No default self-signed certificates exists, creating them`)
|
||||
await this._generateCertificatesAndKey(certDirectory)
|
||||
}
|
||||
}
|
||||
// TODO: verify certificates and create new certificates if needed
|
||||
|
||||
;[this._clientKey, this._clientCert, this._caCert] = await Promise.all([
|
||||
fileRead(join(certDirectory, CLIENT_KEY)),
|
||||
fileRead(join(certDirectory, CLIENT_CERT)),
|
||||
fileRead(join(certDirectory, CA_CERT)),
|
||||
])
|
||||
|
||||
this._ovsdbClients.forEach(client => {
|
||||
client.updateCertificates(this._clientKey, this._clientCert, this._caCert)
|
||||
})
|
||||
const updatedPools = []
|
||||
for (const poolNetwork of this._poolNetworks) {
|
||||
if (updatedPools.includes(poolNetwork.pool)) {
|
||||
continue
|
||||
}
|
||||
|
||||
const xapi = this._xo.getXapi(poolNetwork.pool)
|
||||
await this._installCaCertificateIfNeeded(xapi)
|
||||
updatedPools.push(poolNetwork.pool)
|
||||
}
|
||||
}
|
||||
|
||||
async load() {
|
||||
const createPrivateNetwork = this._createPrivateNetwork.bind(this)
|
||||
createPrivateNetwork.description =
|
||||
'Creates a pool-wide private network on a selected pool'
|
||||
createPrivateNetwork.params = {
|
||||
poolId: { type: 'string' },
|
||||
networkName: { type: 'string' },
|
||||
networkDescription: { type: 'string' },
|
||||
encapsulation: { type: 'string' },
|
||||
}
|
||||
createPrivateNetwork.resolve = {
|
||||
xoPool: ['poolId', 'pool', ''],
|
||||
}
|
||||
this._unsetApiMethod = this._xo.addApiMethod(
|
||||
'plugin.SDNController.createPrivateNetwork',
|
||||
createPrivateNetwork
|
||||
)
|
||||
|
||||
// FIXME: we should monitor when xapis are added/removed
|
||||
forOwn(this._xo.getAllXapis(), async xapi => {
|
||||
await xapi.objectsFetched
|
||||
|
||||
if (this._setControllerNeeded(xapi) === false) {
|
||||
this._cleaners.push(await this._manageXapi(xapi))
|
||||
|
||||
const hosts = filter(xapi.objects.all, { $type: 'host' })
|
||||
await Promise.all(
|
||||
map(hosts, async host => {
|
||||
this._createOvsdbClient(host)
|
||||
})
|
||||
)
|
||||
|
||||
// Add already existing pool-wide private networks
|
||||
const networks = filter(xapi.objects.all, { $type: 'network' })
|
||||
forOwn(networks, async network => {
|
||||
if (network.other_config.private_pool_wide === 'true') {
|
||||
log.debug('Adding network to managed networks', {
|
||||
network: network.name_label,
|
||||
pool: network.$pool.name_label,
|
||||
})
|
||||
const center = await this._electNewCenter(network, true)
|
||||
this._poolNetworks.push({
|
||||
pool: network.$pool.$ref,
|
||||
network: network.$ref,
|
||||
starCenter: center?.$ref,
|
||||
})
|
||||
this._networks.set(network.$id, network.$ref)
|
||||
if (center != null) {
|
||||
this._starCenters.set(center.$id, center.$ref)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
async unload() {
|
||||
this._ovsdbClients = []
|
||||
this._poolNetworks = []
|
||||
this._newHosts = []
|
||||
|
||||
this._networks.clear()
|
||||
this._starCenters.clear()
|
||||
|
||||
this._cleaners.forEach(cleaner => cleaner())
|
||||
this._cleaners = []
|
||||
|
||||
this._unsetApiMethod()
|
||||
}
|
||||
|
||||
// ===========================================================================
|
||||
|
||||
async _createPrivateNetwork({
|
||||
xoPool,
|
||||
networkName,
|
||||
networkDescription,
|
||||
encapsulation,
|
||||
}) {
|
||||
const pool = this._xo.getXapiObject(xoPool)
|
||||
await this._setPoolControllerIfNeeded(pool)
|
||||
|
||||
// Create the private network
|
||||
const privateNetworkRef = await pool.$xapi.call('network.create', {
|
||||
name_label: networkName,
|
||||
name_description: networkDescription,
|
||||
MTU: 0,
|
||||
other_config: {
|
||||
automatic: 'false',
|
||||
private_pool_wide: 'true',
|
||||
encapsulation: encapsulation,
|
||||
},
|
||||
})
|
||||
|
||||
const privateNetwork = await pool.$xapi._getOrWaitObject(privateNetworkRef)
|
||||
|
||||
log.info('New private network created', {
|
||||
network: privateNetwork.name_label,
|
||||
pool: pool.name_label,
|
||||
})
|
||||
|
||||
// For each pool's host, create a tunnel to the private network
|
||||
const hosts = filter(pool.$xapi.objects.all, { $type: 'host' })
|
||||
await Promise.all(
|
||||
map(hosts, async host => {
|
||||
await this._createTunnel(host, privateNetwork)
|
||||
this._createOvsdbClient(host)
|
||||
})
|
||||
)
|
||||
|
||||
const center = await this._electNewCenter(privateNetwork, false)
|
||||
this._poolNetworks.push({
|
||||
pool: pool.$ref,
|
||||
network: privateNetwork.$ref,
|
||||
starCenter: center?.$ref,
|
||||
encapsulation: encapsulation,
|
||||
})
|
||||
this._networks.set(privateNetwork.$id, privateNetwork.$ref)
|
||||
if (center != null) {
|
||||
this._starCenters.set(center.$id, center.$ref)
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
async _manageXapi(xapi) {
|
||||
const { objects } = xapi
|
||||
|
||||
const objectsRemovedXapi = this._objectsRemoved.bind(this, xapi)
|
||||
objects.on('add', this._objectsAdded)
|
||||
objects.on('update', this._objectsUpdated)
|
||||
objects.on('remove', objectsRemovedXapi)
|
||||
|
||||
await this._installCaCertificateIfNeeded(xapi)
|
||||
|
||||
return () => {
|
||||
objects.removeListener('add', this._objectsAdded)
|
||||
objects.removeListener('update', this._objectsUpdated)
|
||||
objects.removeListener('remove', objectsRemovedXapi)
|
||||
}
|
||||
}
|
||||
|
||||
async _objectsAdded(objects) {
|
||||
await Promise.all(
|
||||
map(objects, async object => {
|
||||
const { $type } = object
|
||||
|
||||
if ($type === 'host') {
|
||||
log.debug('New host', {
|
||||
host: object.name_label,
|
||||
pool: object.$pool.name_label,
|
||||
})
|
||||
|
||||
if (find(this._newHosts, { $ref: object.$ref }) == null) {
|
||||
this._newHosts.push(object)
|
||||
}
|
||||
this._createOvsdbClient(object)
|
||||
}
|
||||
})
|
||||
)
|
||||
}
|
||||
|
||||
async _objectsUpdated(objects) {
|
||||
await Promise.all(
|
||||
map(objects, async (object, id) => {
|
||||
const { $type } = object
|
||||
|
||||
if ($type === 'PIF') {
|
||||
await this._pifUpdated(object)
|
||||
} else if ($type === 'host') {
|
||||
await this._hostUpdated(object)
|
||||
} else if ($type === 'host_metrics') {
|
||||
await this._hostMetricsUpdated(object)
|
||||
}
|
||||
})
|
||||
)
|
||||
}
|
||||
|
||||
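// When XAPI objects are removed: drop the OVSDB client of a removed host,
// re-elect a star-center if the removed host was one, and clean removed
// private networks out of the pool network list.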
async _objectsRemoved(xapi, objects) {
|
||||
await Promise.all(
|
||||
map(objects, async (object, id) => {
|
||||
const client = find(this._ovsdbClients, { id: id })
|
||||
if (client != null) {
|
||||
this._ovsdbClients.splice(this._ovsdbClients.indexOf(client), 1)
|
||||
}
|
||||
|
||||
// If a star-center host is removed, re-elect a new center where needed
|
||||
const starCenterRef = this._starCenters.get(id)
|
||||
if (starCenterRef != null) {
|
||||
this._starCenters.delete(id)
|
||||
const poolNetworks = filter(this._poolNetworks, {
|
||||
starCenter: starCenterRef,
|
||||
})
|
||||
for (const poolNetwork of poolNetworks) {
|
||||
const network = xapi.getObjectByRef(poolNetwork.network)
|
||||
const newCenter = await this._electNewCenter(network, true)
|
||||
poolNetwork.starCenter = newCenter?.$ref
|
||||
if (newCenter != null) {
|
||||
this._starCenters.set(newCenter.$id, newCenter.$ref)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// If a network is removed, remove its entry from this._poolNetworks
|
||||
const networkRef = this._networks.get(id)
|
||||
if (networkRef != null) {
|
||||
this._networks.delete(id)
|
||||
const poolNetwork = find(this._poolNetworks, {
|
||||
network: networkRef,
|
||||
})
|
||||
if (poolNetwork != null) {
|
||||
this._poolNetworks.splice(
|
||||
this._poolNetworks.indexOf(poolNetwork),
|
||||
1
|
||||
)
|
||||
}
|
||||
}
|
||||
})
|
||||
)
|
||||
}
|
||||
|
||||
async _pifUpdated(pif) {
|
||||
// Only if PIF is in a private network
|
||||
const poolNetwork = find(this._poolNetworks, { network: pif.network })
|
||||
if (poolNetwork == null) {
|
||||
return
|
||||
}
|
||||
|
||||
if (!pif.currently_attached) {
|
||||
if (poolNetwork.starCenter !== pif.host) {
|
||||
return
|
||||
}
|
||||
|
||||
log.debug(
|
||||
'PIF of star-center host has been unplugged, electing a new star-center',
|
||||
{
|
||||
pif: pif.device,
|
||||
network: pif.$network.name_label,
|
||||
host: pif.$host.name_label,
|
||||
pool: pif.$pool.name_label,
|
||||
}
|
||||
)
|
||||
const newCenter = await this._electNewCenter(pif.$network, true)
|
||||
poolNetwork.starCenter = newCenter?.$ref
|
||||
this._starCenters.delete(pif.$host.$id)
|
||||
if (newCenter != null) {
|
||||
this._starCenters.set(newCenter.$id, newCenter.$ref)
|
||||
}
|
||||
} else {
|
||||
if (poolNetwork.starCenter == null) {
|
||||
const host = pif.$host
|
||||
log.debug('First available host becomes star center of network', {
|
||||
host: host.name_label,
|
||||
network: pif.$network.name_label,
|
||||
pool: pif.$pool.name_label,
|
||||
})
|
||||
poolNetwork.starCenter = pif.host
|
||||
this._starCenters.set(host.$id, host.$ref)
|
||||
}
|
||||
|
||||
log.debug('PIF plugged', {
|
||||
pif: pif.device,
|
||||
network: pif.$network.name_label,
|
||||
host: pif.$host.name_label,
|
||||
pool: pif.$pool.name_label,
|
||||
})
|
||||
|
||||
const starCenter = pif.$xapi.getObjectByRef(poolNetwork.starCenter)
|
||||
await this._addHostToNetwork(pif.$host, pif.$network, starCenter)
|
||||
}
|
||||
}
|
||||
|
||||
async _hostUpdated(host) {
|
||||
const xapi = host.$xapi
|
||||
|
||||
if (host.enabled) {
|
||||
if (host.PIFs.length === 0) {
|
||||
return
|
||||
}
|
||||
|
||||
const newHost = find(this._newHosts, { $ref: host.$ref })
|
||||
if (newHost != null) {
|
||||
this._newHosts.splice(this._newHosts.indexOf(newHost), 1)
|
||||
try {
|
||||
await xapi.call('pool.certificate_sync')
|
||||
} catch (error) {
|
||||
log.error('Error while syncing SDN controller CA certificate', {
|
||||
error,
|
||||
pool: host.$pool.name_label,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async _hostMetricsUpdated(hostMetrics) {
|
||||
const ovsdbClient = find(this._ovsdbClients, {
|
||||
hostMetricsRef: hostMetrics.$ref,
|
||||
})
|
||||
const host = ovsdbClient._host
|
||||
|
||||
if (hostMetrics.live) {
|
||||
await this._addHostToPoolNetworks(host)
|
||||
} else {
|
||||
await this._hostUnreachable(host)
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
async _setPoolControllerIfNeeded(pool) {
|
||||
if (!this._setControllerNeeded(pool.$xapi)) {
|
||||
// Nothing to do
|
||||
return
|
||||
}
|
||||
|
||||
const controller = find(pool.$xapi.objects.all, { $type: 'SDN_controller' })
|
||||
if (controller != null) {
|
||||
await pool.$xapi.call('SDN_controller.forget', controller.$ref)
|
||||
log.debug('Old SDN controller removed', {
|
||||
pool: pool.name_label,
|
||||
})
|
||||
}
|
||||
|
||||
await pool.$xapi.call('SDN_controller.introduce', PROTOCOL)
|
||||
log.debug('SDN controller has been set', {
|
||||
pool: pool.name_label,
|
||||
})
|
||||
this._cleaners.push(await this._manageXapi(pool.$xapi))
|
||||
}
|
||||
|
||||
_setControllerNeeded(xapi) {
|
||||
const controller = find(xapi.objects.all, { $type: 'SDN_controller' })
|
||||
return !(
|
||||
controller != null &&
|
||||
controller.protocol === PROTOCOL &&
|
||||
controller.address === '' &&
|
||||
controller.port === 0
|
||||
)
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
async _installCaCertificateIfNeeded(xapi) {
|
||||
let needInstall = false
|
||||
try {
|
||||
const result = await xapi.call('pool.certificate_list')
|
||||
if (!result.includes(SDN_CONTROLLER_CERT)) {
|
||||
needInstall = true
|
||||
} else if (this._overrideCerts) {
|
||||
await xapi.call('pool.certificate_uninstall', SDN_CONTROLLER_CERT)
|
||||
log.debug('Old SDN controller CA certificate uninstalled', {
|
||||
pool: xapi.pool.name_label,
|
||||
})
|
||||
needInstall = true
|
||||
}
|
||||
} catch (error) {
|
||||
log.error('Error while retrieving certificate list', {
|
||||
error,
|
||||
pool: xapi.pool.name_label,
|
||||
})
|
||||
}
|
||||
if (!needInstall) {
|
||||
return
|
||||
}
|
||||
|
||||
try {
|
||||
await xapi.call(
|
||||
'pool.certificate_install',
|
||||
SDN_CONTROLLER_CERT,
|
||||
this._caCert.toString()
|
||||
)
|
||||
await xapi.call('pool.certificate_sync')
|
||||
log.debug('SDN controller CA certificate installed', {
|
||||
pool: xapi.pool.name_label,
|
||||
})
|
||||
} catch (error) {
|
||||
log.error('Error while installing SDN controller CA certificate', {
|
||||
error,
|
||||
pool: xapi.pool.name_label,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
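// Elect a new star-center for the network: pick a live host whose PIF on
// this network is attached, optionally reset previously created OVS ports
// on every host, then rebuild the star topology around the elected host.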
async _electNewCenter(network, resetNeeded) {
|
||||
const pool = network.$pool
|
||||
|
||||
let newCenter = null
|
||||
const hosts = filter(pool.$xapi.objects.all, { $type: 'host' })
|
||||
|
||||
for (const host of hosts) {
|
||||
const pif = find(host.$PIFs, { network: network.$ref })
|
||||
if (pif !== undefined && pif.currently_attached && host.$metrics.live) {
|
||||
newCenter = host
|
||||
}
|
||||
}
|
||||
|
||||
await Promise.all(
|
||||
map(hosts, async host => {
|
||||
if (!resetNeeded) {
|
||||
return
|
||||
}
|
||||
|
||||
// Clean old ports and interfaces
|
||||
const hostClient = find(this._ovsdbClients, { host: host.$ref })
|
||||
if (hostClient != null) {
|
||||
try {
|
||||
await hostClient.resetForNetwork(network.uuid, network.name_label)
|
||||
} catch (error) {
|
||||
log.error('Error while resetting private network', {
|
||||
error,
|
||||
network: network.name_label,
|
||||
host: host.name_label,
|
||||
pool: network.$pool.name_label,
|
||||
})
|
||||
}
|
||||
}
|
||||
})
|
||||
)
|
||||
|
||||
if (newCenter == null) {
|
||||
log.error('No available host to elect new star-center', {
|
||||
network: network.name_label,
|
||||
pool: network.$pool.name_label,
|
||||
})
|
||||
return null
|
||||
}
|
||||
|
||||
// Recreate star topology
|
||||
await Promise.all(
|
||||
map(hosts, async host => {
|
||||
await this._addHostToNetwork(host, network, newCenter)
|
||||
})
|
||||
)
|
||||
|
||||
log.info('New star-center elected', {
|
||||
center: newCenter.name_label,
|
||||
network: network.name_label,
|
||||
pool: network.$pool.name_label,
|
||||
})
|
||||
|
||||
return newCenter
|
||||
}
|
||||
|
||||
async _createTunnel(host, network) {
|
||||
const pif = host.$PIFs.find(
|
||||
pif => pif.physical && pif.ip_configuration_mode !== 'None'
|
||||
)
|
||||
if (pif == null) {
|
||||
log.error('No PIF found to create tunnel', {
|
||||
host: host.name_label,
|
||||
network: network.name_label,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
await host.$xapi.call('tunnel.create', pif.$ref, network.$ref)
|
||||
log.debug('New tunnel added', {
|
||||
network: network.name_label,
|
||||
host: host.name_label,
|
||||
pool: host.$pool.name_label,
|
||||
})
|
||||
}
|
||||
|
||||
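// Connect a host to a private network: through OVSDB, add a tunnel port and
// interface on both the host and the star-center, pointing at each other.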
async _addHostToNetwork(host, network, starCenter) {
|
||||
if (host.$ref === starCenter.$ref) {
|
||||
// Nothing to do
|
||||
return
|
||||
}
|
||||
|
||||
const hostClient = find(this._ovsdbClients, {
|
||||
host: host.$ref,
|
||||
})
|
||||
if (hostClient == null) {
|
||||
log.error('No OVSDB client found', {
|
||||
host: host.name_label,
|
||||
pool: host.$pool.name_label,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
const starCenterClient = find(this._ovsdbClients, {
|
||||
host: starCenter.$ref,
|
||||
})
|
||||
if (starCenterClient == null) {
|
||||
log.error('No OVSDB client found for star-center', {
|
||||
host: starCenter.name_label,
|
||||
pool: starCenter.$pool.name_label,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
const encapsulation =
|
||||
network.other_config.encapsulation != null
|
||||
? network.other_config.encapsulation
|
||||
: 'gre'
|
||||
|
||||
try {
|
||||
await hostClient.addInterfaceAndPort(
|
||||
network.uuid,
|
||||
network.name_label,
|
||||
starCenterClient.address,
|
||||
encapsulation
|
||||
)
|
||||
await starCenterClient.addInterfaceAndPort(
|
||||
network.uuid,
|
||||
network.name_label,
|
||||
hostClient.address,
|
||||
encapsulation
|
||||
)
|
||||
} catch (error) {
|
||||
log.error('Error while connecting host to private network', {
|
||||
error,
|
||||
network: network.name_label,
|
||||
host: host.name_label,
|
||||
pool: host.$pool.name_label,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
async _addHostToPoolNetworks(host) {
|
||||
const xapi = host.$xapi
|
||||
|
||||
const tunnels = filter(xapi.objects.all, { $type: 'tunnel' })
|
||||
for (const tunnel of tunnels) {
|
||||
const accessPif = xapi.getObjectByRef(tunnel.access_PIF)
|
||||
if (accessPif.host !== host.$ref) {
|
||||
continue
|
||||
}
|
||||
|
||||
const poolNetwork = find(this._poolNetworks, {
|
||||
network: accessPif.network,
|
||||
})
|
||||
if (poolNetwork == null || accessPif.currently_attached) {
|
||||
continue
|
||||
}
|
||||
|
||||
try {
|
||||
await xapi.call('PIF.plug', accessPif.$ref)
|
||||
} catch (error) {
|
||||
log.error('Error while plugging PIF', {
|
||||
error,
|
||||
pif: accessPif.device,
|
||||
network: accessPif.$network.name_label,
|
||||
host: host.name_label,
|
||||
pool: host.$pool.name_label,
|
||||
})
|
||||
continue
|
||||
}
|
||||
|
||||
log.debug('PIF plugged', {
|
||||
pif: accessPif.device,
|
||||
network: accessPif.$network.name_label,
|
||||
host: host.name_label,
|
||||
pool: host.$pool.name_label,
|
||||
})
|
||||
|
||||
const starCenter = xapi.getObjectByRef(poolNetwork.starCenter)
|
||||
await this._addHostToNetwork(host, accessPif.$network, starCenter)
|
||||
}
|
||||
}
|
||||
|
||||
async _hostUnreachable(host) {
|
||||
const poolNetworks = filter(this._poolNetworks, { starCenter: host.$ref })
|
||||
for (const poolNetwork of poolNetworks) {
|
||||
const network = host.$xapi.getObjectByRef(poolNetwork.network)
|
||||
log.debug('Unreachable star-center, electing a new one', {
|
||||
network: network.name_label,
|
||||
center: host.name_label,
|
||||
pool: host.$pool.name_label,
|
||||
})
|
||||
|
||||
const newCenter = await this._electNewCenter(network, true)
|
||||
poolNetwork.starCenter = newCenter?.$ref
|
||||
this._starCenters.delete(host.$id)
|
||||
if (newCenter != null) {
|
||||
this._starCenters.set(newCenter.$id, newCenter.$ref)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
_createOvsdbClient(host) {
|
||||
const foundClient = find(this._ovsdbClients, { host: host.$ref })
|
||||
if (foundClient != null) {
|
||||
return foundClient
|
||||
}
|
||||
|
||||
const client = new OvsdbClient(
|
||||
host,
|
||||
this._clientKey,
|
||||
this._clientCert,
|
||||
this._caCert
|
||||
)
|
||||
this._ovsdbClients.push(client)
|
||||
return client
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
async _generateCertificatesAndKey(dataDir) {
|
||||
const openssl = new NodeOpenssl()
|
||||
|
||||
const rsakeyoptions = {
|
||||
rsa_keygen_bits: 4096,
|
||||
format: 'PKCS8',
|
||||
}
|
||||
const subject = {
|
||||
countryName: 'XX',
|
||||
localityName: 'Default City',
|
||||
organizationName: 'Default Company LTD',
|
||||
}
|
||||
const csroptions = {
|
||||
hash: 'sha256',
|
||||
startdate: new Date('1984-02-04 00:00:00'),
|
||||
enddate: new Date('2143-06-04 04:16:23'),
|
||||
subject: subject,
|
||||
}
|
||||
const cacsroptions = {
|
||||
hash: 'sha256',
|
||||
days: NB_DAYS,
|
||||
subject: subject,
|
||||
}
|
||||
|
||||
openssl.generateRSAPrivateKey(rsakeyoptions, (error, cakey, cmd) => {
|
||||
if (error !== undefined) {
|
||||
log.error('Error while generating CA private key', {
|
||||
error,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
openssl.generateCSR(cacsroptions, cakey, null, (error, csr, cmd) => {
|
||||
if (error !== undefined) {
|
||||
log.error('Error while generating CA certificate', {
|
||||
error,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
openssl.selfSignCSR(
|
||||
csr,
|
||||
cacsroptions,
|
||||
cakey,
|
||||
null,
|
||||
async (error, cacrt, cmd) => {
|
||||
if (error !== undefined) {
|
||||
log.error('Error while signing CA certificate', {
|
||||
error,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
await fileWrite(join(dataDir, CA_CERT), cacrt)
|
||||
openssl.generateRSAPrivateKey(
|
||||
rsakeyoptions,
|
||||
async (error, key, cmd) => {
|
||||
if (error !== undefined) {
|
||||
log.error('Error while generating private key', {
|
||||
error,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
await fileWrite(join(dataDir, CLIENT_KEY), key)
|
||||
openssl.generateCSR(
|
||||
csroptions,
|
||||
key,
|
||||
null,
|
||||
(error, csr, cmd) => {
|
||||
if (error !== undefined) {
|
||||
log.error('Error while generating certificate', {
|
||||
error,
|
||||
})
|
||||
return
|
||||
}
|
||||
openssl.CASignCSR(
|
||||
csr,
|
||||
cacsroptions,
|
||||
false,
|
||||
cacrt,
|
||||
cakey,
|
||||
null,
|
||||
async (error, crt, cmd) => {
|
||||
if (error !== undefined) {
|
||||
log.error('Error while signing certificate', {
|
||||
error,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
await fileWrite(join(dataDir, CLIENT_CERT), crt)
|
||||
this.emit('certWritten')
|
||||
}
|
||||
)
|
||||
}
|
||||
)
|
||||
}
|
||||
)
|
||||
}
|
||||
)
|
||||
})
|
||||
})
|
||||
|
||||
await fromEvent(this, 'certWritten', {})
|
||||
log.debug('All certificates have been successfully written')
|
||||
}
|
||||
}
|
||||
|
||||
export default opts => new SDNController(opts)
|
||||
513
packages/xo-server-sdn-controller/src/ovsdb-client.js
Normal file
@@ -0,0 +1,513 @@
|
||||
import assert from 'assert'
|
||||
import createLogger from '@xen-orchestra/log'
|
||||
import forOwn from 'lodash/forOwn'
|
||||
import fromEvent from 'promise-toolbox/fromEvent'
|
||||
import { connect } from 'tls'
|
||||
|
||||
const log = createLogger('xo:xo-server:sdn-controller:ovsdb-client')
|
||||
|
||||
const OVSDB_PORT = 6640
|
||||
|
||||
// =============================================================================
|
||||
|
||||
export class OvsdbClient {
|
||||
constructor(host, clientKey, clientCert, caCert) {
|
||||
this._host = host
|
||||
this._numberOfPortAndInterface = 0
|
||||
this._requestID = 0
|
||||
|
||||
this.updateCertificates(clientKey, clientCert, caCert)
|
||||
|
||||
log.debug('New OVSDB client', {
|
||||
host: this._host.name_label,
|
||||
})
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
get address() {
|
||||
return this._host.address
|
||||
}
|
||||
|
||||
get host() {
|
||||
return this._host.$ref
|
||||
}
|
||||
|
||||
get id() {
|
||||
return this._host.$id
|
||||
}
|
||||
|
||||
get hostMetricsRef() {
|
||||
return this._host.metrics
|
||||
}
|
||||
|
||||
updateCertificates(clientKey, clientCert, caCert) {
|
||||
this._clientKey = clientKey
|
||||
this._clientCert = clientCert
|
||||
this._caCert = caCert
|
||||
|
||||
log.debug('Certificates have been updated', {
|
||||
host: this._host.name_label,
|
||||
})
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
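// Add a tunnel interface and port to the bridge backing the network,
// pointing at `remoteAddress`, unless such a tunnel already exists.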
async addInterfaceAndPort(
|
||||
networkUuid,
|
||||
networkName,
|
||||
remoteAddress,
|
||||
encapsulation
|
||||
) {
|
||||
const socket = await this._connect()
|
||||
const index = this._numberOfPortAndInterface
|
||||
++this._numberOfPortAndInterface
|
||||
|
||||
const [bridgeUuid, bridgeName] = await this._getBridgeUuidForNetwork(
|
||||
networkUuid,
|
||||
networkName,
|
||||
socket
|
||||
)
|
||||
if (bridgeUuid == null) {
|
||||
socket.destroy()
|
||||
return
|
||||
}
|
||||
|
||||
const alreadyExist = await this._interfaceAndPortAlreadyExist(
|
||||
bridgeUuid,
|
||||
bridgeName,
|
||||
remoteAddress,
|
||||
socket
|
||||
)
|
||||
if (alreadyExist) {
|
||||
socket.destroy()
|
||||
return
|
||||
}
|
||||
|
||||
const interfaceName = 'tunnel_iface' + index
|
||||
const portName = 'tunnel_port' + index
|
||||
|
||||
// Add interface and port to the bridge
|
||||
const options = ['map', [['remote_ip', remoteAddress]]]
|
||||
const addInterfaceOperation = {
|
||||
op: 'insert',
|
||||
table: 'Interface',
|
||||
row: {
|
||||
type: encapsulation,
|
||||
options: options,
|
||||
name: interfaceName,
|
||||
other_config: ['map', [['private_pool_wide', 'true']]],
|
||||
},
|
||||
'uuid-name': 'new_iface',
|
||||
}
|
||||
const addPortOperation = {
|
||||
op: 'insert',
|
||||
table: 'Port',
|
||||
row: {
|
||||
name: portName,
|
||||
interfaces: ['set', [['named-uuid', 'new_iface']]],
|
||||
other_config: ['map', [['private_pool_wide', 'true']]],
|
||||
},
|
||||
'uuid-name': 'new_port',
|
||||
}
|
||||
const mutateBridgeOperation = {
|
||||
op: 'mutate',
|
||||
table: 'Bridge',
|
||||
where: [['_uuid', '==', ['uuid', bridgeUuid]]],
|
||||
mutations: [['ports', 'insert', ['set', [['named-uuid', 'new_port']]]]],
|
||||
}
|
||||
const params = [
|
||||
'Open_vSwitch',
|
||||
addInterfaceOperation,
|
||||
addPortOperation,
|
||||
mutateBridgeOperation,
|
||||
]
|
||||
const jsonObjects = await this._sendOvsdbTransaction(params, socket)
|
||||
if (jsonObjects == null) {
|
||||
socket.destroy()
|
||||
return
|
||||
}
|
||||
|
||||
let error
|
||||
let details
|
||||
let i = 0
|
||||
let opResult
|
||||
do {
|
||||
opResult = jsonObjects[0].result[i]
|
||||
if (opResult != null && opResult.error != null) {
|
||||
error = opResult.error
|
||||
details = opResult.details
|
||||
}
|
||||
++i
|
||||
} while (opResult && !error)
|
||||
|
||||
if (error != null) {
|
||||
log.error('Error while adding port and interface to bridge', {
|
||||
error,
|
||||
details,
|
||||
port: portName,
|
||||
interface: interfaceName,
|
||||
bridge: bridgeName,
|
||||
network: networkName,
|
||||
host: this._host.name_label,
|
||||
})
|
||||
socket.destroy()
|
||||
return
|
||||
}
|
||||
|
||||
log.debug('Port and interface added to bridge', {
|
||||
port: portName,
|
||||
interface: interfaceName,
|
||||
bridge: bridgeName,
|
||||
network: networkName,
|
||||
host: this._host.name_label,
|
||||
})
|
||||
socket.destroy()
|
||||
}
|
||||
|
||||
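// Remove from the bridge backing the network every port previously created
// by an SDN controller (ports tagged `private_pool_wide`).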
async resetForNetwork(networkUuid, networkName) {
|
||||
const socket = await this._connect()
|
||||
const [bridgeUuid, bridgeName] = await this._getBridgeUuidForNetwork(
|
||||
networkUuid,
|
||||
networkName,
|
||||
socket
|
||||
)
|
||||
if (bridgeUuid == null) {
|
||||
socket.destroy()
|
||||
return
|
||||
}
|
||||
|
||||
// Delete old ports created by an SDN controller
|
||||
const ports = await this._getBridgePorts(bridgeUuid, bridgeName, socket)
|
||||
if (ports == null) {
|
||||
socket.destroy()
|
||||
return
|
||||
}
|
||||
const portsToDelete = []
|
||||
for (const port of ports) {
|
||||
const portUuid = port[1]
|
||||
|
||||
const where = [['_uuid', '==', ['uuid', portUuid]]]
|
||||
const selectResult = await this._select(
|
||||
'Port',
|
||||
['name', 'other_config'],
|
||||
where,
|
||||
socket
|
||||
)
|
||||
if (selectResult == null) {
|
||||
continue
|
||||
}
|
||||
|
||||
forOwn(selectResult.other_config[1], config => {
|
||||
if (config[0] === 'private_pool_wide' && config[1] === 'true') {
|
||||
portsToDelete.push(['uuid', portUuid])
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
if (portsToDelete.length === 0) {
|
||||
// Nothing to do
|
||||
socket.destroy()
|
||||
return
|
||||
}
|
||||
|
||||
const mutateBridgeOperation = {
|
||||
op: 'mutate',
|
||||
table: 'Bridge',
|
||||
where: [['_uuid', '==', ['uuid', bridgeUuid]]],
|
||||
mutations: [['ports', 'delete', ['set', portsToDelete]]],
|
||||
}
|
||||
|
||||
const params = ['Open_vSwitch', mutateBridgeOperation]
|
||||
const jsonObjects = await this._sendOvsdbTransaction(params, socket)
|
||||
if (jsonObjects == null) {
|
||||
socket.destroy()
|
||||
return
|
||||
}
|
||||
if (jsonObjects[0].error != null) {
|
||||
log.error('Error while deleting ports from bridge', {
|
||||
error: jsonObjects[0].error,
|
||||
bridge: bridgeName,
|
||||
host: this._host.name_label,
|
||||
})
|
||||
socket.destroy()
|
||||
return
|
||||
}
|
||||
|
||||
log.debug('Ports deleted from bridge', {
|
||||
nPorts: jsonObjects[0].result[0].count,
|
||||
bridge: bridgeName,
|
||||
host: this._host.name_label,
|
||||
})
|
||||
socket.destroy()
|
||||
}
|
||||
|
||||
// ===========================================================================
|
||||
|
||||
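// Extract the complete JSON objects contained in a raw data chunk by
// tracking brace depth: OVSDB may send several responses in a single chunk.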
_parseJson(chunk) {
|
||||
let data = chunk.toString()
|
||||
let buffer = ''
|
||||
let depth = 0
|
||||
let pos = 0
|
||||
const objects = []
|
||||
|
||||
for (let i = pos; i < data.length; ++i) {
|
||||
const c = data.charAt(i)
|
||||
if (c === '{') {
|
||||
depth++
|
||||
} else if (c === '}') {
|
||||
depth--
|
||||
if (depth === 0) {
|
||||
const object = JSON.parse(buffer + data.substr(0, i + 1))
|
||||
objects.push(object)
|
||||
buffer = ''
|
||||
data = data.substr(i + 1)
|
||||
pos = 0
|
||||
i = -1
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
buffer += data
|
||||
return objects
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
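// Find the OVS bridge whose `external_ids` reference the XAPI network UUID
// (key `xs-network-uuids`) and return its UUID and name.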
async _getBridgeUuidForNetwork(networkUuid, networkName, socket) {
|
||||
const where = [
|
||||
[
|
||||
'external_ids',
|
||||
'includes',
|
||||
['map', [['xs-network-uuids', networkUuid]]],
|
||||
],
|
||||
]
|
||||
const selectResult = await this._select(
|
||||
'Bridge',
|
||||
['_uuid', 'name'],
|
||||
where,
|
||||
socket
|
||||
)
|
||||
if (selectResult == null) {
|
||||
log.error('No bridge found for network', {
|
||||
network: networkName,
|
||||
host: this._host.name_label,
|
||||
})
|
||||
return [null, null]
|
||||
}
|
||||
|
||||
const bridgeUuid = selectResult._uuid[1]
|
||||
const bridgeName = selectResult.name
|
||||
|
||||
return [bridgeUuid, bridgeName]
|
||||
}
|
||||
|
||||
async _interfaceAndPortAlreadyExist(
|
||||
bridgeUuid,
|
||||
bridgeName,
|
||||
remoteAddress,
|
||||
socket
|
||||
) {
|
||||
const ports = await this._getBridgePorts(bridgeUuid, bridgeName, socket)
|
||||
if (ports == null) {
|
||||
return
|
||||
}
|
||||
|
||||
for (const port of ports) {
|
||||
const portUuid = port[1]
|
||||
const interfaces = await this._getPortInterfaces(portUuid, socket)
|
||||
if (interfaces == null) {
|
||||
continue
|
||||
}
|
||||
|
||||
for (const iface of interfaces) {
|
||||
const interfaceUuid = iface[1]
|
||||
const hasRemote = await this._interfaceHasRemote(
|
||||
interfaceUuid,
|
||||
remoteAddress,
|
||||
socket
|
||||
)
|
||||
if (hasRemote === true) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
async _getBridgePorts(bridgeUuid, bridgeName, socket) {
|
||||
const where = [['_uuid', '==', ['uuid', bridgeUuid]]]
|
||||
const selectResult = await this._select('Bridge', ['ports'], where, socket)
|
||||
if (selectResult == null) {
|
||||
return null
|
||||
}
|
||||
|
||||
return selectResult.ports[0] === 'set'
|
||||
? selectResult.ports[1]
|
||||
: [selectResult.ports]
|
||||
}
|
||||
|
||||
async _getPortInterfaces(portUuid, socket) {
|
||||
const where = [['_uuid', '==', ['uuid', portUuid]]]
|
||||
const selectResult = await this._select(
|
||||
'Port',
|
||||
['name', 'interfaces'],
|
||||
where,
|
||||
socket
|
||||
)
|
||||
if (selectResult == null) {
|
||||
return null
|
||||
}
|
||||
|
||||
return selectResult.interfaces[0] === 'set'
|
||||
? selectResult.interfaces[1]
|
||||
: [selectResult.interfaces]
|
||||
}
|
||||
|
||||
async _interfaceHasRemote(interfaceUuid, remoteAddress, socket) {
|
||||
const where = [['_uuid', '==', ['uuid', interfaceUuid]]]
|
||||
const selectResult = await this._select(
|
||||
'Interface',
|
||||
['name', 'options'],
|
||||
where,
|
||||
socket
|
||||
)
|
||||
if (selectResult == null) {
|
||||
return false
|
||||
}
|
||||
|
||||
for (const option of selectResult.options[1]) {
|
||||
if (option[0] === 'remote_ip' && option[1] === remoteAddress) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
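// Run an OVSDB `select` operation and return the single matching row, or a
// nullish value on error or when no row matches.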
async _select(table, columns, where, socket) {
|
||||
const selectOperation = {
|
||||
op: 'select',
|
||||
table: table,
|
||||
columns: columns,
|
||||
where: where,
|
||||
}
|
||||
|
||||
const params = ['Open_vSwitch', selectOperation]
|
||||
const jsonObjects = await this._sendOvsdbTransaction(params, socket)
|
||||
if (jsonObjects == null) {
|
||||
return
|
||||
}
|
||||
const jsonResult = jsonObjects[0].result[0]
|
||||
if (jsonResult.error != null) {
|
||||
log.error('Error while selecting columns', {
|
||||
error: jsonResult.error,
|
||||
details: jsonResult.details,
|
||||
columns,
|
||||
table,
|
||||
where,
|
||||
host: this._host.name_label,
|
||||
})
|
||||
return null
|
||||
}
|
||||
|
||||
if (jsonResult.rows.length === 0) {
|
||||
log.error('No result for select', {
|
||||
columns,
|
||||
table,
|
||||
where,
|
||||
host: this._host.name_label,
|
||||
})
|
||||
return null
|
||||
}
|
||||
|
||||
// For now all select operations should return only 1 row
|
||||
assert(
|
||||
jsonResult.rows.length === 1,
|
||||
`[${this._host.name_label}] There should be exactly 1 row when searching: '${columns}' in: '${table}' where: '${where}'`
|
||||
)
|
||||
|
||||
return jsonResult.rows[0]
|
||||
}
|
||||
|
||||
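// Send a JSON-RPC `transact` request on the socket and wait for the
// response whose id matches the request id.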
async _sendOvsdbTransaction(params, socket) {
|
||||
const stream = socket
|
||||
|
||||
const requestId = this._requestID
|
||||
++this._requestID
|
||||
const req = {
|
||||
id: requestId,
|
||||
method: 'transact',
|
||||
params: params,
|
||||
}
|
||||
|
||||
try {
|
||||
stream.write(JSON.stringify(req))
|
||||
} catch (error) {
|
||||
log.error('Error while writing into stream', {
|
||||
error,
|
||||
host: this._host.name_label,
|
||||
})
|
||||
return null
|
||||
}
|
||||
|
||||
let result
|
||||
let jsonObjects
|
||||
let resultRequestId
|
||||
do {
|
||||
try {
|
||||
result = await fromEvent(stream, 'data', {})
|
||||
} catch (error) {
|
||||
log.error('Error while waiting for stream data', {
|
||||
error,
|
||||
host: this._host.name_label,
|
||||
})
|
||||
return null
|
||||
}
|
||||
|
||||
jsonObjects = this._parseJson(result)
|
||||
resultRequestId = jsonObjects[0].id
|
||||
} while (resultRequestId !== requestId)
|
||||
|
||||
return jsonObjects
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
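// Open a TLS connection to the host's OVSDB server (port 6640) using the
// controller's client certificate and key.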
async _connect() {
|
||||
const options = {
|
||||
ca: this._caCert,
|
||||
key: this._clientKey,
|
||||
cert: this._clientCert,
|
||||
host: this._host.address,
|
||||
port: OVSDB_PORT,
|
||||
rejectUnauthorized: false,
|
||||
requestCert: false,
|
||||
}
|
||||
const socket = connect(options)
|
||||
|
||||
try {
|
||||
await fromEvent(socket, 'secureConnect', {})
|
||||
} catch (error) {
|
||||
log.error('TLS connection failed', {
|
||||
error,
|
||||
code: error.code,
|
||||
host: this._host.name_label,
|
||||
})
|
||||
throw error
|
||||
}
|
||||
|
||||
socket.on('error', error => {
|
||||
log.error('Socket error', {
|
||||
error,
|
||||
code: error.code,
|
||||
host: this._host.name_label,
|
||||
})
|
||||
})
|
||||
|
||||
return socket
|
||||
}
|
||||
}
|
||||
8
packages/xo-server-test/.babelrc.js
Normal file
@@ -0,0 +1,8 @@
|
||||
const pkg = require('./package.json')
|
||||
|
||||
// `xo-server-test` is a special package which has no dev dependencies but our
|
||||
// babel config generator only looks in `devDependencies`.
|
||||
require('assert').strictEqual(pkg.devDependencies, undefined)
|
||||
pkg.devDependencies = pkg.dependencies
|
||||
|
||||
module.exports = require('../../@xen-orchestra/babel-config')(pkg)
|
||||
24
packages/xo-server-test/.npmignore
Normal file
@@ -0,0 +1,24 @@
|
||||
/benchmark/
|
||||
/benchmarks/
|
||||
*.bench.js
|
||||
*.bench.js.map
|
||||
|
||||
/examples/
|
||||
example.js
|
||||
example.js.map
|
||||
*.example.js
|
||||
*.example.js.map
|
||||
|
||||
/fixture/
|
||||
/fixtures/
|
||||
*.fixture.js
|
||||
*.fixture.js.map
|
||||
*.fixtures.js
|
||||
*.fixtures.js.map
|
||||
|
||||
/test/
|
||||
/tests/
|
||||
*.spec.js
|
||||
*.spec.js.map
|
||||
|
||||
__snapshots__/
|
||||
136
packages/xo-server-test/README.md
Normal file
@@ -0,0 +1,136 @@
|
||||
# xo-server-test
|
||||
|
||||
> Test client for Xo-Server
|
||||
|
||||
## Adding a test
|
||||
|
||||
### Organization
|
||||
|
||||
```
|
||||
src
|
||||
├─ user
|
||||
| ├─ __snapshots__
|
||||
| | └─ index.spec.js.snap
|
||||
| └─ index.spec.js
|
||||
├─ job
|
||||
¦ └─ index.spec.js
|
||||
¦
|
||||
¦
|
||||
├─ _xoConnection.js
|
||||
└─ util.js
|
||||
```
|
||||
|
||||
The tests can describe xo methods or scenarios:
|
||||
```javascript
|
||||
import xo from "../_xoConnection";
|
||||
|
||||
describe("user", () => {
|
||||
|
||||
// testing a method
|
||||
describe(".set()", () => {
|
||||
it("sets an email", async () => {
|
||||
// some tests using xo methods and helpers from _xoConnection.js
|
||||
const id = await xo.createTempUser(SIMPLE_USER);
|
||||
expect(await xo.call("user.set", params)).toBe(true);
|
||||
expect(await xo.getUser(id)).toMatchSnapshot({
|
||||
id: expect.any(String),
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
// testing a scenario
|
||||
test("create two users, modify a user email to be the same with the other and fail trying to connect them", () => {
|
||||
/* some tests */
|
||||
});
|
||||
|
||||
});
|
||||
```
|
||||
|
||||
### Best practices
|
||||
|
||||
- The test environment must remain the same before and after each test:
|
||||
* each resource created must be deleted
|
||||
* existing resources should not be altered
|
||||
|
||||
- Write the test title as a full sentence. It must be clear and consistent.
|
||||
|
||||
- If the feature you want to test is not implemented yet: write the test and skip it using `it.skip()`.
|
||||
|
||||
- Choose values that cover as many test cases as possible.
|
||||
|
||||
- If your tests keep track of large objects, it is better to use snapshots.
|
||||
|
||||
- `_xoConnection.js` contains helpers to create temporary resources and to interface with XO.
|
||||
You can use it if you need to create resources which will be automatically deleted after the test:
|
||||
```javascript
|
||||
import xo from "../_xoConnection";
|
||||
|
||||
describe(".create()", () => {
|
||||
it("creates a user without permission", async () => {
|
||||
// The user will be deleted automatically at the end of the test
|
||||
const userId = await xo.createTempUser({
|
||||
email: "wayne1@vates.fr",
|
||||
password: "batman1",
|
||||
});
|
||||
expect(await xo.getUser(userId)).toMatchSnapshot({
|
||||
id: expect.any(String),
|
||||
});
|
||||
});
|
||||
});
|
||||
```
|
||||
|
||||
The available helpers (a combined usage sketch follows this list):
|
||||
* `createTempUser(params)`
|
||||
* `getUser(id)`
|
||||
* `createTempJob(params)`
|
||||
* `createTempBackupNgJob(params)`
|
||||
* `createTempVm(params)`
|
||||
* `getSchedule(predicate)`
|
||||
|
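
A minimal sketch combining these helpers to create a backup job and look up its schedule. The job name, the `tmpId` key and the VM id are placeholders; it assumes a reachable XO and a usable VM such as `config.vms.default`:

```javascript
import xo from "../_xoConnection";

describe("backupNg", () => {
  it("creates a job and finds its schedule", async () => {
    // The job and its schedules are deleted automatically after the test.
    const { id: jobId } = await xo.createTempBackupNgJob({
      name: "sketch-backupNg",
      mode: "full",
      vms: { id: "<some-vm-id>" }, // placeholder, e.g. config.vms.default
      schedules: { tmpId: { name: "sketch", cron: "0 * * * * *" } },
      settings: { tmpId: { snapshotRetention: 1 } },
    });

    const schedule = await xo.getSchedule({ jobId });
    expect(schedule.jobId).toBe(jobId);
  });
});
```
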
||||
## Usage
|
||||
|
||||
- Before running the tests, you have to create a config file for xo-server-test.
|
||||
```
|
||||
> cp sample.config.toml ~/.config/xo-server-test/config.toml
|
||||
```
|
||||
Then fill it in with your own values.
|
||||
|
||||
- To run the tests:
|
||||
```
|
||||
> npm ci
|
||||
> yarn test
|
||||
```
|
||||
|
||||
The output shows each test suite as passed (`PASS`) or failed (`FAIL`).
|
||||
```
|
||||
> yarn test
|
||||
yarn run v1.9.4
|
||||
$ jest
|
||||
PASS src/user/user.spec.js
|
||||
PASS src/job/job.spec.js
|
||||
PASS src/backupNg/backupNg.spec.js
|
||||
|
||||
Test Suites: 3 passed, 3 total
|
||||
Tests: 2 skipped, 36 passed, 38 total
|
||||
Snapshots: 35 passed, 35 total
|
||||
Time: 7.257s, estimated 8s
|
||||
Ran all test suites.
|
||||
Done in 7.92s.
|
||||
```
|
||||
|
||||
- You can run only the tests related to changed files and review the failing output interactively with: `> yarn test --watch`
|
||||
|
||||
## Contributions
|
||||
|
||||
Contributions are *very* welcome, either on the documentation or on
|
||||
the code.
|
||||
|
||||
You may:
|
||||
|
||||
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
|
||||
you've encountered;
|
||||
- fork and create a pull request.
|
||||
|
||||
## License
|
||||
|
||||
ISC © [Vates SAS](http://vates.fr)
|
||||
54
packages/xo-server-test/package.json
Normal file
@@ -0,0 +1,54 @@
|
||||
{
|
||||
"private": true,
|
||||
"name": "xo-server-test",
|
||||
"version": "0.0.0",
|
||||
"license": "ISC",
|
||||
"description": "Test client for Xo-Server",
|
||||
"keywords": [],
|
||||
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-server-test",
|
||||
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
|
||||
"repository": {
|
||||
"directory": "packages/xo-server-test",
|
||||
"type": "git",
|
||||
"url": "https://github.com/vatesfr/xen-orchestra.git"
|
||||
},
|
||||
"author": {
|
||||
"name": "Julien Fontanet",
|
||||
"email": "julien.fontanet@isonoe.net"
|
||||
},
|
||||
"preferGlobal": false,
|
||||
"main": "dist/",
|
||||
"bin": {},
|
||||
"files": [
|
||||
"dist/"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">=6"
|
||||
},
|
||||
"dependencies": {
|
||||
"@babel/cli": "^7.1.5",
|
||||
"@babel/core": "^7.1.6",
|
||||
"@babel/plugin-proposal-decorators": "^7.4.0",
|
||||
"@babel/preset-env": "^7.1.6",
|
||||
"@iarna/toml": "^2.2.1",
|
||||
"app-conf": "^0.7.0",
|
||||
"babel-plugin-lodash": "^3.2.11",
|
||||
"golike-defer": "^0.4.1",
|
||||
"jest": "^24.8.0",
|
||||
"lodash": "^4.17.11",
|
||||
"xo-collection": "^0.4.1",
|
||||
"xo-common": "^0.2.0",
|
||||
"xo-lib": "^0.9.0"
|
||||
},
|
||||
"scripts": {
|
||||
"dev-test": "jest --bail --watch",
|
||||
"test": "jest"
|
||||
},
|
||||
"jest": {
|
||||
"modulePathIgnorePatterns": [
|
||||
"<rootDir>/src/old-tests"
|
||||
],
|
||||
"testEnvironment": "node",
|
||||
"testRegex": "\\.spec\\.js$"
|
||||
}
|
||||
}
|
||||
18
packages/xo-server-test/sample.config.toml
Normal file
@@ -0,0 +1,18 @@
|
||||
[xoConnection]
|
||||
url = ''
|
||||
email = ''
|
||||
password = ''
|
||||
|
||||
[vms]
|
||||
default = ''
|
||||
|
||||
[templates]
|
||||
default = ''
|
||||
|
||||
[srs]
|
||||
default = ''
|
||||
|
||||
# resources created before all tests and deleted at the end.
|
||||
[preCreatedResources]
|
||||
[preCreatedResources.remotes]
|
||||
default = { name = '', url = '' }
|
||||
13
packages/xo-server-test/src/_config.js
Normal file
@@ -0,0 +1,13 @@
|
||||
import appConf from 'app-conf'
|
||||
import path from 'path'
|
||||
|
||||
/* eslint-env jest */
|
||||
|
||||
let config
|
||||
export { config as default }
|
||||
|
||||
beforeAll(async () => {
|
||||
config = await appConf.load('xo-server-test', {
|
||||
appDir: path.join(__dirname, '..'),
|
||||
})
|
||||
})
|
||||
6
packages/xo-server-test/src/_randomId.js
Normal file
@@ -0,0 +1,6 @@
|
||||
const randomId = () =>
|
||||
Math.random()
|
||||
.toString(36)
|
||||
.slice(2)
|
||||
|
||||
export { randomId as default }
|
||||
205
packages/xo-server-test/src/_xoConnection.js
Normal file
@@ -0,0 +1,205 @@
|
||||
/* eslint-env jest */
|
||||
import defer from 'golike-defer'
|
||||
import Xo from 'xo-lib'
|
||||
import XoCollection from 'xo-collection'
|
||||
import { find, forOwn } from 'lodash'
|
||||
|
||||
import config from './_config'
|
||||
|
||||
const ARGS_BY_TYPE = {
|
||||
remotes: {
|
||||
getCreationArgs: conf => ['remote.create', conf],
|
||||
getDeletionArgs: res => ['remote.delete', { id: res.id }],
|
||||
},
|
||||
}
|
||||
|
||||
const getDefaultCredentials = () => {
|
||||
const { email, password } = config.xoConnection
|
||||
return { email, password }
|
||||
}
|
||||
|
||||
class XoConnection extends Xo {
|
||||
constructor(opts) {
|
||||
super(opts)
|
||||
|
||||
const objects = (this._objects = new XoCollection())
|
||||
const watchers = (this._watchers = {})
|
||||
this._tempResourceDisposers = []
|
||||
this._durableResourceDisposers = []
|
||||
|
||||
this.on('notification', ({ method, params }) => {
|
||||
if (method !== 'all') {
|
||||
return
|
||||
}
|
||||
|
||||
const fn = params.type === 'exit' ? objects.unset : objects.set
|
||||
forOwn(params.items, (item, id) => {
|
||||
fn.call(objects, id, item)
|
||||
|
||||
const watcher = watchers[id]
|
||||
if (watcher !== undefined) {
|
||||
watcher(item)
|
||||
delete watchers[id]
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
get objects() {
|
||||
return this._objects
|
||||
}
|
||||
|
||||
async _fetchObjects() {
|
||||
const { _objects: objects, _watchers: watchers } = this
|
||||
forOwn(await this.call('xo.getAllObjects'), (object, id) => {
|
||||
objects.set(id, object)
|
||||
|
||||
const watcher = watchers[id]
|
||||
if (watcher !== undefined) {
|
||||
watcher(object)
|
||||
delete watchers[id]
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// TODO: integrate in xo-lib.
|
||||
waitObject(id) {
|
||||
return new Promise(resolve => {
|
||||
this._watchers[id] = resolve
|
||||
}) // FIXME: work with multiple listeners.
|
||||
}
|
||||
|
||||
async getOrWaitObject(id) {
|
||||
const object = this._objects.all[id]
|
||||
if (object !== undefined) {
|
||||
return object
|
||||
}
|
||||
return this.waitObject(id)
|
||||
}
|
||||
|
||||
@defer
|
||||
async connect($defer, credentials = getDefaultCredentials()) {
|
||||
await this.open()
|
||||
$defer.onFailure(() => this.close())
|
||||
|
||||
await this.signIn(credentials)
|
||||
await this._fetchObjects()
|
||||
|
||||
return this
|
||||
}
|
||||
|
||||
async waitObjectState(id, predicate) {
|
||||
let obj = this._objects.all[id]
|
||||
while (true) {
|
||||
try {
|
||||
await predicate(obj)
|
||||
return
|
||||
} catch (_) {}
|
||||
// If the predicate failed, wait for the next object update and retry.
|
||||
obj = await this.waitObject(id)
|
||||
}
|
||||
}
|
||||
|
||||
async createTempUser(params) {
|
||||
const id = await this.call('user.create', params)
|
||||
this._tempResourceDisposers.push('user.delete', { id })
|
||||
return id
|
||||
}
|
||||
|
||||
async getUser(id) {
|
||||
return find(await super.call('user.getAll'), { id })
|
||||
}
|
||||
|
||||
async createTempJob(params) {
|
||||
const id = await this.call('job.create', { job: params })
|
||||
this._tempResourceDisposers.push('job.delete', { id })
|
||||
return id
|
||||
}
|
||||
|
||||
async createTempBackupNgJob(params) {
|
||||
const job = await this.call('backupNg.createJob', params)
|
||||
this._tempResourceDisposers.push('backupNg.deleteJob', { id: job.id })
|
||||
return job
|
||||
}
|
||||
|
||||
async createTempVm(params) {
|
||||
const id = await this.call('vm.create', params)
|
||||
this._tempResourceDisposers.push('vm.delete', { id })
|
||||
await this.waitObjectState(id, vm => {
|
||||
if (vm.type !== 'VM') throw new Error('retry')
|
||||
})
|
||||
return id
|
||||
}
|
||||
|
||||
async createRequiredResources() {
|
||||
const requiredResources = {}
|
||||
const resourcesToCreate = config.preCreatedResources
|
||||
for (const typeOfResources in resourcesToCreate) {
|
||||
const { getCreationArgs, getDeletionArgs } = ARGS_BY_TYPE[typeOfResources]
|
||||
const resources = resourcesToCreate[typeOfResources]
|
||||
for (const resource in resources) {
|
||||
const result = await this.call(...getCreationArgs(resources[resource]))
|
||||
this._durableResourceDisposers.push(...getDeletionArgs(result))
|
||||
requiredResources[typeOfResources] = {
|
||||
...requiredResources[typeOfResources],
|
||||
[resource]: result,
|
||||
}
|
||||
}
|
||||
}
|
||||
return requiredResources
|
||||
}
|
||||
|
||||
async getSchedule(predicate) {
|
||||
return find(await this.call('schedule.getAll'), predicate)
|
||||
}
|
||||
|
||||
async _cleanDisposers(disposers) {
|
||||
for (let n = disposers.length - 1; n > 0; ) {
|
||||
const params = disposers[n--]
|
||||
const method = disposers[n--]
|
||||
await this.call(method, params).catch(error => {
|
||||
console.warn('_cleanDisposers', method, params, error)
|
||||
})
|
||||
}
|
||||
disposers.length = 0
|
||||
}
|
||||
|
||||
async deleteTempResources() {
|
||||
await this._cleanDisposers(this._tempResourceDisposers)
|
||||
}
|
||||
|
||||
async deleteDurableResources() {
|
||||
await this._cleanDisposers(this._durableResourceDisposers)
|
||||
}
|
||||
}
|
||||
|
||||
const getConnection = credentials => {
|
||||
const xo = new XoConnection({ url: config.xoConnection.url })
|
||||
return xo.connect(credentials)
|
||||
}
|
||||
|
||||
let xo
|
||||
let resources
|
||||
beforeAll(async () => {
|
||||
xo = await getConnection()
|
||||
resources = await xo.createRequiredResources()
|
||||
})
|
||||
afterAll(async () => {
|
||||
await xo.deleteDurableResources()
|
||||
await xo.close()
|
||||
xo = null
|
||||
})
|
||||
afterEach(() => xo.deleteTempResources())
|
||||
|
||||
export { xo as default, resources }
|
||||
|
||||
export const testConnection = ({ credentials }) =>
|
||||
getConnection(credentials).then(connection => connection.close())
|
||||
|
||||
export const testWithOtherConnection = defer(
|
||||
async ($defer, credentials, functionToExecute) => {
|
||||
const xoUser = await getConnection(credentials)
|
||||
$defer(() => xoUser.close())
|
||||
await functionToExecute(xoUser)
|
||||
}
|
||||
)
|
||||
@@ -0,0 +1,170 @@
|
||||
// Jest Snapshot v1, https://goo.gl/fbAQLP
|
||||
|
||||
exports[`backupNg .createJob() : creates a new backup job with schedules 1`] = `
|
||||
Object {
|
||||
"id": Any<String>,
|
||||
"mode": "full",
|
||||
"name": "default-backupNg",
|
||||
"settings": Any<Object>,
|
||||
"type": "backup",
|
||||
"userId": Any<String>,
|
||||
"vms": Any<Object>,
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg .createJob() : creates a new backup job with schedules 2`] = `
|
||||
Object {
|
||||
"cron": "0 * * * * *",
|
||||
"enabled": false,
|
||||
"id": Any<String>,
|
||||
"jobId": Any<String>,
|
||||
"name": "scheduleTest",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg .createJob() : creates a new backup job without schedules 1`] = `
|
||||
Object {
|
||||
"id": Any<String>,
|
||||
"mode": "full",
|
||||
"name": "default-backupNg",
|
||||
"settings": Object {
|
||||
"": Object {
|
||||
"reportWhen": "never",
|
||||
},
|
||||
},
|
||||
"type": "backup",
|
||||
"userId": Any<String>,
|
||||
"vms": Any<Object>,
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg .runJob() : fails trying to run a backup job with a VM without disks 1`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"mode": "full",
|
||||
"reportWhen": "never",
|
||||
},
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"jobId": Any<String>,
|
||||
"jobName": "default-backupNg",
|
||||
"message": "backup",
|
||||
"scheduleId": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "skipped",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg .runJob() : fails trying to run a backup job with a VM without disks 2`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"id": Any<String>,
|
||||
"type": "VM",
|
||||
},
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"result": Object {
|
||||
"message": "no disks found",
|
||||
"name": "Error",
|
||||
"stack": Any<String>,
|
||||
},
|
||||
"start": Any<Number>,
|
||||
"status": "skipped",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg .runJob() : fails trying to run a backup job with no matching VMs 1`] = `[JsonRpcError: unknown error from the peer]`;
|
||||
|
||||
exports[`backupNg .runJob() : fails trying to run a backup job with non-existent vm 1`] = `
|
||||
Array [
|
||||
Object {
|
||||
"data": Object {
|
||||
"vms": Array [
|
||||
"non-existent-id",
|
||||
],
|
||||
},
|
||||
"message": "missingVms",
|
||||
},
|
||||
]
|
||||
`;
|
||||
|
||||
exports[`backupNg .runJob() : fails trying to run a backup job without schedule 1`] = `[JsonRpcError: invalid parameters]`;
|
||||
|
||||
exports[`backupNg .runJob() : fails trying to run backup job without retentions 1`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"mode": "full",
|
||||
"reportWhen": "never",
|
||||
},
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"jobId": Any<String>,
|
||||
"jobName": "default-backupNg",
|
||||
"message": "backup",
|
||||
"scheduleId": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "failure",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg .runJob() : fails trying to run backup job without retentions 2`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"id": Any<String>,
|
||||
"type": "VM",
|
||||
},
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"result": Object {
|
||||
"message": "copy, export and snapshot retentions cannot both be 0",
|
||||
"name": "Error",
|
||||
"stack": Any<String>,
|
||||
},
|
||||
"start": Any<Number>,
|
||||
"status": "failure",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a rolling snapshot with 2 as retention & revert to an old state 1`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"mode": "full",
|
||||
"reportWhen": "never",
|
||||
},
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"jobId": Any<String>,
|
||||
"jobName": "default-backupNg",
|
||||
"message": "backup",
|
||||
"scheduleId": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a rolling snapshot with 2 as retention & revert to an old state 2`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": "snapshot",
|
||||
"result": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a rolling snapshot with 2 as retention & revert to an old state 3`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"id": Any<String>,
|
||||
"type": "VM",
|
||||
},
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
385
packages/xo-server-test/src/backupNg/backupNg.spec.js
Normal file
@@ -0,0 +1,385 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
import { noSuchObject } from 'xo-common/api-errors'
|
||||
|
||||
import config from '../_config'
|
||||
import randomId from '../_randomId'
|
||||
import xo, { resources } from '../_xoConnection'
|
||||
|
||||
const DEFAULT_SCHEDULE = {
|
||||
name: 'scheduleTest',
|
||||
cron: '0 * * * * *',
|
||||
}
|
||||
|
||||
describe('backupNg', () => {
|
||||
let defaultBackupNg
|
||||
|
||||
beforeAll(() => {
|
||||
defaultBackupNg = {
|
||||
name: 'default-backupNg',
|
||||
mode: 'full',
|
||||
vms: {
|
||||
id: config.vms.default,
|
||||
},
|
||||
settings: {
|
||||
'': {
|
||||
reportWhen: 'never',
|
||||
},
|
||||
},
|
||||
}
|
||||
})
|
||||
|
||||
describe('.createJob() :', () => {
|
||||
it('creates a new backup job without schedules', async () => {
|
||||
const backupNg = await xo.createTempBackupNgJob(defaultBackupNg)
|
||||
expect(backupNg).toMatchSnapshot({
|
||||
id: expect.any(String),
|
||||
userId: expect.any(String),
|
||||
vms: expect.any(Object),
|
||||
})
|
||||
expect(backupNg.vms).toEqual(defaultBackupNg.vms)
|
||||
expect(backupNg.userId).toBe(xo._user.id)
|
||||
})
|
||||
|
||||
it('creates a new backup job with schedules', async () => {
|
||||
const scheduleTempId = randomId()
|
||||
const { id: jobId } = await xo.createTempBackupNgJob({
|
||||
...defaultBackupNg,
|
||||
schedules: {
|
||||
[scheduleTempId]: DEFAULT_SCHEDULE,
|
||||
},
|
||||
settings: {
|
||||
...defaultBackupNg.settings,
|
||||
[scheduleTempId]: { snapshotRetention: 1 },
|
||||
},
|
||||
})
|
||||
|
||||
const backupNgJob = await xo.call('backupNg.getJob', { id: jobId })
|
||||
|
||||
expect(backupNgJob).toMatchSnapshot({
|
||||
id: expect.any(String),
|
||||
userId: expect.any(String),
|
||||
settings: expect.any(Object),
|
||||
vms: expect.any(Object),
|
||||
})
|
||||
expect(backupNgJob.vms).toEqual(defaultBackupNg.vms)
|
||||
expect(backupNgJob.userId).toBe(xo._user.id)
|
||||
|
||||
expect(Object.keys(backupNgJob.settings).length).toBe(2)
|
||||
const schedule = await xo.getSchedule({ jobId })
|
||||
expect(typeof schedule).toBe('object')
|
||||
expect(backupNgJob.settings[schedule.id]).toEqual({
|
||||
snapshotRetention: 1,
|
||||
})
|
||||
|
||||
expect(schedule).toMatchSnapshot({
|
||||
id: expect.any(String),
|
||||
jobId: expect.any(String),
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe('.delete() :', () => {
|
||||
it('deletes a backup job', async () => {
|
||||
const scheduleTempId = randomId()
|
||||
const { id: jobId } = await xo.call('backupNg.createJob', {
|
||||
...defaultBackupNg,
|
||||
schedules: {
|
||||
[scheduleTempId]: DEFAULT_SCHEDULE,
|
||||
},
|
||||
settings: {
|
||||
...defaultBackupNg.settings,
|
||||
[scheduleTempId]: { snapshotRetention: 1 },
|
||||
},
|
||||
})
|
||||
|
||||
const schedule = await xo.getSchedule({ jobId })
|
||||
expect(typeof schedule).toBe('object')
|
||||
|
||||
await xo.call('backupNg.deleteJob', { id: jobId })
|
||||
|
||||
let isRejectedJobErrorValid = false
|
||||
await xo.call('backupNg.getJob', { id: jobId }).catch(error => {
|
||||
isRejectedJobErrorValid = noSuchObject.is(error)
|
||||
})
|
||||
expect(isRejectedJobErrorValid).toBe(true)
|
||||
|
||||
let isRejectedScheduleErrorValid = false
|
||||
await xo.call('schedule.get', { id: schedule.id }).catch(error => {
|
||||
isRejectedScheduleErrorValid = noSuchObject.is(error)
|
||||
})
|
||||
expect(isRejectedScheduleErrorValid).toBe(true)
|
||||
})
|
||||
})
|
||||
|
||||
describe('.runJob() :', () => {
|
||||
it('fails trying to run a backup job without schedule', async () => {
|
||||
const { id } = await xo.createTempBackupNgJob(defaultBackupNg)
|
||||
await expect(xo.call('backupNg.runJob', { id })).rejects.toMatchSnapshot()
|
||||
})
|
||||
|
||||
it('fails trying to run a backup job with no matching VMs', async () => {
|
||||
const scheduleTempId = randomId()
|
||||
const { id: jobId } = await xo.createTempBackupNgJob({
|
||||
...defaultBackupNg,
|
||||
schedules: {
|
||||
[scheduleTempId]: DEFAULT_SCHEDULE,
|
||||
},
|
||||
settings: {
|
||||
[scheduleTempId]: { snapshotRetention: 1 },
|
||||
},
|
||||
vms: {
|
||||
id: config.vms.default,
|
||||
name: 'test-vm-backupNg',
|
||||
},
|
||||
})
|
||||
|
||||
const schedule = await xo.getSchedule({ jobId })
|
||||
expect(typeof schedule).toBe('object')
|
||||
|
||||
await expect(
|
||||
xo.call('backupNg.runJob', { id: jobId, schedule: schedule.id })
|
||||
).rejects.toMatchSnapshot()
|
||||
})
|
||||
|
||||
it('fails trying to run a backup job with non-existent vm', async () => {
|
||||
const scheduleTempId = randomId()
|
||||
const { id: jobId } = await xo.createTempBackupNgJob({
|
||||
...defaultBackupNg,
|
||||
schedules: {
|
||||
[scheduleTempId]: DEFAULT_SCHEDULE,
|
||||
},
|
||||
settings: {
|
||||
[scheduleTempId]: { snapshotRetention: 1 },
|
||||
},
|
||||
vms: {
|
||||
id: 'non-existent-id',
|
||||
},
|
||||
})
|
||||
|
||||
const schedule = await xo.getSchedule({ jobId })
|
||||
expect(typeof schedule).toBe('object')
|
||||
|
||||
await xo.call('backupNg.runJob', { id: jobId, schedule: schedule.id })
|
||||
const [log] = await xo.call('backupNg.getLogs', {
|
||||
scheduleId: schedule.id,
|
||||
})
|
||||
expect(log.warnings).toMatchSnapshot()
|
||||
})
|
||||
|
||||
it('fails trying to run a backup job with a VM without disks', async () => {
|
||||
const vmIdWithoutDisks = await xo.createTempVm({
|
||||
name_label: 'XO Test Without Disks',
|
||||
name_description: 'Creating a vm without disks',
|
||||
template: config.templates.default,
|
||||
})
|
||||
|
||||
const scheduleTempId = randomId()
|
||||
const { id: jobId } = await xo.createTempBackupNgJob({
|
||||
...defaultBackupNg,
|
||||
schedules: {
|
||||
[scheduleTempId]: DEFAULT_SCHEDULE,
|
||||
},
|
||||
settings: {
|
||||
...defaultBackupNg.settings,
|
||||
[scheduleTempId]: { snapshotRetention: 1 },
|
||||
},
|
||||
vms: {
|
||||
id: vmIdWithoutDisks,
|
||||
},
|
||||
})
|
||||
|
||||
const schedule = await xo.getSchedule({ jobId })
|
||||
expect(typeof schedule).toBe('object')
|
||||
await xo.call('backupNg.runJob', { id: jobId, schedule: schedule.id })
|
||||
|
||||
const [
|
||||
{
|
||||
tasks: [vmTask],
|
||||
...log
|
||||
},
|
||||
] = await xo.call('backupNg.getLogs', {
|
||||
jobId,
|
||||
scheduleId: schedule.id,
|
||||
})
|
||||
expect(log).toMatchSnapshot({
|
||||
end: expect.any(Number),
|
||||
id: expect.any(String),
|
||||
jobId: expect.any(String),
|
||||
scheduleId: expect.any(String),
|
||||
start: expect.any(Number),
|
||||
})
|
||||
|
||||
expect(vmTask).toMatchSnapshot({
|
||||
end: expect.any(Number),
|
||||
data: {
|
||||
id: expect.any(String),
|
||||
},
|
||||
id: expect.any(String),
|
||||
message: expect.any(String),
|
||||
result: {
|
||||
stack: expect.any(String),
|
||||
},
|
||||
start: expect.any(Number),
|
||||
})
|
||||
|
||||
expect(vmTask.data.id).toBe(vmIdWithoutDisks)
|
||||
})
|
||||
|
||||
it('fails trying to run backup job without retentions', async () => {
|
||||
const scheduleTempId = randomId()
|
||||
const { id: jobId } = await xo.createTempBackupNgJob({
|
||||
...defaultBackupNg,
|
||||
remotes: {
|
||||
id: resources.remotes.default.id,
|
||||
},
|
||||
schedules: {
|
||||
[scheduleTempId]: DEFAULT_SCHEDULE,
|
||||
},
|
||||
settings: {
|
||||
...defaultBackupNg.settings,
|
||||
[scheduleTempId]: {},
|
||||
},
|
||||
srs: {
|
||||
id: config.srs.default,
|
||||
},
|
||||
})
|
||||
|
||||
const schedule = await xo.getSchedule({ jobId })
|
||||
expect(typeof schedule).toBe('object')
|
||||
await xo.call('backupNg.runJob', { id: jobId, schedule: schedule.id })
|
||||
|
||||
const [
|
||||
{
|
||||
tasks: [task],
|
||||
...log
|
||||
},
|
||||
] = await xo.call('backupNg.getLogs', {
|
||||
jobId,
|
||||
scheduleId: schedule.id,
|
||||
})
|
||||
|
||||
expect(log).toMatchSnapshot({
|
||||
end: expect.any(Number),
|
||||
id: expect.any(String),
|
||||
jobId: expect.any(String),
|
||||
scheduleId: expect.any(String),
|
||||
start: expect.any(Number),
|
||||
})
|
||||
|
||||
expect(task).toMatchSnapshot({
|
||||
data: {
|
||||
id: expect.any(String),
|
||||
},
|
||||
end: expect.any(Number),
|
||||
id: expect.any(String),
|
||||
message: expect.any(String),
|
||||
result: {
|
||||
stack: expect.any(String),
|
||||
},
|
||||
start: expect.any(Number),
|
||||
})
|
||||
expect(task.data.id).toBe(config.vms.default)
|
||||
})
|
||||
})
|
||||
|
||||
test('execute three times a rolling snapshot with 2 as retention & revert to an old state', async () => {
|
||||
jest.setTimeout(7e4)
|
||||
const vmId = await xo.createTempVm({
|
||||
name_label: 'XO Test Temp',
|
||||
name_description: 'Creating a temporary vm',
|
||||
template: config.templates.default,
|
||||
VDIs: [
|
||||
{
|
||||
size: 1,
|
||||
SR: config.srs.default,
|
||||
type: 'user',
|
||||
},
|
||||
],
|
||||
})
|
||||
|
||||
const scheduleTempId = randomId()
|
||||
const { id: jobId } = await xo.createTempBackupNgJob({
|
||||
...defaultBackupNg,
|
||||
vms: {
|
||||
id: vmId,
|
||||
},
|
||||
schedules: {
|
||||
[scheduleTempId]: DEFAULT_SCHEDULE,
|
||||
},
|
||||
settings: {
|
||||
...defaultBackupNg.settings,
|
||||
[scheduleTempId]: { snapshotRetention: 2 },
|
||||
},
|
||||
})
|
||||
|
||||
const schedule = await xo.getSchedule({ jobId })
|
||||
expect(typeof schedule).toBe('object')
|
||||
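// run the job three times: with snapshotRetention set to 2, the oldest snapshot must be rotated out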
for (let i = 0; i < 3; i++) {
|
||||
const oldSnapshots = xo.objects.all[vmId].snapshots
|
||||
await xo.call('backupNg.runJob', { id: jobId, schedule: schedule.id })
|
||||
await xo.waitObjectState(vmId, ({ snapshots }) => {
|
||||
// the snapshot list must have been updated by the job run
|
||||
expect(snapshots).not.toEqual(oldSnapshots)
|
||||
})
|
||||
}
|
||||
|
||||
const { snapshots, videoram: oldVideoram } = xo.objects.all[vmId]
|
||||
|
||||
// check the retention: only snapshotRetention (2) snapshots should be kept
|
||||
expect(snapshots.length).toBe(2)
|
||||
|
||||
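// change a VM property, revert to a snapshot, then check that the previous value is restored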
const newVideoram = 16
|
||||
await xo.call('vm.set', { id: vmId, videoram: newVideoram })
|
||||
await xo.waitObjectState(vmId, ({ videoram }) => {
|
||||
expect(videoram).toBe(newVideoram.toString())
|
||||
})
|
||||
|
||||
await xo.call('vm.revert', {
|
||||
snapshot: snapshots[0],
|
||||
})
|
||||
|
||||
await xo.waitObjectState(vmId, ({ videoram }) => {
|
||||
expect(videoram).toBe(oldVideoram)
|
||||
})
|
||||
|
||||
const [
|
||||
{
|
||||
tasks: [{ tasks: subTasks, ...vmTask }],
|
||||
...log
|
||||
},
|
||||
] = await xo.call('backupNg.getLogs', {
|
||||
jobId,
|
||||
scheduleId: schedule.id,
|
||||
})
|
||||
|
||||
expect(log).toMatchSnapshot({
|
||||
end: expect.any(Number),
|
||||
id: expect.any(String),
|
||||
jobId: expect.any(String),
|
||||
scheduleId: expect.any(String),
|
||||
start: expect.any(Number),
|
||||
})
|
||||
|
||||
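// the VM task should contain a 'snapshot' subtask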
const subTaskSnapshot = subTasks.find(
|
||||
({ message }) => message === 'snapshot'
|
||||
)
|
||||
expect(subTaskSnapshot).toMatchSnapshot({
|
||||
end: expect.any(Number),
|
||||
id: expect.any(String),
|
||||
result: expect.any(String),
|
||||
start: expect.any(Number),
|
||||
})
|
||||
|
||||
expect(vmTask).toMatchSnapshot({
|
||||
data: {
|
||||
id: expect.any(String),
|
||||
},
|
||||
end: expect.any(Number),
|
||||
id: expect.any(String),
|
||||
message: expect.any(String),
|
||||
start: expect.any(Number),
|
||||
})
|
||||
expect(vmTask.data.id).toBe(vmId)
|
||||
})
|
||||
})
|
||||
@@ -0,0 +1,76 @@
|
||||
// Jest Snapshot v1, https://goo.gl/fbAQLP
|
||||
|
||||
exports[`job .create() : creates a new job 1`] = `
|
||||
Object {
|
||||
"id": Any<String>,
|
||||
"key": "snapshot",
|
||||
"method": "vm.snapshot",
|
||||
"name": "jobTest",
|
||||
"paramsVector": Any<Object>,
|
||||
"timeout": 2000,
|
||||
"type": "call",
|
||||
"userId": Any<String>,
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`job .create() : fails trying to create a job without job params 1`] = `[JsonRpcError: invalid parameters]`;
|
||||
|
||||
exports[`job .delete() : deletes an existing job 1`] = `[JsonRpcError: no such job [object Object]]`;
|
||||
|
||||
exports[`job .delete() : deletes an existing job 2`] = `[JsonRpcError: no such schedule [object Object]]`;
|
||||
|
||||
exports[`job .get() : fails trying to get a job with a non existent id 1`] = `[JsonRpcError: no such job [object Object]]`;
|
||||
|
||||
exports[`job .get() : gets an existing job 1`] = `
|
||||
Object {
|
||||
"id": Any<String>,
|
||||
"key": "snapshot",
|
||||
"method": "vm.snapshot",
|
||||
"name": "jobTest",
|
||||
"paramsVector": Any<Object>,
|
||||
"timeout": 2000,
|
||||
"type": "call",
|
||||
"userId": Any<String>,
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`job .getAll() : gets all available jobs 1`] = `
|
||||
Object {
|
||||
"id": Any<String>,
|
||||
"key": "snapshot",
|
||||
"method": "vm.snapshot",
|
||||
"name": "jobTest",
|
||||
"paramsVector": Any<Object>,
|
||||
"timeout": 2000,
|
||||
"type": "call",
|
||||
"userId": Any<String>,
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`job .getAll() : gets all available jobs 2`] = `
|
||||
Object {
|
||||
"id": Any<String>,
|
||||
"key": "snapshot",
|
||||
"method": "vm.snapshot",
|
||||
"name": "jobTest2",
|
||||
"paramsVector": Any<Object>,
|
||||
"timeout": 2000,
|
||||
"type": "call",
|
||||
"userId": Any<String>,
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`job .set() : fails trying to set a job without job.id 1`] = `[JsonRpcError: invalid parameters]`;
|
||||
|
||||
exports[`job .set() : sets a job 1`] = `
|
||||
Object {
|
||||
"id": Any<String>,
|
||||
"key": "snapshot",
|
||||
"method": "vm.clone",
|
||||
"name": "jobTest",
|
||||
"paramsVector": Any<Object>,
|
||||
"timeout": 2000,
|
||||
"type": "call",
|
||||
"userId": Any<String>,
|
||||
}
|
||||
`;
|
||||
223
packages/xo-server-test/src/job/job.spec.js
Normal file
@@ -0,0 +1,223 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
import { difference, keyBy } from 'lodash'
|
||||
|
||||
import config from '../_config'
|
||||
import xo, { testWithOtherConnection } from '../_xoConnection'
|
||||
|
||||
const ADMIN_USER = {
|
||||
email: 'admin2@admin.net',
|
||||
password: 'admin',
|
||||
permission: 'admin',
|
||||
}
|
||||
|
||||
describe('job', () => {
|
||||
let defaultJob
|
||||
|
||||
beforeAll(() => {
|
||||
defaultJob = {
|
||||
name: 'jobTest',
|
||||
timeout: 2000,
|
||||
type: 'call',
|
||||
key: 'snapshot',
|
||||
method: 'vm.snapshot',
|
||||
paramsVector: {
|
||||
type: 'crossProduct',
|
||||
items: [
|
||||
{
|
||||
type: 'set',
|
||||
values: [
|
||||
{
|
||||
id: config.vms.default,
|
||||
name: 'test-snapshot',
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
},
|
||||
}
|
||||
})
|
||||
|
||||
describe('.create() :', () => {
|
||||
it('creates a new job', async () => {
|
||||
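// create the job from another admin connection to check that userId is set to the caller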
const userId = await xo.createTempUser(ADMIN_USER)
|
||||
const { email, password } = ADMIN_USER
|
||||
await testWithOtherConnection({ email, password }, async xo => {
|
||||
const id = await xo.call('job.create', { job: defaultJob })
|
||||
expect(typeof id).toBe('string')
|
||||
|
||||
const job = await xo.call('job.get', { id })
|
||||
expect(job).toMatchSnapshot({
|
||||
id: expect.any(String),
|
||||
paramsVector: expect.any(Object),
|
||||
userId: expect.any(String),
|
||||
})
|
||||
expect(job.paramsVector).toEqual(defaultJob.paramsVector)
|
||||
expect(job.userId).toBe(userId)
|
||||
await xo.call('job.delete', { id })
|
||||
})
|
||||
})
|
||||
|
||||
it('creates a job with a userId', async () => {
|
||||
const userId = await xo.createTempUser(ADMIN_USER)
|
||||
const id = await xo.createTempJob({ ...defaultJob, userId })
|
||||
const { userId: expectedUserId } = await xo.call('job.get', { id })
|
||||
expect(userId).toBe(expectedUserId)
|
||||
})
|
||||
|
||||
it('fails trying to create a job without job params', async () => {
|
||||
await expect(xo.createTempJob({})).rejects.toMatchSnapshot()
|
||||
})
|
||||
})
|
||||
|
||||
describe('.getAll() :', () => {
|
||||
it('gets all available jobs', async () => {
|
||||
const jobId1 = await xo.createTempJob(defaultJob)
|
||||
const job2 = {
|
||||
...defaultJob,
|
||||
name: 'jobTest2',
|
||||
paramsVector: {
|
||||
type: 'crossProduct',
|
||||
items: [
|
||||
{
|
||||
type: 'set',
|
||||
values: [
|
||||
{
|
||||
id: config.vms.default,
|
||||
name: 'test2-snapshot',
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
},
|
||||
}
|
||||
const jobId2 = await xo.createTempJob(job2)
|
||||
let jobs = await xo.call('job.getAll')
|
||||
expect(Array.isArray(jobs)).toBe(true)
|
||||
jobs = keyBy(jobs, 'id')
|
||||
|
||||
const newJob1 = jobs[jobId1]
|
||||
expect(newJob1).toMatchSnapshot({
|
||||
id: expect.any(String),
|
||||
paramsVector: expect.any(Object),
|
||||
userId: expect.any(String),
|
||||
})
|
||||
expect(newJob1.paramsVector).toEqual(defaultJob.paramsVector)
|
||||
|
||||
const newJob2 = jobs[jobId2]
|
||||
expect(newJob2).toMatchSnapshot({
|
||||
id: expect.any(String),
|
||||
paramsVector: expect.any(Object),
|
||||
userId: expect.any(String),
|
||||
})
|
||||
expect(newJob2.paramsVector).toEqual(job2.paramsVector)
|
||||
})
|
||||
})
|
||||
|
||||
describe('.get() :', () => {
|
||||
it('gets an existing job', async () => {
|
||||
const id = await xo.createTempJob(defaultJob)
|
||||
const job = await xo.call('job.get', { id })
|
||||
expect(job).toMatchSnapshot({
|
||||
id: expect.any(String),
|
||||
paramsVector: expect.any(Object),
|
||||
userId: expect.any(String),
|
||||
})
|
||||
expect(job.paramsVector).toEqual(defaultJob.paramsVector)
|
||||
})
|
||||
|
||||
it('fails trying to get a job with a non existent id', async () => {
|
||||
await expect(
|
||||
xo.call('job.get', { id: 'non-existent-id' })
|
||||
).rejects.toMatchSnapshot()
|
||||
})
|
||||
})
|
||||
|
||||
describe('.set() :', () => {
|
||||
it('sets a job', async () => {
|
||||
const id = await xo.createTempJob(defaultJob)
|
||||
const job = {
|
||||
id,
|
||||
type: 'call',
|
||||
key: 'snapshot',
|
||||
method: 'vm.clone',
|
||||
paramsVector: {
|
||||
type: 'crossProduct',
|
||||
items: [
|
||||
{
|
||||
type: 'set',
|
||||
values: [
|
||||
{
|
||||
id: config.vms.default,
|
||||
name: 'clone',
|
||||
full_copy: true,
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
},
|
||||
}
|
||||
|
||||
await xo.call('job.set', {
|
||||
job,
|
||||
})
|
||||
|
||||
const newJob = await xo.call('job.get', { id })
|
||||
expect(newJob).toMatchSnapshot({
|
||||
id: expect.any(String),
|
||||
paramsVector: expect.any(Object),
|
||||
userId: expect.any(String),
|
||||
})
|
||||
expect(newJob.paramsVector).toEqual(job.paramsVector)
|
||||
})
|
||||
|
||||
it('fails trying to set a job without job.id', async () => {
|
||||
await expect(xo.call('job.set', defaultJob)).rejects.toMatchSnapshot()
|
||||
})
|
||||
})
|
||||
|
||||
describe('.delete() :', () => {
|
||||
it('deletes an existing job', async () => {
|
||||
const id = await xo.call('job.create', { job: defaultJob })
|
||||
const { id: scheduleId } = await xo.call('schedule.create', {
|
||||
jobId: id,
|
||||
cron: '* * * * * *',
|
||||
enabled: false,
|
||||
})
|
||||
await xo.call('job.delete', { id })
|
||||
await expect(xo.call('job.get', { id })).rejects.toMatchSnapshot()
|
||||
await expect(
|
||||
xo.call('schedule.get', { id: scheduleId })
|
||||
).rejects.toMatchSnapshot()
|
||||
})
|
||||
|
||||
it.skip('fails trying to delete a job with a non existent id', async () => {
|
||||
await expect(
|
||||
xo.call('job.delete', { id: 'non-existent-id' })
|
||||
).rejects.toMatchSnapshot()
|
||||
})
|
||||
})
|
||||
|
||||
describe('.runSequence() :', () => {
|
||||
let id
|
||||
|
||||
afterEach(async () => {
|
||||
await xo
|
||||
.call('vm.delete', { id, deleteDisks: true })
|
||||
.catch(error => console.error(error))
|
||||
})
|
||||
|
||||
it('runs a job', async () => {
|
||||
const jobId = await xo.createTempJob(defaultJob)
|
||||
const snapshots = xo.objects.all[config.vms.default].snapshots
|
||||
await xo.call('job.runSequence', { idSequence: [jobId] })
|
||||
await xo.waitObjectState(
|
||||
config.vms.default,
|
||||
({ snapshots: actualSnapshots }) => {
|
||||
expect(actualSnapshots.length).toBe(snapshots.length + 1)
|
||||
id = difference(actualSnapshots, snapshots)[0]
|
||||
}
|
||||
)
|
||||
})
|
||||
})
|
||||
})
|
||||
156
packages/xo-server-test/src/old-tests/disk.spec.js
Normal file
@@ -0,0 +1,156 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
// Doc: https://github.com/moll/js-must/blob/master/doc/API.md#must
|
||||
import expect from 'must'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
import { getConfig, getMainConnection, getSrId, waitObjectState } from './util'
|
||||
import { map, assign } from 'lodash'
|
||||
import eventToPromise from 'event-to-promise'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
describe('disk', () => {
|
||||
let diskId
|
||||
let diskIds = []
|
||||
let serverId
|
||||
let srId
|
||||
let xo
|
||||
|
||||
// -----------------------------------------------------------------
|
||||
|
||||
beforeAll(async () => {
|
||||
jest.setTimeout(10e3)
|
||||
xo = await getMainConnection()
|
||||
|
||||
const config = await getConfig()
|
||||
serverId = await xo.call(
|
||||
'server.add',
|
||||
assign({ autoConnect: false }, config.xenServer1)
|
||||
)
|
||||
await xo.call('server.connect', { id: serverId })
|
||||
await eventToPromise(xo.objects, 'finish')
|
||||
srId = await getSrId(xo)
|
||||
})
|
||||
|
||||
// -----------------------------------------------------------------
|
||||
|
||||
afterEach(async () => {
|
||||
await Promise.all(
|
||||
map(diskIds, diskId => xo.call('vdi.delete', { id: diskId }))
|
||||
)
|
||||
diskIds = []
|
||||
})
|
||||
|
||||
// -----------------------------------------------------------------
|
||||
|
||||
afterAll(async () => {
|
||||
await xo.call('server.remove', { id: serverId })
|
||||
})
|
||||
|
||||
// -----------------------------------------------------------------
|
||||
|
||||
async function createDisk(params) {
|
||||
const id = await xo.call('disk.create', params)
|
||||
diskIds.push(id)
|
||||
return id
|
||||
}
|
||||
|
||||
async function createDiskTest() {
|
||||
const id = await createDisk({
|
||||
name: 'diskTest',
|
||||
size: '1GB',
|
||||
sr: srId,
|
||||
})
|
||||
return id
|
||||
}
|
||||
|
||||
// ===================================================================
|
||||
|
||||
describe('.create()', () => {
|
||||
it('creates a new disk on an SR', async () => {
|
||||
diskId = await createDisk({
|
||||
name: 'diskTest',
|
||||
size: '1GB',
|
||||
sr: srId,
|
||||
})
|
||||
|
||||
await Promise.all([
|
||||
waitObjectState(xo, diskId, disk => {
|
||||
expect(disk.type).to.be.equal('VDI')
|
||||
expect(disk.name_label).to.be.equal('diskTest')
|
||||
// TODO: should not test an exact value but around 10%
|
||||
expect(disk.size).to.be.equal(1000341504)
|
||||
expect(disk.$SR).to.be.equal(srId)
|
||||
}),
|
||||
waitObjectState(xo, srId, sr => {
|
||||
expect(sr.VDIs).include(diskId)
|
||||
}),
|
||||
])
|
||||
})
|
||||
})
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
describe('.delete()', () => {
|
||||
beforeEach(async () => {
|
||||
diskId = await createDiskTest()
|
||||
})
|
||||
|
||||
it('deletes a disk', async () => {
|
||||
await Promise.all([
|
||||
xo.call('vdi.delete', { id: diskId }),
|
||||
waitObjectState(xo, diskId, disk => {
|
||||
expect(disk).to.be.undefined()
|
||||
}),
|
||||
waitObjectState(xo, srId, sr => {
|
||||
expect(sr.VDIs).not.include(diskId)
|
||||
}),
|
||||
])
|
||||
diskIds = []
|
||||
})
|
||||
})
|
||||
|
||||
// ---------------------------------------------------------------------
|
||||
|
||||
describe('.set()', () => {
|
||||
beforeEach(async () => {
|
||||
diskId = await createDiskTest()
|
||||
})
|
||||
|
||||
it('sets the name of the disk', async () => {
|
||||
await xo.call('vdi.set', {
|
||||
id: diskId,
|
||||
name_label: 'disk2',
|
||||
})
|
||||
|
||||
await waitObjectState(xo, diskId, disk => {
|
||||
expect(disk.name_label).to.be.equal('disk2')
|
||||
})
|
||||
})
|
||||
|
||||
it('sets the description of the disk', async () => {
|
||||
await xo.call('vdi.set', {
|
||||
id: diskId,
|
||||
name_description: 'description',
|
||||
})
|
||||
|
||||
await waitObjectState(xo, diskId, disk => {
|
||||
expect(disk.name_description).to.be.equal('description')
|
||||
})
|
||||
})
|
||||
|
||||
it.skip('sets the size of the disk', async () => {
|
||||
await xo.getOrWaitObject(diskId)
|
||||
await xo.call('vdi.set', {
|
||||
id: diskId,
|
||||
size: '5MB',
|
||||
})
|
||||
|
||||
await waitObjectState(xo, diskId, disk => {
|
||||
expect(disk.size).to.be.equal(6291456)
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
59
packages/xo-server-test/src/old-tests/docker.spec.js
Normal file
@@ -0,0 +1,59 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
// Doc: https://github.com/moll/js-must/blob/master/doc/API.md#must
|
||||
// import expect from 'must'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
// import {getConnection} from './util'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
describe('docker', () => {
|
||||
// let xo
|
||||
// beforeAll(async () => {
|
||||
// xo = await getConnection()
|
||||
// })
|
||||
|
||||
// ===================================================================
|
||||
|
||||
describe('.register()', async () => {
|
||||
it('registers the VM for Docker management')
|
||||
})
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
describe('.deregister()', async () => {
|
||||
it('deregisters the VM for Docker management')
|
||||
})
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
describe('.start()', async () => {
|
||||
it('starts the Docker')
|
||||
})
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
describe('.stop()', async () => {
|
||||
it('stops the Docker')
|
||||
})
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
describe('.restart()', async () => {
|
||||
it('restarts the Docker')
|
||||
})
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
describe('.pause()', async () => {
|
||||
it('pauses the Docker')
|
||||
})
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
describe('.unpause()', async () => {
|
||||
it('unpauses the Docker')
|
||||
})
|
||||
})
|
||||
377
packages/xo-server-test/src/old-tests/group.spec.js
Normal file
@@ -0,0 +1,377 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
import { find, map } from 'lodash'
|
||||
|
||||
import { createUser, deleteUsers, getUser, xo } from './util.js'
|
||||
|
||||
// ===================================================================
|
||||
describe('group', () => {
|
||||
const userIds = []
|
||||
const groupIds = []
|
||||
|
||||
// -----------------------------------------------------------------
|
||||
|
||||
afterEach(async () => {
|
||||
await Promise.all(map(groupIds, id => xo.call('group.delete', { id })))
|
||||
// Deleting users must be done AFTER deleting the group
|
||||
// because there is a race condition in xo-server
|
||||
// which causes some users to not be properly deleted.
|
||||
|
||||
// The test “delete the group with its users” highlights this issue.
|
||||
await deleteUsers(xo, userIds)
|
||||
userIds.length = groupIds.length = 0
|
||||
})
|
||||
|
||||
// -----------------------------------------------------------------
|
||||
|
||||
async function createGroup(params) {
|
||||
const groupId = await xo.call('group.create', params)
|
||||
groupIds.push(groupId)
|
||||
return groupId
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------
|
||||
|
||||
function compareGroup(actual, expected) {
|
||||
expect(actual.name).toEqual(expected.name)
|
||||
expect(actual.id).toEqual(expected.id)
|
||||
expect(actual.users).toEqual(expected.users)
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------
|
||||
|
||||
function getAllGroups() {
|
||||
return xo.call('group.getAll')
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
async function getGroup(id) {
|
||||
const groups = await getAllGroups()
|
||||
return find(groups, { id: id })
|
||||
}
|
||||
|
||||
// =================================================================
|
||||
|
||||
describe('.create()', () => {
|
||||
it('creates a group and returns its id', async () => {
|
||||
const groupId = await createGroup({
|
||||
name: 'Avengers',
|
||||
})
|
||||
const group = await getGroup(groupId)
|
||||
compareGroup(group, {
|
||||
id: groupId,
|
||||
name: 'Avengers',
|
||||
users: [],
|
||||
})
|
||||
})
|
||||
|
||||
it.skip('does not create two groups with the same name', async () => {
|
||||
await createGroup({
|
||||
name: 'Avengers',
|
||||
})
|
||||
|
||||
await createGroup({
|
||||
name: 'Avengers',
|
||||
}).then(
|
||||
() => {
|
||||
throw new Error('createGroup() should have thrown')
|
||||
},
|
||||
function(error) {
|
||||
expect(error.message).to.match(/duplicate group/i)
|
||||
}
|
||||
)
|
||||
})
|
||||
})
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
|
||||
describe('.delete()', () => {
|
||||
let groupId
|
||||
let userId1
|
||||
let userId2
|
||||
let userId3
|
||||
beforeEach(async () => {
|
||||
groupId = await xo.call('group.create', {
|
||||
name: 'Avengers',
|
||||
})
|
||||
})
|
||||
it('deletes a group', async () => {
|
||||
await xo.call('group.delete', {
|
||||
id: groupId,
|
||||
})
|
||||
const group = await getGroup(groupId)
|
||||
expect(group).toBeUndefined()
|
||||
})
|
||||
|
||||
it.skip("erases the group from the user's groups list", async () => {
|
||||
// create user and add it to the group
|
||||
const userId = await createUser(xo, userIds, {
|
||||
email: 'tony.stark@stark_industry.com',
|
||||
password: 'IronMan',
|
||||
})
|
||||
await xo.call('group.addUser', {
|
||||
id: groupId,
|
||||
userId: userId,
|
||||
})
|
||||
|
||||
// delete the group
|
||||
await xo.call('group.delete', { id: groupId })
|
||||
const user = await getUser(userId)
|
||||
expect(user.groups).toEqual([])
|
||||
})
|
||||
|
||||
it.skip("erases the user from the group's users list", async () => {
|
||||
// create user and add it to the group
|
||||
const userId = await createUser(xo, userIds, {
|
||||
email: 'tony.stark@stark_industry.com',
|
||||
password: 'IronMan',
|
||||
})
|
||||
await xo.call('group.addUser', {
|
||||
id: groupId,
|
||||
userId: userId,
|
||||
})
|
||||
|
||||
// delete the group
|
||||
await xo.call('user.delete', { id: userId })
|
||||
const group = await getGroup(groupId)
|
||||
expect(group.users).toEqual([])
|
||||
})
|
||||
|
||||
// FIXME: some users are not properly deleted because of a race condition with group deletion.
|
||||
it.skip('delete the group with its users', async () => {
|
||||
// create users
|
||||
;[userId1, userId2, userId3] = await Promise.all([
|
||||
xo.call('user.create', {
|
||||
email: 'tony.stark@stark_industry.com',
|
||||
password: 'IronMan',
|
||||
}),
|
||||
xo.call('user.create', {
|
||||
email: 'natasha.romanov@shield.com',
|
||||
password: 'BlackWidow',
|
||||
}),
|
||||
xo.call('user.create', {
|
||||
email: 'pietro.maximoff@shield.com',
|
||||
password: 'QickSilver',
|
||||
}),
|
||||
])
|
||||
|
||||
await xo.call('group.setUsers', {
|
||||
id: groupId,
|
||||
userIds: [userId1, userId2, userId3],
|
||||
})
|
||||
|
||||
// delete the group with its users
|
||||
await Promise.all([
|
||||
xo.call('group.delete', {
|
||||
id: groupId,
|
||||
}),
|
||||
deleteUsers(xo, [userId1, userId2, userId3]),
|
||||
])
|
||||
|
||||
const [group, user1, user2, user3] = await Promise.all([
|
||||
getGroup(groupId),
|
||||
getUser(xo, userId1),
|
||||
getUser(xo, userId2),
|
||||
getUser(xo, userId3),
|
||||
])
|
||||
|
||||
expect(group).toBeUndefined()
|
||||
expect(user1).toBeUndefined()
|
||||
expect(user2).toBeUndefined()
|
||||
expect(user3).toBeUndefined()
|
||||
})
|
||||
})
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
describe('.getAll()', () => {
|
||||
it('returns an array', async () => {
|
||||
const groups = await xo.call('group.getAll')
|
||||
expect(groups).toBeInstanceOf(Array)
|
||||
})
|
||||
})
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
describe('.setUsers()', () => {
|
||||
let groupId
|
||||
let userId1
|
||||
let userId2
|
||||
let userId3
|
||||
beforeEach(async () => {
|
||||
;[groupId, userId1, userId2, userId3] = await Promise.all([
|
||||
createGroup({
|
||||
name: 'Avengers',
|
||||
}),
|
||||
createUser(xo, userIds, {
|
||||
email: 'tony.stark@stark_industry.com',
|
||||
password: 'IronMan',
|
||||
}),
|
||||
createUser(xo, userIds, {
|
||||
email: 'natasha.romanov@shield.com',
|
||||
password: 'BlackWidow',
|
||||
}),
|
||||
createUser(xo, userIds, {
|
||||
email: 'pietro.maximoff@shield.com',
|
||||
password: 'QickSilver',
|
||||
}),
|
||||
])
|
||||
})
|
||||
|
||||
it('can set users of a group', async () => {
|
||||
// add two users to the group
|
||||
await xo.call('group.setUsers', {
|
||||
id: groupId,
|
||||
userIds: [userId1, userId2],
|
||||
})
|
||||
{
|
||||
const [group, user1, user2, user3] = await Promise.all([
|
||||
getGroup(groupId),
|
||||
getUser(xo, userId1),
|
||||
getUser(xo, userId2),
|
||||
getUser(xo, userId3),
|
||||
])
|
||||
compareGroup(group, {
|
||||
id: groupId,
|
||||
name: 'Avengers',
|
||||
users: [userId1, userId2],
|
||||
})
|
||||
|
||||
expect(user1.groups).toEqual([groupId])
|
||||
expect(user2.groups).toEqual([groupId])
|
||||
expect(user3.groups).toEqual([])
|
||||
}
|
||||
|
||||
// change users of the group
|
||||
await xo.call('group.setUsers', {
|
||||
id: groupId,
|
||||
userIds: [userId1, userId3],
|
||||
})
|
||||
{
|
||||
const [group, user1, user2, user3] = await Promise.all([
|
||||
getGroup(groupId),
|
||||
getUser(xo, userId1),
|
||||
getUser(xo, userId2),
|
||||
getUser(xo, userId3),
|
||||
])
|
||||
|
||||
compareGroup(group, {
|
||||
id: groupId,
|
||||
name: 'Avengers',
|
||||
users: [userId1, userId3],
|
||||
})
|
||||
|
||||
expect(user1.groups).toEqual([groupId])
|
||||
expect(user2.groups).toEqual([])
|
||||
expect(user3.groups).toEqual([groupId])
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
describe('.addUser()', () => {
|
||||
let groupId
|
||||
let userId
|
||||
beforeEach(async () => {
|
||||
;[groupId, userId] = await Promise.all([
|
||||
createGroup({
|
||||
name: 'Avengers',
|
||||
}),
|
||||
createUser(xo, userIds, {
|
||||
email: 'tony.stark@stark_industry.com',
|
||||
password: 'IronMan',
|
||||
}),
|
||||
])
|
||||
})
|
||||
|
||||
it('adds a user id to a group', async () => {
|
||||
await xo.call('group.addUser', {
|
||||
id: groupId,
|
||||
userId: userId,
|
||||
})
|
||||
|
||||
const [group, user] = await Promise.all([
|
||||
getGroup(groupId),
|
||||
getUser(xo, userId),
|
||||
])
|
||||
|
||||
compareGroup(group, {
|
||||
id: groupId,
|
||||
name: 'Avengers',
|
||||
users: [userId],
|
||||
})
|
||||
|
||||
expect(user.groups).toEqual([groupId])
|
||||
})
|
||||
})
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
describe('.removeUser()', () => {
|
||||
let groupId
|
||||
let userId
|
||||
beforeEach(async () => {
|
||||
;[groupId, userId] = await Promise.all([
|
||||
createGroup({
|
||||
name: 'Avengers',
|
||||
}),
|
||||
createUser(xo, userIds, {
|
||||
email: 'tony.stark@stark_industry.com',
|
||||
password: 'IronMan',
|
||||
}),
|
||||
])
|
||||
|
||||
await xo.call('group.addUser', {
|
||||
id: groupId,
|
||||
userId: userId,
|
||||
})
|
||||
})
|
||||
|
||||
it('removes a user from a group', async () => {
|
||||
await xo.call('group.removeUser', {
|
||||
id: groupId,
|
||||
userId: userId,
|
||||
})
|
||||
|
||||
const [group, user] = await Promise.all([
|
||||
getGroup(groupId),
|
||||
getUser(xo, userId),
|
||||
])
|
||||
|
||||
compareGroup(group, {
|
||||
id: groupId,
|
||||
name: 'Avengers',
|
||||
users: [],
|
||||
})
|
||||
|
||||
expect(user.groups).toEqual([])
|
||||
})
|
||||
})
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
describe('.set()', () => {
|
||||
let groupId
|
||||
beforeEach(async () => {
|
||||
groupId = await createGroup({
|
||||
name: 'Avengers',
|
||||
})
|
||||
})
|
||||
|
||||
it('changes name of a group', async () => {
|
||||
await xo.call('group.set', {
|
||||
id: groupId,
|
||||
name: 'Guardians of the Galaxy',
|
||||
})
|
||||
|
||||
const group = await getGroup(groupId)
|
||||
compareGroup(group, {
|
||||
id: groupId,
|
||||
name: 'Guardians of the Galaxy',
|
||||
users: [],
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
239
packages/xo-server-test/src/old-tests/host.spec.js
Normal file
@@ -0,0 +1,239 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
// Doc: https://github.com/moll/js-must/blob/master/doc/API.md#must
|
||||
|
||||
import expect from 'must'
|
||||
import eventToPromise from 'event-to-promise'
|
||||
import {
|
||||
getAllHosts,
|
||||
getConfig,
|
||||
getMainConnection,
|
||||
getVmToMigrateId,
|
||||
waitObjectState,
|
||||
} from './util'
|
||||
import { find, forEach } from 'lodash'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
describe('host', () => {
|
||||
let xo
|
||||
let serverId
|
||||
let hostId
|
||||
|
||||
// -----------------------------------------------------------------
|
||||
|
||||
beforeAll(async () => {
|
||||
jest.setTimeout(10e3)
|
||||
let config
|
||||
;[xo, config] = await Promise.all([getMainConnection(), getConfig()])
|
||||
serverId = await xo.call('server.add', config.xenServer2).catch(() => {})
|
||||
await eventToPromise(xo.objects, 'finish')
|
||||
|
||||
hostId = getHost(config.host1)
|
||||
})
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
afterAll(async () => {
|
||||
await xo.call('server.remove', {
|
||||
id: serverId,
|
||||
})
|
||||
})
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
function getHost(nameLabel) {
|
||||
const hosts = getAllHosts(xo)
|
||||
const host = find(hosts, { name_label: nameLabel })
|
||||
return host.id
|
||||
}
|
||||
|
||||
// ===================================================================
|
||||
|
||||
describe('.set()', () => {
|
||||
let nameLabel
|
||||
let nameDescription
|
||||
|
||||
beforeEach(async () => {
|
||||
// save the current values so they can be restored at the end of the test
|
||||
const host = xo.objects.all[hostId]
|
||||
nameLabel = host.name_label
|
||||
nameDescription = host.name_description
|
||||
})
|
||||
afterEach(async () => {
|
||||
await xo.call('host.set', {
|
||||
id: hostId,
|
||||
name_label: nameLabel,
|
||||
name_description: nameDescription,
|
||||
})
|
||||
})
|
||||
|
||||
it('changes properties of the host', async () => {
|
||||
await xo.call('host.set', {
|
||||
id: hostId,
|
||||
name_label: 'labTest',
|
||||
name_description: 'description',
|
||||
})
|
||||
await waitObjectState(xo, hostId, host => {
|
||||
expect(host.name_label).to.be.equal('labTest')
|
||||
expect(host.name_description).to.be.equal('description')
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
|
||||
describe('.restart()', () => {
|
||||
jest.setTimeout(330e3)
|
||||
it('restarts the host', async () => {
|
||||
await xo.call('host.restart', { id: hostId })
|
||||
|
||||
await waitObjectState(xo, hostId, host => {
|
||||
expect(host.current_operations)
|
||||
})
|
||||
await waitObjectState(xo, hostId, host => {
|
||||
expect(host.power_state).to.be.equal('Halted')
|
||||
})
|
||||
await waitObjectState(xo, hostId, host => {
|
||||
expect(host.power_state).to.be.equal('Running')
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
|
||||
describe('.restartAgent()', () => {
|
||||
it('restarts the Xen agent on the host')
|
||||
})
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
|
||||
describe('.start()', () => {
|
||||
jest.setTimeout(300e3)
|
||||
beforeEach(async () => {
|
||||
try {
|
||||
await xo.call('host.stop', { id: hostId })
|
||||
} catch (_) {}
|
||||
|
||||
// test if the host is shutdown
|
||||
await waitObjectState(xo, hostId, host => {
|
||||
expect(host.power_state).to.be.equal('Halted')
|
||||
})
|
||||
})
|
||||
|
||||
it('starts the host', async () => {
|
||||
await xo.call('host.start', { id: hostId })
|
||||
await waitObjectState(xo, hostId, host => {
|
||||
expect(host.power_state).to.be.equal('Running')
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
|
||||
describe('.stop()', () => {
|
||||
jest.setTimeout(300e3)
|
||||
let vmId
|
||||
|
||||
beforeAll(async () => {
|
||||
vmId = await getVmToMigrateId(xo)
|
||||
try {
|
||||
await xo.call('vm.start', { id: vmId })
|
||||
} catch (_) {}
|
||||
try {
|
||||
await xo.call('vm.migrate', {
|
||||
vm: vmId,
|
||||
host: hostId,
|
||||
})
|
||||
} catch (_) {}
|
||||
})
|
||||
afterEach(async () => {
|
||||
await xo.call('host.start', { id: hostId })
|
||||
})
|
||||
|
||||
it('stops the host and shuts down its VMs', async () => {
|
||||
await xo.call('host.stop', { id: hostId })
|
||||
await Promise.all([
|
||||
waitObjectState(xo, vmId, vm => {
|
||||
expect(vm.$container).not.to.be.equal(hostId)
|
||||
expect(vm.power_state).to.be.equal('Halted')
|
||||
}),
|
||||
waitObjectState(xo, hostId, host => {
|
||||
expect(host.power_state).to.be.equal('Halted')
|
||||
}),
|
||||
])
|
||||
})
|
||||
})
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
|
||||
describe('.detach()', () => {
|
||||
it('ejects the host from its pool')
|
||||
})
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
|
||||
describe('.disable()', () => {
|
||||
afterEach(async () => {
|
||||
await xo.call('host.enable', {
|
||||
id: hostId,
|
||||
})
|
||||
})
|
||||
|
||||
it('disables VM creation on the host', async () => {
|
||||
await xo.call('host.disable', { id: hostId })
|
||||
await waitObjectState(xo, hostId, host => {
|
||||
expect(host.enabled).to.be.false()
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
|
||||
describe('.enable()', async () => {
|
||||
beforeEach(async () => {
|
||||
await xo.call('host.disable', { id: hostId })
|
||||
})
|
||||
|
||||
it('enables VM creation on the host', async () => {
|
||||
await xo.call('host.enable', { id: hostId })
|
||||
|
||||
await waitObjectState(xo, hostId, host => {
|
||||
expect(host.enabled).to.be.true()
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
// -----------------------------------------------------------------
|
||||
describe('.createNetwork()', () => {
|
||||
it('creates a network')
|
||||
})
|
||||
|
||||
// -----------------------------------------------------------------
|
||||
|
||||
describe('.listMissingPatches()', () => {
|
||||
it('returns an array of missing patches in the host')
|
||||
it('returns an empty array if up-to-date')
|
||||
})
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
|
||||
describe('.installPatch()', () => {
|
||||
it('installs a patch on the host')
|
||||
})
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
|
||||
describe('.stats()', () => {
|
||||
it('returns an object with the host statistics', async () => {
|
||||
const stats = await xo.call('host.stats', {
|
||||
host: hostId,
|
||||
})
|
||||
expect(stats).to.be.an.object()
|
||||
|
||||
forEach(stats, function(array, key) {
|
||||
expect(array).to.be.an.array()
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
79
packages/xo-server-test/src/old-tests/pool.spec.js
Normal file
@@ -0,0 +1,79 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
// Doc: https://github.com/moll/js-must/blob/master/doc/API.md#must
|
||||
import expect from 'must'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
import { getConfig, getMainConnection, waitObjectState } from './util'
|
||||
import eventToPromise from 'event-to-promise'
|
||||
import { find } from 'lodash'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
describe('pool', () => {
|
||||
let xo
|
||||
let serverId
|
||||
let poolId
|
||||
let config
|
||||
|
||||
beforeAll(async () => {
|
||||
jest.setTimeout(10e3)
|
||||
;[xo, config] = await Promise.all([getMainConnection(), getConfig()])
|
||||
serverId = await xo.call('server.add', config.xenServer1).catch(() => {})
|
||||
await eventToPromise(xo.objects, 'finish')
|
||||
poolId = getPoolId()
|
||||
})
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
afterAll(async () => {
|
||||
await xo.call('server.remove', {
|
||||
id: serverId,
|
||||
})
|
||||
})
|
||||
|
||||
// -----------------------------------------------------------------
|
||||
|
||||
function getPoolId() {
|
||||
const pools = xo.objects.indexes.type.pool
|
||||
const pool = find(pools, { name_label: config.pool.name_label })
|
||||
return pool.id
|
||||
}
|
||||
|
||||
// ===================================================================
|
||||
|
||||
describe('.set()', () => {
|
||||
afterEach(async () => {
|
||||
await xo.call('pool.set', {
|
||||
id: poolId,
|
||||
name_label: config.pool.name_label,
|
||||
name_description: '',
|
||||
})
|
||||
})
|
||||
it.skip('sets pool parameters', async () => {
|
||||
await xo.call('pool.set', {
|
||||
id: poolId,
|
||||
name_label: 'nameTest',
|
||||
name_description: 'description',
|
||||
})
|
||||
|
||||
await waitObjectState(xo, poolId, pool => {
|
||||
expect(pool.name_label).to.be.equal('nameTest')
|
||||
expect(pool.name_description).to.be.equal('description')
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
|
||||
describe('.installPatch()', () => {
|
||||
it('installs a patch on the pool')
|
||||
})
|
||||
|
||||
// -----------------------------------------------------------------
|
||||
|
||||
describe('handlePatchUpload()', () => {
|
||||
it('')
|
||||
})
|
||||
})
|
||||
33
packages/xo-server-test/src/old-tests/role.spec.js
Normal file
@@ -0,0 +1,33 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
import { xo } from './util'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
describe('role', () => {
|
||||
describe('.getAll()', () => {
|
||||
it('returns all the roles', async () => {
|
||||
const role = await xo.call('role.getAll')
|
||||
|
||||
// FIXME: use permutationOf but figure out how not to compare objects by
|
||||
// equality.
|
||||
expect(role).toEqual([
|
||||
{
|
||||
id: 'viewer',
|
||||
name: 'Viewer',
|
||||
permissions: ['view'],
|
||||
},
|
||||
{
|
||||
id: 'operator',
|
||||
name: 'Operator',
|
||||
permissions: ['view', 'operate'],
|
||||
},
|
||||
{
|
||||
id: 'admin',
|
||||
name: 'Admin',
|
||||
permissions: ['view', 'operate', 'administrate'],
|
||||
},
|
||||
])
|
||||
})
|
||||
})
|
||||
})
|
||||
149
packages/xo-server-test/src/old-tests/schedule.spec.js
Normal file
@@ -0,0 +1,149 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
// Doc: https://github.com/moll/js-must/blob/master/doc/API.md#must
|
||||
import expect from 'must'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
import {
|
||||
getConfig,
|
||||
getMainConnection,
|
||||
getSchedule,
|
||||
jobTest,
|
||||
scheduleTest,
|
||||
} from './util'
|
||||
import eventToPromise from 'event-to-promise'
|
||||
import { map } from 'lodash'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
describe('schedule', () => {
|
||||
let xo
|
||||
let serverId
|
||||
let scheduleIds = []
|
||||
let jobId
|
||||
|
||||
beforeAll(async () => {
|
||||
jest.setTimeout(10e3)
|
||||
let config
|
||||
;[xo, config] = await Promise.all([getMainConnection(), getConfig()])
|
||||
|
||||
serverId = await xo.call('server.add', config.xenServer1).catch(() => {})
|
||||
await eventToPromise(xo.objects, 'finish')
|
||||
|
||||
jobId = await jobTest(xo)
|
||||
})
|
||||
|
||||
// -----------------------------------------------------------------
|
||||
|
||||
afterAll(async () => {
|
||||
await Promise.all([
|
||||
xo.call('job.delete', { id: jobId }),
|
||||
xo.call('server.remove', { id: serverId }),
|
||||
])
|
||||
})
|
||||
|
||||
// -----------------------------------------------------------------
|
||||
|
||||
afterEach(async () => {
|
||||
await Promise.all(
|
||||
map(scheduleIds, scheduleId =>
|
||||
xo.call('schedule.delete', { id: scheduleId })
|
||||
)
|
||||
)
|
||||
scheduleIds = []
|
||||
})
|
||||
|
||||
// -----------------------------------------------------------------
|
||||
|
||||
async function createSchedule(params) {
|
||||
const schedule = await xo.call('schedule.create', params)
|
||||
scheduleIds.push(schedule.id)
|
||||
return schedule
|
||||
}
|
||||
|
||||
async function createScheduleTest() {
|
||||
const schedule = await scheduleTest(xo, jobId)
|
||||
scheduleIds.push(schedule.id)
|
||||
return schedule
|
||||
}
|
||||
|
||||
// =================================================================
|
||||
|
||||
describe('.getAll()', () => {
|
||||
it('gets all existing schedules', async () => {
|
||||
const schedules = await xo.call('schedule.getAll')
|
||||
expect(schedules).to.be.an.array()
|
||||
})
|
||||
})
|
||||
|
||||
// -----------------------------------------------------------------
|
||||
|
||||
describe('.get()', () => {
|
||||
let scheduleId
|
||||
beforeAll(async () => {
|
||||
scheduleId = (await createScheduleTest()).id
|
||||
})
|
||||
|
||||
it('gets an existing schedule', async () => {
|
||||
const schedule = await xo.call('schedule.get', { id: scheduleId })
|
||||
expect(schedule.job).to.be.equal(jobId)
|
||||
expect(schedule.cron).to.be.equal('* * * * * *')
|
||||
expect(schedule.enabled).to.be.false()
|
||||
})
|
||||
})
|
||||
|
||||
// -----------------------------------------------------------------
|
||||
|
||||
describe('.create()', () => {
|
||||
it('creates a new schedule', async () => {
|
||||
const schedule = await createSchedule({
|
||||
jobId: jobId,
|
||||
cron: '* * * * * *',
|
||||
enabled: true,
|
||||
})
|
||||
expect(schedule.job).to.be.equal(jobId)
|
||||
expect(schedule.cron).to.be.equal('* * * * * *')
|
||||
expect(schedule.enabled).to.be.true()
|
||||
})
|
||||
})
|
||||
|
||||
// -----------------------------------------------------------------
|
||||
|
||||
describe('.set()', () => {
|
||||
let scheduleId
|
||||
beforeAll(async () => {
|
||||
scheduleId = (await createScheduleTest()).id
|
||||
})
|
||||
it('modifies an existing schedule', async () => {
|
||||
await xo.call('schedule.set', {
|
||||
id: scheduleId,
|
||||
cron: '2 * * * * *',
|
||||
})
|
||||
|
||||
const schedule = await getSchedule(xo, scheduleId)
|
||||
expect(schedule.cron).to.be.equal('2 * * * * *')
|
||||
})
|
||||
})
|
||||
|
||||
// -----------------------------------------------------------------
|
||||
|
||||
describe('.delete()', () => {
|
||||
let scheduleId
|
||||
beforeEach(async () => {
|
||||
scheduleId = (await createScheduleTest()).id
|
||||
})
|
||||
it('deletes an existing schedule', async () => {
|
||||
await xo.call('schedule.delete', { id: scheduleId })
|
||||
await getSchedule(xo, scheduleId).then(
|
||||
() => {
|
||||
throw new Error('getSchedule() should have thrown')
|
||||
},
|
||||
function(error) {
|
||||
expect(error.message).to.match(/no such object/)
|
||||
}
|
||||
)
|
||||
scheduleIds = []
|
||||
})
|
||||
})
|
||||
})
|
||||
82
packages/xo-server-test/src/old-tests/scheduler.spec.js
Normal file
@@ -0,0 +1,82 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
// Doc: https://github.com/moll/js-must/blob/master/doc/API.md#must
|
||||
import expect from 'must'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
import {
|
||||
jobTest,
|
||||
scheduleTest,
|
||||
getConfig,
|
||||
getMainConnection,
|
||||
getSchedule,
|
||||
} from './util'
|
||||
import eventToPromise from 'event-to-promise'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
describe('scheduler', () => {
|
||||
let xo
|
||||
let serverId
|
||||
let jobId
|
||||
let scheduleId
|
||||
|
||||
beforeAll(async () => {
|
||||
jest.setTimeout(10e3)
|
||||
let config
|
||||
;[xo, config] = await Promise.all([getMainConnection(), getConfig()])
|
||||
|
||||
serverId = await xo.call('server.add', config.xenServer1).catch(() => {})
|
||||
await eventToPromise(xo.objects, 'finish')
|
||||
|
||||
jobId = await jobTest(xo)
|
||||
scheduleId = (await scheduleTest(xo, jobId)).id
|
||||
})
|
||||
|
||||
// -----------------------------------------------------------------
|
||||
|
||||
afterAll(async () => {
|
||||
await Promise.all([
|
||||
xo.call('schedule.delete', { id: scheduleId }),
|
||||
xo.call('job.delete', { id: jobId }),
|
||||
xo.call('server.remove', { id: serverId }),
|
||||
])
|
||||
})
|
||||
|
||||
// =================================================================
|
||||
|
||||
describe('.enable()', () => {
|
||||
afterEach(async () => {
|
||||
await xo.call('scheduler.disable', { id: scheduleId })
|
||||
})
|
||||
it.skip("enables a schedule to run its job as scheduled", async () => {
|
||||
await xo.call('scheduler.enable', { id: scheduleId })
|
||||
const schedule = await getSchedule(xo, scheduleId)
|
||||
expect(schedule.enabled).to.be.true()
|
||||
})
|
||||
})
|
||||
|
||||
// -----------------------------------------------------------------
|
||||
|
||||
describe('.disable()', () => {
|
||||
beforeEach(async () => {
|
||||
await xo.call('schedule.enable', { id: scheduleId })
|
||||
})
|
||||
it.skip('disables a schedule', async () => {
|
||||
await xo.call('schedule.disable', { id: scheduleId })
|
||||
const schedule = await getSchedule(xo, scheduleId)
|
||||
expect(schedule.enabled).to.be.false()
|
||||
})
|
||||
})
|
||||
|
||||
// -----------------------------------------------------------------
|
||||
|
||||
describe('.getScheduleTable()', () => {
|
||||
it('gets a map of existing schedules', async () => {
|
||||
const table = await xo.call('scheduler.getScheduleTable')
|
||||
expect(table).to.be.an.object()
|
||||
expect(table).to.match(scheduleId)
|
||||
})
|
||||
})
|
||||
})
|
||||
208
packages/xo-server-test/src/old-tests/server.spec.js
Normal file
@@ -0,0 +1,208 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
import { assign, find, map } from 'lodash'
|
||||
|
||||
import { config, rejectionOf, xo } from './util'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
describe('server', () => {
|
||||
let serverIds = []
|
||||
|
||||
afterEach(async () => {
|
||||
await Promise.all(
|
||||
map(serverIds, serverId => xo.call('server.remove', { id: serverId }))
|
||||
)
|
||||
serverIds = []
|
||||
})
|
||||
|
||||
async function addServer(params) {
|
||||
const serverId = await xo.call('server.add', params)
|
||||
serverIds.push(serverId)
|
||||
return serverId
|
||||
}
|
||||
|
||||
function getAllServers() {
|
||||
return xo.call('server.getAll')
|
||||
}
|
||||
|
||||
async function getServer(id) {
|
||||
const servers = await getAllServers()
|
||||
return find(servers, { id: id })
|
||||
}
|
||||
|
||||
// ==================================================================
|
||||
|
||||
describe('.add()', () => {
|
||||
it('adds a Xen server and returns its id', async () => {
|
||||
const serverId = await addServer({
|
||||
host: 'xen1.example.org',
|
||||
username: 'root',
|
||||
password: 'password',
|
||||
autoConnect: false,
|
||||
})
|
||||
|
||||
const server = await getServer(serverId)
|
||||
expect(typeof server.id).toBe('string')
|
||||
expect(server).toEqual({
|
||||
id: serverId,
|
||||
host: 'xen1.example.org',
|
||||
username: 'root',
|
||||
status: 'disconnected',
|
||||
})
|
||||
})
|
||||
|
||||
it('does not add two servers with the same host', async () => {
|
||||
await addServer({
|
||||
host: 'xen1.example.org',
|
||||
username: 'root',
|
||||
password: 'password',
|
||||
autoConnect: false,
|
||||
})
|
||||
expect(
|
||||
(await rejectionOf(
|
||||
addServer({
|
||||
host: 'xen1.example.org',
|
||||
username: 'root',
|
||||
password: 'password',
|
||||
autoConnect: false,
|
||||
})
|
||||
)).message
|
||||
).toBe('unknown error from the peer')
|
||||
})
|
||||
|
||||
it('sets autoConnect to true by default', async () => {
|
||||
const serverId = await addServer(config.xenServer1)
|
||||
const server = await getServer(serverId)
|
||||
|
||||
expect(server.id).toBe(serverId)
|
||||
expect(server.host).toBe('192.168.100.3')
|
||||
expect(server.username).toBe('root')
|
||||
expect(server.status).toMatch(/^connect(?:ed|ing)$/)
|
||||
})
|
||||
})
|
||||
|
||||
// -----------------------------------------------------------------
|
||||
|
||||
describe('.remove()', () => {
|
||||
let serverId
|
||||
beforeEach(async () => {
|
||||
serverId = await addServer({
|
||||
host: 'xen1.example.org',
|
||||
username: 'root',
|
||||
password: 'password',
|
||||
autoConnect: false,
|
||||
})
|
||||
})
|
||||
|
||||
it('removes a Xen server', async () => {
|
||||
await xo.call('server.remove', {
|
||||
id: serverId,
|
||||
})
|
||||
|
||||
const server = await getServer(serverId)
|
||||
expect(server).toBeUndefined()
|
||||
})
|
||||
})
|
||||
|
||||
// -----------------------------------------------------------------
|
||||
|
||||
describe('.getAll()', () => {
|
||||
it('returns an array', async () => {
|
||||
const servers = await xo.call('server.getAll')
|
||||
|
||||
expect(servers).toBeInstanceOf(Array)
|
||||
})
|
||||
})
|
||||
|
||||
// -----------------------------------------------------------------
|
||||
|
||||
describe('.set()', () => {
|
||||
let serverId
|
||||
beforeEach(async () => {
|
||||
serverId = await addServer({
|
||||
host: 'xen1.example.org',
|
||||
username: 'root',
|
||||
password: 'password',
|
||||
autoConnect: false,
|
||||
})
|
||||
})
|
||||
|
||||
it('changes attributes of an existing server', async () => {
|
||||
await xo.call('server.set', {
|
||||
id: serverId,
|
||||
username: 'root2',
|
||||
})
|
||||
|
||||
const server = await getServer(serverId)
|
||||
expect(server).toEqual({
|
||||
id: serverId,
|
||||
host: 'xen1.example.org',
|
||||
username: 'root2',
|
||||
status: 'disconnected',
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
// -----------------------------------------------------------------
|
||||
|
||||
describe('.connect()', () => {
|
||||
jest.setTimeout(5e3)
|
||||
|
||||
it('connects to a Xen server', async () => {
|
||||
const serverId = await addServer(
|
||||
assign({ autoConnect: false }, config.xenServer1)
|
||||
)
|
||||
|
||||
await xo.call('server.connect', {
|
||||
id: serverId,
|
||||
})
|
||||
|
||||
const server = await getServer(serverId)
|
||||
expect(server).toEqual({
|
||||
enabled: 'true',
|
||||
id: serverId,
|
||||
host: '192.168.100.3',
|
||||
username: 'root',
|
||||
status: 'connected',
|
||||
})
|
||||
})
|
||||
|
||||
it.skip('connects to a Xen server on a slave host', async () => {
|
||||
const serverId = await addServer(config.slaveServer)
|
||||
await xo.call('server.connect', { id: serverId })
|
||||
|
||||
const server = await getServer(serverId)
|
||||
expect(server.status).toBe('connected')
|
||||
})
|
||||
})
|
||||
|
||||
// -----------------------------------------------------------------
|
||||
|
||||
describe('.disconnect()', () => {
|
||||
jest.setTimeout(5e3)
|
||||
let serverId
|
||||
beforeEach(async () => {
|
||||
serverId = await addServer(
|
||||
assign({ autoConnect: false }, config.xenServer1)
|
||||
)
|
||||
await xo.call('server.connect', {
|
||||
id: serverId,
|
||||
})
|
||||
})
|
||||
|
||||
it('disconnects from a Xen server', async () => {
|
||||
await xo.call('server.disconnect', {
|
||||
id: serverId,
|
||||
})
|
||||
|
||||
const server = await getServer(serverId)
|
||||
expect(server).toEqual({
|
||||
id: serverId,
|
||||
host: '192.168.100.3',
|
||||
username: 'root',
|
||||
status: 'disconnected',
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
53
packages/xo-server-test/src/old-tests/token.spec.js
Normal file
@@ -0,0 +1,53 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
import defer from 'golike-defer'
|
||||
import { map } from 'lodash'
|
||||
|
||||
import { getConnection, rejectionOf, testConnection, xo } from './util.js'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
describe('token', () => {
|
||||
const tokens = []
|
||||
|
||||
afterAll(async () => {
|
||||
await Promise.all(map(tokens, token => xo.call('token.delete', { token })))
|
||||
})
|
||||
|
||||
async function createToken() {
|
||||
const token = await xo.call('token.create')
|
||||
tokens.push(token)
|
||||
return token
|
||||
}
|
||||
|
||||
// =================================================================
|
||||
|
||||
describe('.create()', () => {
|
||||
it('creates a token string which can be used to sign in', async () => {
|
||||
const token = await createToken()
|
||||
|
||||
await testConnection({ credentials: { token } })
|
||||
})
|
||||
})
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
describe('.delete()', () => {
|
||||
it(
|
||||
'deletes a token',
|
||||
defer(async $defer => {
|
||||
const token = await createToken()
|
||||
const xo2 = await getConnection({ credentials: { token } })
|
||||
$defer(() => xo2.close())
|
||||
|
||||
await xo2.call('token.delete', {
|
||||
token,
|
||||
})
|
||||
|
||||
expect(
|
||||
(await rejectionOf(testConnection({ credentials: { token } }))).code
|
||||
).toBe(3)
|
||||
})
|
||||
)
|
||||
})
|
||||
})
|
||||
169
packages/xo-server-test/src/old-tests/vbd.spec.js
Normal file
@@ -0,0 +1,169 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
// Doc: https://github.com/moll/js-must/blob/master/doc/API.md#must
|
||||
import expect from 'must'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
import {
|
||||
getConfig,
|
||||
getMainConnection,
|
||||
getVmXoTestPvId,
|
||||
getOneHost,
|
||||
waitObjectState,
|
||||
} from './util'
|
||||
import { assign, map } from 'lodash'
|
||||
import eventToPromise from 'event-to-promise'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
describe('vbd', () => {
|
||||
let xo
|
||||
let vbdId
|
||||
let diskIds = []
|
||||
let serverId
|
||||
let vmId
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
|
||||
beforeAll(async () => {
|
||||
jest.setTimeout(10e3)
|
||||
let config
|
||||
;[xo, config] = await Promise.all([getMainConnection(), getConfig()])
|
||||
|
||||
serverId = await xo.call(
|
||||
'server.add',
|
||||
assign({ autoConnect: false }, config.xenServer1)
|
||||
)
|
||||
await xo.call('server.connect', { id: serverId })
|
||||
await eventToPromise(xo.objects, 'finish')
|
||||
|
||||
vmId = await getVmXoTestPvId(xo)
|
||||
try {
|
||||
await xo.call('vm.start', { id: vmId })
|
||||
} catch (_) {}
|
||||
})
|
||||
|
||||
// -----------------------------------------------------------------
|
||||
|
||||
beforeEach(async () => {
|
||||
jest.setTimeout(10e3)
|
||||
vbdId = await createVbd()
|
||||
})
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
|
||||
afterEach(async () => {
|
||||
await Promise.all(
|
||||
map(diskIds, diskId => xo.call('vdi.delete', { id: diskId }))
|
||||
)
|
||||
diskIds = []
|
||||
})
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
|
||||
afterAll(async () => {
|
||||
jest.setTimeout(5e3)
|
||||
await Promise.all([
|
||||
xo.call('vm.stop', { id: vmId }),
|
||||
xo.call('server.remove', { id: serverId }),
|
||||
])
|
||||
})
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
|
||||
async function createVbd() {
|
||||
// Create disk
|
||||
const pool = await xo.getOrWaitObject(getOneHost(xo).$poolId)
|
||||
const diskId = await xo.call('disk.create', {
|
||||
name: 'diskTest',
|
||||
size: '1MB',
|
||||
sr: pool.default_SR,
|
||||
})
|
||||
diskIds.push(diskId)
|
||||
|
||||
// Create VBD
|
||||
await xo.call('vm.attachDisk', {
|
||||
vm: vmId,
|
||||
vdi: diskId,
|
||||
})
|
||||
const disk = await xo.waitObject(diskId)
|
||||
return disk.$VBDs[0]
|
||||
}
|
||||
|
||||
// =====================================================================
|
||||
|
||||
describe('.delete()', () => {
|
||||
it('delete the VBD', async () => {
|
||||
await xo.call('vbd.disconnect', { id: vbdId })
|
||||
await xo.call('vbd.delete', { id: vbdId })
|
||||
|
||||
await waitObjectState(xo, vbdId, vbd => {
|
||||
expect(vbd).to.be.undefined()
|
||||
})
|
||||
})
|
||||
|
||||
it('deletes the VBD only if it is deconnected', async () => {
|
||||
await xo.call('vbd.delete', { id: vbdId }).then(
|
||||
() => {
|
||||
throw new Error('vbd.delete() should have thrown')
|
||||
},
|
||||
function(error) {
|
||||
// TODO: check with Julien if it is ok
|
||||
expect(error.message).to.match('unknown error from the peer')
|
||||
}
|
||||
)
|
||||
await xo.call('vbd.disconnect', { id: vbdId })
|
||||
})
|
||||
})
|
||||
|
||||
// --------------------------------------------------------------------
|
||||
|
||||
describe('.disconnect()', () => {
|
||||
it('disconnect the VBD', async () => {
|
||||
await xo.call('vbd.disconnect', { id: vbdId })
|
||||
await waitObjectState(xo, vbdId, vbd => {
|
||||
expect(vbd.attached).to.be.false()
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
describe('.connect()', () => {
|
||||
beforeEach(async () => {
|
||||
await xo.call('vbd.disconnect', { id: vbdId })
|
||||
})
|
||||
afterEach(async () => {
|
||||
await xo.call('vbd.disconnect', { id: vbdId })
|
||||
})
|
||||
|
||||
it('connect the VBD', async () => {
|
||||
await xo.call('vbd.connect', { id: vbdId })
|
||||
|
||||
await waitObjectState(xo, vbdId, vbd => {
|
||||
expect(vbd.attached).to.be.true()
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
// ----------------------------------------------------------------
|
||||
|
||||
describe('.set()', () => {
|
||||
afterEach(async () => {
|
||||
await xo.call('vbd.disconnect', { id: vbdId })
|
||||
})
|
||||
|
||||
// TODO: resolve problem with disconnect
|
||||
it.skip('set the position of the VBD', async () => {
|
||||
await xo.call('vbd.set', {
|
||||
id: vbdId,
|
||||
position: '10',
|
||||
})
|
||||
|
||||
await waitObjectState(xo, vbdId, vbd => {
|
||||
expect(vbd.position).to.be.equal('10')
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
133
packages/xo-server-test/src/old-tests/vif.spec.js
Normal file
133
packages/xo-server-test/src/old-tests/vif.spec.js
Normal file
@@ -0,0 +1,133 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
// Doc: https://github.com/moll/js-must/blob/master/doc/API.md#must
|
||||
import expect from 'must'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
import {
|
||||
getConfig,
|
||||
getMainConnection,
|
||||
getNetworkId,
|
||||
waitObjectState,
|
||||
getVmXoTestPvId,
|
||||
} from './util'
|
||||
import eventToPromise from 'event-to-promise'
|
||||
import { map } from 'lodash'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
describe('vif', () => {
|
||||
let xo
|
||||
let serverId
|
||||
let vifIds = []
|
||||
let vmId
|
||||
let vifId
|
||||
|
||||
beforeAll(async () => {
|
||||
jest.setTimeout(10e3)
|
||||
let config
|
||||
;[xo, config] = await Promise.all([getMainConnection(), getConfig()])
|
||||
|
||||
serverId = await xo.call('server.add', config.xenServer1).catch(() => {})
|
||||
await eventToPromise(xo.objects, 'finish')
|
||||
|
||||
vmId = await getVmXoTestPvId(xo)
|
||||
try {
|
||||
await xo.call('vm.start', { id: vmId })
|
||||
} catch (_) {}
|
||||
})
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
beforeEach(async () => {
|
||||
vifId = await createVif()
|
||||
})
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
afterEach(async () => {
|
||||
await Promise.all(
|
||||
map(vifIds, vifId => xo.call('vif.delete', { id: vifId }))
|
||||
)
|
||||
vifIds = []
|
||||
})
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
afterAll(async () => {
|
||||
jest.setTimeout(5e3)
|
||||
await xo.call('vm.stop', { id: vmId, force: true })
|
||||
await xo.call('server.remove', { id: serverId })
|
||||
})
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
async function createVif() {
|
||||
const networkId = await getNetworkId(xo)
|
||||
|
||||
const vifId = await xo.call('vm.createInterface', {
|
||||
vm: vmId,
|
||||
network: networkId,
|
||||
position: '1',
|
||||
})
|
||||
vifIds.push(vifId)
|
||||
|
||||
return vifId
|
||||
}
|
||||
|
||||
// ===================================================================
|
||||
|
||||
describe('.delete()', () => {
|
||||
it('deletes a VIF', async () => {
|
||||
await xo.call('vif.disconnect', { id: vifId })
|
||||
await xo.call('vif.delete', { id: vifId })
|
||||
|
||||
await waitObjectState(xo, vifId, vif => {
|
||||
expect(vif).to.be.undefined()
|
||||
})
|
||||
|
||||
vifIds = []
|
||||
})
|
||||
|
||||
it('can not delete a VIF if it is connected', async () => {
|
||||
await xo.call('vif.delete', { id: vifId }).then(
|
||||
() => {
|
||||
throw new Error('vif.delete() should have thrown')
|
||||
},
|
||||
function(error) {
|
||||
expect(error.message).to.be.equal('unknown error from the peer')
|
||||
}
|
||||
)
|
||||
await xo.call('vif.disconnect', { id: vifId })
|
||||
})
|
||||
})
|
||||
|
||||
// ----------------------------------------------------------------
|
||||
|
||||
describe('.disconnect()', () => {
|
||||
it('disconnects a VIF', async () => {
|
||||
await xo.call('vif.disconnect', { id: vifId })
|
||||
await waitObjectState(xo, vifId, vif => {
|
||||
expect(vif.attached).to.be.false()
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
// ----------------------------------------------------------------
|
||||
|
||||
describe('.connect()', () => {
|
||||
beforeEach(async () => {
|
||||
await xo.call('vif.disconnect', { id: vifId })
|
||||
})
|
||||
afterEach(async () => {
|
||||
await xo.call('vif.disconnect', { id: vifId })
|
||||
})
|
||||
it('connects a VIF', async () => {
|
||||
await xo.call('vif.connect', { id: vifId })
|
||||
await waitObjectState(xo, vifId, vif => {
|
||||
expect(vif.attached).to.be.true()
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
666
packages/xo-server-test/src/old-tests/vm.spec.js
Normal file
666
packages/xo-server-test/src/old-tests/vm.spec.js
Normal file
@@ -0,0 +1,666 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
// Doc: https://github.com/moll/js-must/blob/master/doc/API.md#must
|
||||
import expect from 'must'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
import {
|
||||
almostEqual,
|
||||
getAllHosts,
|
||||
getConfig,
|
||||
getMainConnection,
|
||||
getNetworkId,
|
||||
getOneHost,
|
||||
getSrId,
|
||||
getVmToMigrateId,
|
||||
getVmXoTestPvId,
|
||||
waitObjectState,
|
||||
} from './util'
|
||||
import { map, find } from 'lodash'
|
||||
import eventToPromise from 'event-to-promise'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
describe('vm', () => {
|
||||
let xo
|
||||
let vmId
|
||||
let vmIds = []
|
||||
let serverId
|
||||
let config
|
||||
|
||||
// ----------------------------------------------------------------------
|
||||
|
||||
beforeAll(async () => {
|
||||
jest.setTimeout(10e3)
|
||||
;[xo, config] = await Promise.all([getMainConnection(), getConfig()])
|
||||
serverId = await xo.call('server.add', config.xenServer1).catch(() => {})
|
||||
await eventToPromise(xo.objects, 'finish')
|
||||
})
|
||||
|
||||
// ----------------------------------------------------------------------
|
||||
|
||||
afterEach(async () => {
|
||||
jest.setTimeout(15e3)
|
||||
await Promise.all(
|
||||
map(vmIds, vmId => xo.call('vm.delete', { id: vmId, delete_disks: true }))
|
||||
)
|
||||
vmIds = []
|
||||
})
|
||||
|
||||
// ---------------------------------------------------------------------
|
||||
|
||||
afterAll(async () => {
|
||||
await xo.call('server.remove', {
|
||||
id: serverId,
|
||||
})
|
||||
})
|
||||
|
||||
// ---------------------------------------------------------------------
|
||||
|
||||
async function createVm(params) {
|
||||
const vmId = await xo.call('vm.create', params)
|
||||
vmIds.push(vmId)
|
||||
return vmId
|
||||
}
|
||||
|
||||
async function createVmTest() {
|
||||
const templateId = getTemplateId(config.templates.debian)
|
||||
const vmId = await createVm({
|
||||
name_label: 'vmTest',
|
||||
template: templateId,
|
||||
VIFs: [],
|
||||
})
|
||||
return vmId
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
|
||||
// eslint-disable-next-line no-unused-vars
|
||||
async function getCdVbdPosition(vmId) {
|
||||
const vm = await xo.getOrWaitObject(vmId)
|
||||
for (let i = 0; i < vm.$VBDs.length; i++) {
|
||||
const vbd = await xo.getOrWaitObject(vm.$VBDs[i])
|
||||
if (vbd.is_cd_drive === true) {
|
||||
return vbd.id
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function getHostOtherPool(vm) {
|
||||
const hosts = getAllHosts(xo)
|
||||
for (const id in hosts) {
|
||||
if (hosts[id].$poolId !== vm.$poolId) {
|
||||
return id
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// eslint-disable-next-line no-unused-vars
|
||||
function getIsoId() {
|
||||
const vdis = xo.objects.indexes.type.VDI
|
||||
const iso = find(vdis, { name_label: config.iso })
|
||||
return iso.id
|
||||
}
|
||||
|
||||
function getOtherHost(vm) {
|
||||
const hosts = getAllHosts(xo)
|
||||
for (const id in hosts) {
|
||||
if (hosts[id].$poolId === vm.poolId) {
|
||||
if (id !== vm.$container) {
|
||||
return id
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function getTemplateId(nameTemplate) {
|
||||
const templates = xo.objects.indexes.type['VM-template']
|
||||
const template = find(templates, { name_label: nameTemplate })
|
||||
return template.id
|
||||
}
|
||||
|
||||
// =================================================================
|
||||
|
||||
describe('.create()', () => {
|
||||
it('creates a VM with only a name and a template', async () => {
|
||||
const templateId = getTemplateId(config.templates.debian)
|
||||
|
||||
vmId = await createVm({
|
||||
name_label: 'vmTest',
|
||||
template: templateId,
|
||||
VIFs: [],
|
||||
})
|
||||
await waitObjectState(xo, vmId, vm => {
|
||||
expect(vm.id).to.be.a.string()
|
||||
expect(vm).to.be.an.object()
|
||||
})
|
||||
})
|
||||
|
||||
describe('.createHVM()', () => {
|
||||
let srId
|
||||
let templateId
|
||||
|
||||
beforeAll(async () => {
|
||||
srId = await getSrId(xo)
|
||||
templateId = getTemplateId(config.templates.otherConfig)
|
||||
})
|
||||
|
||||
it.skip('creates a VM with the Other Config template, three disks, two interfaces and a ISO mounted', async () => {
|
||||
jest.setTimeout(30e3)
|
||||
|
||||
const networkId = await getNetworkId(xo)
|
||||
vmId = await createVm({
|
||||
name_label: 'vmTest',
|
||||
template: templateId,
|
||||
VIFs: [{ network: networkId }, { network: networkId }],
|
||||
VDIs: [
|
||||
{ device: '0', size: 1, SR: srId, type: 'user' },
|
||||
{ device: '1', size: 1, SR: srId, type: 'user' },
|
||||
{ device: '2', size: 1, SR: srId, type: 'user' },
|
||||
],
|
||||
})
|
||||
|
||||
await waitObjectState(xo, vmId, vm => {
|
||||
expect(vm.name_label).to.be.equal('vmTest')
|
||||
expect(vm.other.base_template_name).to.be.equal(
|
||||
config.templates.otherConfig
|
||||
)
|
||||
expect(vm.VIFs).to.have.length(2)
|
||||
expect(vm.$VBDs).to.have.length(3)
|
||||
})
|
||||
})
|
||||
|
||||
it.skip('creates a VM with the Other Config template, no disk, no network and a ISO mounted', async () => {
|
||||
vmId = await createVm({
|
||||
name_label: 'vmTest',
|
||||
template: templateId,
|
||||
VIFs: [],
|
||||
})
|
||||
|
||||
await waitObjectState(xo, vmId, vm => {
|
||||
expect(vm.other.base_template_name).to.be.equal(
|
||||
config.templates.otherConfig
|
||||
)
|
||||
expect(vm.VIFs).to.have.length(0)
|
||||
expect(vm.$VBDs).to.have.length(0)
|
||||
})
|
||||
})
|
||||
})
|
||||
describe('.createPV()', () => {
|
||||
let srId
|
||||
let templateId
|
||||
let networkId
|
||||
|
||||
beforeAll(async () => {
|
||||
;[networkId, srId] = await Promise.all([getNetworkId(xo), getSrId(xo)])
|
||||
})
|
||||
|
||||
it.skip('creates a VM with the Debian 7 64 bits template, network install, one disk, one network', async () => {
|
||||
templateId = getTemplateId(config.templates.debian)
|
||||
|
||||
vmId = await createVm({
|
||||
name_label: 'vmTest',
|
||||
template: templateId,
|
||||
VIFs: [{ network: networkId }],
|
||||
VDIs: [
|
||||
{
|
||||
device: '0',
|
||||
size: 1,
|
||||
SR: srId,
|
||||
type: 'user',
|
||||
},
|
||||
],
|
||||
})
|
||||
|
||||
await waitObjectState(xo, vmId, vm => {
|
||||
expect(vm.other.base_template_name).to.be.equal(
|
||||
config.templates.debian
|
||||
)
|
||||
expect(vm.VIFs).to.have.length(1)
|
||||
expect(vm.$VBDs).to.have.length(1)
|
||||
})
|
||||
})
|
||||
|
||||
it('creates a VM with the CentOS 7 64 bits template, two disks, two networks and a ISO mounted', async () => {
|
||||
jest.setTimeout(10e3)
|
||||
|
||||
templateId = getTemplateId(config.templates.centOS)
|
||||
vmId = await createVm({
|
||||
name_label: 'vmTest',
|
||||
template: templateId,
|
||||
VIFs: [{ network: networkId }, { network: networkId }],
|
||||
VDIs: [
|
||||
{ device: '0', size: 1, SR: srId, type: 'user' },
|
||||
{ device: '1', size: 1, SR: srId, type: 'user' },
|
||||
],
|
||||
})
|
||||
|
||||
await waitObjectState(xo, vmId, vm => {
|
||||
expect(vm.other.base_template_name).to.be.equal(
|
||||
config.templates.centOS
|
||||
)
|
||||
expect(vm.VIFs).to.have.length(2)
|
||||
expect(vm.$VBDs).to.have.length(2)
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
|
||||
describe('.delete()', () => {
|
||||
let snapshotIds = []
|
||||
let diskIds = []
|
||||
|
||||
beforeEach(async () => {
|
||||
vmId = await createVmTest()
|
||||
})
|
||||
|
||||
afterAll(async () => {
|
||||
await Promise.all(
|
||||
map(snapshotIds, snapshotId =>
|
||||
xo.call('vm.delete', { id: snapshotId })
|
||||
),
|
||||
map(diskIds, diskId => xo.call('vdi.delete', { id: diskId }))
|
||||
)
|
||||
})
|
||||
|
||||
it('deletes a VM', async () => {
|
||||
await xo.call('vm.delete', {
|
||||
id: vmId,
|
||||
delete_disks: true,
|
||||
})
|
||||
|
||||
await waitObjectState(xo, vmId, vm => {
|
||||
expect(vm).to.be.undefined()
|
||||
})
|
||||
vmIds = []
|
||||
})
|
||||
|
||||
it('deletes a VM and its snapshots', async () => {
|
||||
const snapshotId = await xo.call('vm.snapshot', {
|
||||
id: vmId,
|
||||
name: 'snapshot',
|
||||
})
|
||||
snapshotIds.push(snapshotId)
|
||||
|
||||
await xo.call('vm.delete', {
|
||||
id: vmId,
|
||||
delete_disks: true,
|
||||
})
|
||||
vmIds = []
|
||||
await waitObjectState(xo, snapshotId, snapshot => {
|
||||
expect(snapshot).to.be.undefined()
|
||||
})
|
||||
snapshotIds = []
|
||||
})
|
||||
|
||||
it('deletes a VM and its disks', async () => {
|
||||
jest.setTimeout(5e3)
|
||||
// create disk
|
||||
const host = getOneHost(xo)
|
||||
const pool = await xo.getOrWaitObject(host.$poolId)
|
||||
|
||||
const diskId = await xo.call('disk.create', {
|
||||
name: 'diskTest',
|
||||
size: '1GB',
|
||||
sr: pool.default_SR,
|
||||
})
|
||||
diskIds.push(diskId)
|
||||
|
||||
// attach the disk on the VM
|
||||
await xo.call('vm.attachDisk', {
|
||||
vm: vmId,
|
||||
vdi: diskId,
|
||||
})
|
||||
|
||||
// delete the VM
|
||||
await xo.call('vm.delete', {
|
||||
id: vmId,
|
||||
delete_disks: true,
|
||||
})
|
||||
vmIds = []
|
||||
await waitObjectState(xo, diskId, disk => {
|
||||
expect(disk).to.be.undefined()
|
||||
})
|
||||
diskIds = []
|
||||
})
|
||||
|
||||
// TODO: do a copy of the ISO
|
||||
it.skip('deletes a vm but not delete its ISO', async () => {
|
||||
vmId = await createVmTest()
|
||||
|
||||
await xo.call('vm.insertCd', {
|
||||
id: vmId,
|
||||
cd_id: '1169eb8a-d43f-4daf-a0ca-f3434a4bf301',
|
||||
force: false,
|
||||
})
|
||||
|
||||
await xo.call('vm.delete', {
|
||||
id: vmId,
|
||||
delete_disks: true,
|
||||
})
|
||||
|
||||
waitObjectState(xo, '1169eb8a-d43f-4daf-a0ca-f3434a4bf301', iso => {
|
||||
expect(iso).not.to.be.undefined()
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
describe('.migrate', () => {
|
||||
jest.setTimeout(15e3)
|
||||
|
||||
let secondServerId
|
||||
let startHostId
|
||||
let hostId
|
||||
|
||||
beforeAll(async () => {
|
||||
secondServerId = await xo
|
||||
.call('server.add', config.xenServer2)
|
||||
.catch(() => {})
|
||||
await eventToPromise(xo.objects, 'finish')
|
||||
|
||||
vmId = await getVmToMigrateId(xo)
|
||||
|
||||
try {
|
||||
await xo.call('vm.start', { id: vmId })
|
||||
} catch (_) {}
|
||||
})
|
||||
beforeEach(async () => {
|
||||
const vm = await xo.getOrWaitObject(vmId)
|
||||
startHostId = vm.$container
|
||||
hostId = getOtherHost(vm)
|
||||
})
|
||||
afterEach(async () => {
|
||||
await xo.call('vm.migrate', {
|
||||
id: vmId,
|
||||
host_id: startHostId,
|
||||
})
|
||||
})
|
||||
afterAll(async () => {
|
||||
await xo.call('server.remove', {
|
||||
id: secondServerId,
|
||||
})
|
||||
})
|
||||
|
||||
it('migrates the VM on an other host', async () => {
|
||||
await xo.call('vm.migrate', {
|
||||
id: vmId,
|
||||
host_id: hostId,
|
||||
})
|
||||
await waitObjectState(xo, vmId, vm => {
|
||||
expect(vm.$container).to.be.equal(hostId)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
describe('.migratePool()', () => {
|
||||
jest.setTimeout(100e3)
|
||||
let hostId
|
||||
let secondServerId
|
||||
let startHostId
|
||||
|
||||
beforeAll(async () => {
|
||||
secondServerId = await xo
|
||||
.call('server.add', config.xenServer2)
|
||||
.catch(() => {})
|
||||
await eventToPromise(xo.objects, 'finish')
|
||||
|
||||
vmId = await getVmToMigrateId(xo)
|
||||
|
||||
try {
|
||||
await xo.call('vm.start', { id: vmId })
|
||||
} catch (_) {}
|
||||
})
|
||||
afterAll(async () => {
|
||||
await xo.call('server.remove', { id: secondServerId })
|
||||
})
|
||||
beforeEach(async () => {
|
||||
const vm = await xo.getOrWaitObject(vmId)
|
||||
startHostId = vm.$container
|
||||
hostId = getHostOtherPool(xo, vm)
|
||||
})
|
||||
|
||||
afterEach(async () => {
|
||||
// TODO: try to get the vmId
|
||||
vmId = await getVmToMigrateId(xo)
|
||||
await xo.call('vm.migrate_pool', {
|
||||
id: vmId,
|
||||
target_host_id: startHostId,
|
||||
})
|
||||
})
|
||||
|
||||
it.skip('migrates the VM on an other host which is in an other pool', async () => {
|
||||
await xo.call('vm.migrate_pool', {
|
||||
id: vmId,
|
||||
target_host_id: hostId,
|
||||
})
|
||||
await waitObjectState(xo, vmId, vm => {
|
||||
expect(vm).to.be.undefined()
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
// --------------------------------------------------------------------
|
||||
|
||||
describe('.clone()', () => {
|
||||
beforeEach(async () => {
|
||||
vmId = await createVmTest()
|
||||
})
|
||||
it('clones a VM', async () => {
|
||||
const cloneId = await xo.call('vm.clone', {
|
||||
id: vmId,
|
||||
name: 'clone',
|
||||
full_copy: true,
|
||||
})
|
||||
// push cloneId in vmIds array to delete the VM after test
|
||||
vmIds.push(cloneId)
|
||||
|
||||
const [vm, clone] = await Promise.all([
|
||||
xo.getOrWaitObject(vmId),
|
||||
xo.getOrWaitObject(cloneId),
|
||||
])
|
||||
expect(clone.type).to.be.equal('VM')
|
||||
expect(clone.name_label).to.be.equal('clone')
|
||||
|
||||
almostEqual(clone, vm, ['name_label', 'ref', 'id', 'other.mac_seed'])
|
||||
})
|
||||
})
|
||||
|
||||
// --------------------------------------------------------------------
|
||||
|
||||
describe('.convert()', () => {
|
||||
beforeEach(async () => {
|
||||
vmId = await createVmTest()
|
||||
})
|
||||
|
||||
it('converts a VM', async () => {
|
||||
await xo.call('vm.convert', { id: vmId })
|
||||
await waitObjectState(xo, vmId, vm => {
|
||||
expect(vm.type).to.be.equal('VM-template')
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
// ---------------------------------------------------------------------
|
||||
|
||||
describe('.revert()', () => {
|
||||
jest.setTimeout(5e3)
|
||||
let snapshotId
|
||||
beforeEach(async () => {
|
||||
vmId = await createVmTest()
|
||||
snapshotId = await xo.call('vm.snapshot', {
|
||||
id: vmId,
|
||||
name: 'snapshot',
|
||||
})
|
||||
})
|
||||
afterEach(async () => {
|
||||
await xo.call('vm.delete', { id: snapshotId })
|
||||
})
|
||||
it('reverts a snapshot to its parent VM', async () => {
|
||||
const revert = await xo.call('vm.revert', { id: snapshotId })
|
||||
expect(revert).to.be.true()
|
||||
})
|
||||
})
|
||||
|
||||
// ---------------------------------------------------------------------
|
||||
|
||||
describe('.handleExport()', () => {
|
||||
it('')
|
||||
})
|
||||
|
||||
// --------------------------------------------------------------------
|
||||
|
||||
describe('.import()', () => {
|
||||
it('')
|
||||
})
|
||||
|
||||
// ---------------------------------------------------------------------
|
||||
|
||||
describe('.attachDisk()', () => {
|
||||
jest.setTimeout(5e3)
|
||||
let diskId
|
||||
beforeEach(async () => {
|
||||
vmId = await createVmTest()
|
||||
const srId = await getSrId(xo)
|
||||
diskId = await xo.call('disk.create', {
|
||||
name: 'diskTest',
|
||||
size: '1GB',
|
||||
sr: srId,
|
||||
})
|
||||
})
|
||||
afterEach(async () => {
|
||||
await xo.call('vdi.delete', { id: diskId })
|
||||
})
|
||||
|
||||
it('attaches the disk to the VM with attributes by default', async () => {
|
||||
await xo.call('vm.attachDisk', {
|
||||
vm: vmId,
|
||||
vdi: diskId,
|
||||
})
|
||||
const vm = await xo.waitObject(vmId)
|
||||
await waitObjectState(xo, diskId, disk => {
|
||||
expect(disk.$VBDs).to.be.eql(vm.$VBDs)
|
||||
})
|
||||
|
||||
await waitObjectState(xo, vm.$VBDs, vbd => {
|
||||
expect(vbd.type).to.be.equal('VBD')
|
||||
// expect(vbd.attached).to.be.true()
|
||||
expect(vbd.bootable).to.be.false()
|
||||
expect(vbd.is_cd_drive).to.be.false()
|
||||
expect(vbd.position).to.be.equal('0')
|
||||
expect(vbd.read_only).to.be.false()
|
||||
expect(vbd.VDI).to.be.equal(diskId)
|
||||
expect(vbd.VM).to.be.equal(vmId)
|
||||
expect(vbd.$poolId).to.be.equal(vm.$poolId)
|
||||
})
|
||||
})
|
||||
|
||||
it('attaches the disk to the VM with specified attributes', async () => {
|
||||
await xo.call('vm.attachDisk', {
|
||||
vm: vmId,
|
||||
vdi: diskId,
|
||||
bootable: true,
|
||||
mode: 'RO',
|
||||
position: '2',
|
||||
})
|
||||
const vm = await xo.waitObject(vmId)
|
||||
await waitObjectState(xo, vm.$VBDs, vbd => {
|
||||
expect(vbd.type).to.be.equal('VBD')
|
||||
// expect(vbd.attached).to.be.true()
|
||||
expect(vbd.bootable).to.be.true()
|
||||
expect(vbd.is_cd_drive).to.be.false()
|
||||
expect(vbd.position).to.be.equal('2')
|
||||
expect(vbd.read_only).to.be.true()
|
||||
expect(vbd.VDI).to.be.equal(diskId)
|
||||
expect(vbd.VM).to.be.equal(vmId)
|
||||
expect(vbd.$poolId).to.be.equal(vm.$poolId)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
// ---------------------------------------------------------------------
|
||||
|
||||
describe('.createInterface()', () => {
|
||||
let vifId
|
||||
let networkId
|
||||
beforeAll(async () => {
|
||||
vmId = await getVmXoTestPvId(xo)
|
||||
networkId = await getNetworkId(xo)
|
||||
})
|
||||
afterEach(async () => {
|
||||
await xo.call('vif.delete', { id: vifId })
|
||||
})
|
||||
|
||||
it('create a VIF between the VM and the network', async () => {
|
||||
vifId = await xo.call('vm.createInterface', {
|
||||
vm: vmId,
|
||||
network: networkId,
|
||||
position: '1',
|
||||
})
|
||||
|
||||
await waitObjectState(xo, vifId, vif => {
|
||||
expect(vif.type).to.be.equal('VIF')
|
||||
// expect(vif.attached).to.be.true()
|
||||
expect(vif.$network).to.be.equal(networkId)
|
||||
expect(vif.$VM).to.be.equal(vmId)
|
||||
expect(vif.device).to.be.equal('1')
|
||||
})
|
||||
})
|
||||
|
||||
it('can not create two interfaces on the same device', async () => {
|
||||
vifId = await xo.call('vm.createInterface', {
|
||||
vm: vmId,
|
||||
network: networkId,
|
||||
position: '1',
|
||||
})
|
||||
await xo
|
||||
.call('vm.createInterface', {
|
||||
vm: vmId,
|
||||
network: networkId,
|
||||
position: '1',
|
||||
})
|
||||
.then(
|
||||
() => {
|
||||
throw new Error('createInterface() sould have trown')
|
||||
},
|
||||
function(error) {
|
||||
expect(error.message).to.be.equal('unknown error from the peer')
|
||||
}
|
||||
)
|
||||
})
|
||||
})
|
||||
|
||||
// ---------------------------------------------------------------------
|
||||
|
||||
describe('.stats()', () => {
|
||||
jest.setTimeout(20e3)
|
||||
beforeAll(async () => {
|
||||
vmId = await getVmXoTestPvId(xo)
|
||||
})
|
||||
beforeEach(async () => {
|
||||
await xo.call('vm.start', { id: vmId })
|
||||
})
|
||||
afterEach(async () => {
|
||||
await xo.call('vm.stop', {
|
||||
id: vmId,
|
||||
force: true,
|
||||
})
|
||||
})
|
||||
|
||||
it('returns an array with statistics of the VM', async () => {
|
||||
const stats = await xo.call('vm.stats', { id: vmId })
|
||||
expect(stats).to.be.an.object()
|
||||
})
|
||||
})
|
||||
|
||||
// ---------------------------------------------------------------------
|
||||
describe('.bootOrder()', () => {
|
||||
it('')
|
||||
})
|
||||
})
|
||||
126
packages/xo-server-test/src/old-tests/vm/cd.spec.js
Normal file
126
packages/xo-server-test/src/old-tests/vm/cd.spec.js
Normal file
@@ -0,0 +1,126 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
import {
|
||||
config,
|
||||
getOrWaitCdVbdPosition,
|
||||
rejectionOf,
|
||||
waitObjectState,
|
||||
xo,
|
||||
} from './../util'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
beforeAll(async () => {
|
||||
jest.setTimeout(20e3)
|
||||
})
|
||||
|
||||
describe('cd', () => {
|
||||
let vmId
|
||||
|
||||
// ----------------------------------------------------------------------
|
||||
|
||||
beforeAll(async () => {
|
||||
vmId = await xo.call('vm.create', {
|
||||
name_label: 'vmTest',
|
||||
template: config.templatesId.debian,
|
||||
})
|
||||
await waitObjectState(xo, vmId, vm => {
|
||||
if (vm.type !== 'VM') throw new Error('retry')
|
||||
})
|
||||
})
|
||||
|
||||
afterAll(() => xo.call('vm.delete', { id: vmId }))
|
||||
|
||||
// ===================================================================
|
||||
|
||||
describe('.insertCd()', () => {
|
||||
afterEach(() => xo.call('vm.ejectCd', { id: vmId }))
|
||||
|
||||
it('mount an ISO on the VM (force: false)', async () => {
|
||||
await xo.call('vm.insertCd', {
|
||||
id: vmId,
|
||||
cd_id: config.windowsIsoId,
|
||||
force: false,
|
||||
})
|
||||
const vbdId = await getOrWaitCdVbdPosition(vmId)
|
||||
|
||||
await waitObjectState(xo, vbdId, vbd => {
|
||||
expect(vbd.VDI).toBe(config.windowsIsoId)
|
||||
expect(vbd.is_cd_drive).toBeTruthy()
|
||||
expect(vbd.position).toBe('3')
|
||||
})
|
||||
})
|
||||
|
||||
it('mount an ISO on the VM (force: false) which has already a CD in the VBD', async () => {
|
||||
await xo.call('vm.insertCd', {
|
||||
id: vmId,
|
||||
cd_id: config.windowsIsoId,
|
||||
force: false,
|
||||
})
|
||||
await getOrWaitCdVbdPosition(vmId)
|
||||
|
||||
expect(
|
||||
(await rejectionOf(
|
||||
xo.call('vm.insertCd', {
|
||||
id: vmId,
|
||||
cd_id: config.ubuntuIsoId,
|
||||
force: false,
|
||||
})
|
||||
)).message
|
||||
).toBe('unknown error from the peer')
|
||||
})
|
||||
|
||||
it('mount an ISO on the VM (force: true) which has already a CD in the VBD', async () => {
|
||||
await xo.call('vm.insertCd', {
|
||||
id: vmId,
|
||||
cd_id: config.windowsIsoId,
|
||||
force: true,
|
||||
})
|
||||
const vbdId = await getOrWaitCdVbdPosition(vmId)
|
||||
|
||||
await xo.call('vm.insertCd', {
|
||||
id: vmId,
|
||||
cd_id: config.ubuntuIsoId,
|
||||
force: true,
|
||||
})
|
||||
|
||||
await waitObjectState(xo, vbdId, vbd => {
|
||||
expect(vbd.VDI).toBe(config.ubuntuIsoId)
|
||||
expect(vbd.is_cd_drive).toBeTruthy()
|
||||
expect(vbd.position).toBe('3')
|
||||
})
|
||||
})
|
||||
|
||||
it("mount an ISO on a VM which do not have already cd's VBD", async () => {
|
||||
await xo.call('vm.insertCd', {
|
||||
id: vmId,
|
||||
cd_id: config.windowsIsoId,
|
||||
force: false,
|
||||
})
|
||||
|
||||
await waitObjectState(xo, vmId, async vm => {
|
||||
expect(vm.$VBDs).toHaveLength(1)
|
||||
const vbd = await xo.getOrWaitObject(vm.$VBDs)
|
||||
expect(vbd.is_cd_drive).toBeTruthy()
|
||||
expect(vbd.position).toBe('3')
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe('.ejectCd()', () => {
|
||||
it('ejects an ISO', async () => {
|
||||
await xo.call('vm.insertCd', {
|
||||
id: vmId,
|
||||
cd_id: config.windowsIsoId,
|
||||
force: false,
|
||||
})
|
||||
|
||||
const vbdId = await getOrWaitCdVbdPosition(vmId)
|
||||
|
||||
await xo.call('vm.ejectCd', { id: vmId })
|
||||
await waitObjectState(xo, vbdId, vbd => {
|
||||
expect(vbd.VDI).toBeNull()
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
268
packages/xo-server-test/src/old-tests/vm/life-cyle.spec.js
Normal file
268
packages/xo-server-test/src/old-tests/vm/life-cyle.spec.js
Normal file
@@ -0,0 +1,268 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
import { map, size } from 'lodash'
|
||||
|
||||
import { config, rejectionOf, waitObjectState, xo } from './../util'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
beforeAll(async () => {
|
||||
jest.setTimeout(150e3)
|
||||
})
|
||||
|
||||
describe('the VM life cyle', () => {
|
||||
const vmsToDelete = []
|
||||
// hvm with tools behave like pv vm
|
||||
let hvmWithToolsId
|
||||
let hvmWithoutToolsId
|
||||
|
||||
// ----------------------------------------------------------------------
|
||||
|
||||
beforeAll(async () => {
|
||||
hvmWithToolsId = await xo.call('vm.create', {
|
||||
name_label: 'vmTest-updateState',
|
||||
template: config.templatesId.debianCloud,
|
||||
VIFs: [{ network: config.labPoolNetworkId }],
|
||||
VDIs: [
|
||||
{
|
||||
device: '0',
|
||||
size: 1,
|
||||
SR: config.labPoolSrId,
|
||||
type: 'user',
|
||||
},
|
||||
],
|
||||
})
|
||||
vmsToDelete.push(hvmWithToolsId)
|
||||
await waitObjectState(xo, hvmWithToolsId, vm => {
|
||||
if (vm.type !== 'VM') throw new Error('retry')
|
||||
})
|
||||
|
||||
hvmWithoutToolsId = await xo.call('vm.create', {
|
||||
name_label: 'vmTest-updateState',
|
||||
template: config.templatesId.centOS,
|
||||
VIFs: [{ network: config.labPoolNetworkId }],
|
||||
VDIs: [
|
||||
{
|
||||
device: '0',
|
||||
size: 1,
|
||||
SR: config.labPoolSrId,
|
||||
type: 'user',
|
||||
},
|
||||
],
|
||||
})
|
||||
vmsToDelete.push(hvmWithoutToolsId)
|
||||
await waitObjectState(xo, hvmWithoutToolsId, vm => {
|
||||
if (vm.type !== 'VM') throw new Error('retry')
|
||||
})
|
||||
})
|
||||
|
||||
afterAll(async () => {
|
||||
await Promise.all(
|
||||
map(vmsToDelete, id =>
|
||||
xo
|
||||
.call('vm.delete', { id, delete_disks: true })
|
||||
.catch(error => console.error(error))
|
||||
)
|
||||
)
|
||||
vmsToDelete.length = 0
|
||||
})
|
||||
|
||||
// =================================================================
|
||||
|
||||
describe('.start()', () => {
|
||||
it('starts a VM', async () => {
|
||||
await xo.call('vm.start', { id: hvmWithToolsId })
|
||||
|
||||
await waitObjectState(xo, hvmWithToolsId, vm => {
|
||||
expect(size(vm.current_operations)).toBe(0)
|
||||
expect(vm.power_state).toBe('Running')
|
||||
expect(vm.startTime).not.toBe(0)
|
||||
expect(vm.xenTools).not.toBeFalsy()
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe('.sets() on a running VM', () => {
|
||||
it('sets VM parameters', async () => {
|
||||
await xo.call('vm.set', {
|
||||
id: hvmWithToolsId,
|
||||
name_label: 'startedVmRenamed',
|
||||
name_description: 'test started vm',
|
||||
high_availability: true,
|
||||
CPUs: 1,
|
||||
memoryMin: 260e6,
|
||||
})
|
||||
|
||||
await waitObjectState(xo, hvmWithToolsId, vm => {
|
||||
expect(vm.name_label).toBe('startedVmRenamed')
|
||||
expect(vm.name_description).toBe('test started vm')
|
||||
expect(vm.high_availability).toBeTruthy()
|
||||
expect(vm.CPUs.number).toBe(1)
|
||||
expect(vm.memory.dynamic[0]).toBe(260e6)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe('.restart()', () => {
|
||||
it('restarts a VM (clean reboot)', async () => {
|
||||
await xo.call('vm.restart', {
|
||||
id: hvmWithToolsId,
|
||||
force: false,
|
||||
})
|
||||
|
||||
await waitObjectState(xo, hvmWithToolsId, vm => {
|
||||
expect(size(vm.current_operations)).toBe(0)
|
||||
expect(vm.power_state).toBe('Running')
|
||||
expect(vm.startTime).not.toBe(0)
|
||||
expect(vm.xenTools).not.toBeFalsy()
|
||||
})
|
||||
})
|
||||
|
||||
it('restarts a VM without PV drivers(clean reboot)', async () => {
|
||||
await xo.call('vm.start', { id: hvmWithoutToolsId })
|
||||
await waitObjectState(xo, hvmWithoutToolsId, vm => {
|
||||
if (size(vm.current_operations) !== 0 || vm.power_state !== 'Running')
|
||||
throw new Error('retry')
|
||||
})
|
||||
|
||||
expect(
|
||||
(await rejectionOf(
|
||||
xo.call('vm.restart', {
|
||||
id: hvmWithoutToolsId,
|
||||
force: false,
|
||||
})
|
||||
)).message
|
||||
).toBe('VM lacks feature shutdown')
|
||||
})
|
||||
|
||||
it('restarts a VM (hard reboot)', async () => {
|
||||
await xo.call('vm.restart', {
|
||||
id: hvmWithToolsId,
|
||||
force: true,
|
||||
})
|
||||
|
||||
await waitObjectState(xo, hvmWithToolsId, vm => {
|
||||
expect(size(vm.current_operations)).toBe(0)
|
||||
expect(vm.power_state).toBe('Running')
|
||||
expect(vm.startTime).not.toBe(0)
|
||||
expect(vm.xenTools).not.toBeFalsy()
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe('.suspend()', () => {
|
||||
it('suspends a VM', async () => {
|
||||
await xo.call('vm.suspend', { id: hvmWithToolsId })
|
||||
|
||||
await waitObjectState(xo, hvmWithToolsId, vm => {
|
||||
expect(size(vm.current_operations)).toBe(0)
|
||||
expect(vm.power_state).toBe('Suspended')
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe('.resume()', () => {
|
||||
it('resumes a VM', async () => {
|
||||
await xo.call('vm.resume', { id: hvmWithToolsId })
|
||||
|
||||
await waitObjectState(xo, hvmWithToolsId, vm => {
|
||||
expect(size(vm.current_operations)).toBe(0)
|
||||
expect(vm.power_state).toBe('Running')
|
||||
expect(vm.startTime).not.toBe(0)
|
||||
expect(vm.xenTools).not.toBeFalsy()
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe('.stop()', () => {
|
||||
it('stops a VM (clean shutdown)', async () => {
|
||||
await xo.call('vm.stop', {
|
||||
id: hvmWithToolsId,
|
||||
force: false,
|
||||
})
|
||||
|
||||
await waitObjectState(xo, hvmWithToolsId, vm => {
|
||||
expect(size(vm.current_operations)).toBe(0)
|
||||
expect(vm.power_state).toBe('Halted')
|
||||
expect(vm.startTime).toBe(0)
|
||||
})
|
||||
})
|
||||
|
||||
it('stops a VM without PV drivers (clean shutdown)', async () => {
|
||||
await xo.call('vm.start', { id: hvmWithoutToolsId })
|
||||
await waitObjectState(xo, hvmWithoutToolsId, vm => {
|
||||
if (size(vm.current_operations) !== 0 || vm.power_state !== 'Running')
|
||||
throw new Error('retry')
|
||||
})
|
||||
|
||||
expect(
|
||||
(await rejectionOf(
|
||||
xo.call('vm.stop', {
|
||||
id: hvmWithoutToolsId,
|
||||
force: false,
|
||||
})
|
||||
)).message
|
||||
).toBe('clean shutdown requires PV drivers')
|
||||
})
|
||||
|
||||
it('stops a VM (hard shutdown)', async () => {
|
||||
await xo.call('vm.start', { id: hvmWithToolsId })
|
||||
await waitObjectState(xo, hvmWithToolsId, vm => {
|
||||
if (size(vm.current_operations) !== 0 || vm.startTime === 0)
|
||||
throw new Error('retry')
|
||||
})
|
||||
|
||||
await xo.call('vm.stop', {
|
||||
id: hvmWithToolsId,
|
||||
force: true,
|
||||
})
|
||||
|
||||
await waitObjectState(xo, hvmWithToolsId, vm => {
|
||||
expect(size(vm.current_operations)).toBe(0)
|
||||
expect(vm.power_state).toBe('Halted')
|
||||
expect(vm.startTime).toBe(0)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe('.sets() on a halted VM', () => {
|
||||
it('sets VM parameters', async () => {
|
||||
await xo.call('vm.set', {
|
||||
id: hvmWithToolsId,
|
||||
name_label: 'haltedVmRenamed',
|
||||
name_description: 'test halted vm',
|
||||
high_availability: true,
|
||||
CPUs: 1,
|
||||
memoryMin: 20e8,
|
||||
memoryMax: 90e8,
|
||||
memoryStaticMax: 100e8,
|
||||
})
|
||||
|
||||
await waitObjectState(xo, hvmWithToolsId, vm => {
|
||||
expect(vm.name_label).toBe('haltedVmRenamed')
|
||||
expect(vm.name_description).toBe('test halted vm')
|
||||
expect(vm.high_availability).toBeTruthy()
|
||||
expect(vm.CPUs.number).toBe(1)
|
||||
expect(vm.memory.dynamic[0]).toBe(20e8)
|
||||
expect(vm.memory.dynamic[1]).toBe(90e8)
|
||||
expect(vm.memory.static[1]).toBe(100e8)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe('.recoveryStart()', () => {
|
||||
it('start a VM in recovery state', async () => {
|
||||
await xo.call('vm.recoveryStart', { id: hvmWithToolsId })
|
||||
|
||||
await waitObjectState(xo, hvmWithToolsId, vm => {
|
||||
expect(vm.boot.order).toBe('d')
|
||||
})
|
||||
|
||||
await waitObjectState(xo, hvmWithToolsId, vm => {
|
||||
expect(size(vm.current_operations)).toBe(0)
|
||||
expect(vm.power_state).toBe('Running')
|
||||
expect(vm.boot.order).not.toBe('d')
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
57
packages/xo-server-test/src/old-tests/vm/pci.spec.js
Normal file
57
packages/xo-server-test/src/old-tests/vm/pci.spec.js
Normal file
@@ -0,0 +1,57 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
import { config, waitObjectState, xo } from './../util'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
beforeAll(async () => {
|
||||
jest.setTimeout(30e3)
|
||||
})
|
||||
|
||||
describe('pci', () => {
|
||||
let vmId
|
||||
|
||||
// ----------------------------------------------------------------------
|
||||
|
||||
beforeAll(async () => {
|
||||
vmId = await xo.call('vm.create', {
|
||||
name_label: 'vmTest',
|
||||
template: config.templatesId.debianCloud,
|
||||
VIFs: [{ network: config.labPoolNetworkId }],
|
||||
VDIs: [
|
||||
{
|
||||
device: '0',
|
||||
size: 1,
|
||||
SR: config.labPoolSrId,
|
||||
type: 'user',
|
||||
},
|
||||
],
|
||||
})
|
||||
await waitObjectState(xo, vmId, vm => {
|
||||
if (vm.type !== 'VM') throw new Error('retry')
|
||||
})
|
||||
})
|
||||
|
||||
afterAll(() => xo.call('vm.delete', { id: vmId, delete_disks: true }))
|
||||
|
||||
// =================================================================
|
||||
|
||||
it('attaches the pci to the VM', async () => {
|
||||
await xo.call('vm.attachPci', {
|
||||
vm: vmId,
|
||||
pciId: config.pciId,
|
||||
})
|
||||
|
||||
await waitObjectState(xo, vmId, vm => {
|
||||
expect(vm.other.pci).toBe(config.pciId)
|
||||
})
|
||||
})
|
||||
|
||||
it('detaches the pci from the VM', async () => {
|
||||
await xo.call('vm.detachPci', { vm: vmId })
|
||||
|
||||
await waitObjectState(xo, vmId, vm => {
|
||||
expect(vm.other.pci).toBeUndefined()
|
||||
})
|
||||
})
|
||||
})
|
||||
121
packages/xo-server-test/src/old-tests/vm/snapshotting.spec.js
Normal file
121
packages/xo-server-test/src/old-tests/vm/snapshotting.spec.js
Normal file
@@ -0,0 +1,121 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
import { map, size } from 'lodash'
|
||||
|
||||
import { almostEqual, config, waitObjectState, xo } from './../util'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
beforeAll(async () => {
|
||||
jest.setTimeout(100e3)
|
||||
})
|
||||
|
||||
describe('snapshotting', () => {
|
||||
let snapshotId
|
||||
let vmId
|
||||
|
||||
// ----------------------------------------------------------------------
|
||||
|
||||
beforeAll(async () => {
|
||||
vmId = await xo.call('vm.create', {
|
||||
name_label: 'vmTest',
|
||||
name_description: 'creating a vm',
|
||||
template: config.templatesId.centOS,
|
||||
VIFs: [
|
||||
{ network: config.labPoolNetworkId },
|
||||
{ network: config.labPoolNetworkId },
|
||||
],
|
||||
VDIs: [
|
||||
{
|
||||
device: '0',
|
||||
size: 1,
|
||||
SR: config.labPoolSrId,
|
||||
type: 'user',
|
||||
},
|
||||
{
|
||||
device: '1',
|
||||
size: 1,
|
||||
SR: config.labPoolSrId,
|
||||
type: 'user',
|
||||
},
|
||||
{
|
||||
device: '2',
|
||||
size: 1,
|
||||
SR: config.labPoolSrId,
|
||||
type: 'user',
|
||||
},
|
||||
],
|
||||
})
|
||||
await waitObjectState(xo, vmId, vm => {
|
||||
if (vm.type !== 'VM') throw new Error('retry')
|
||||
})
|
||||
})
|
||||
|
||||
afterAll(() => xo.call('vm.delete', { id: vmId, delete_disks: true }))
|
||||
|
||||
// =================================================================
|
||||
|
||||
describe('.snapshot()', () => {
|
||||
let $vm
|
||||
|
||||
it('snapshots a VM', async () => {
|
||||
snapshotId = await xo.call('vm.snapshot', {
|
||||
id: vmId,
|
||||
name: 'snapshot',
|
||||
})
|
||||
|
||||
const [, snapshot] = await Promise.all([
|
||||
waitObjectState(xo, vmId, vm => {
|
||||
$vm = vm
|
||||
expect(vm.snapshots[0]).toBe(snapshotId)
|
||||
}),
|
||||
xo.getOrWaitObject(snapshotId),
|
||||
])
|
||||
|
||||
expect(snapshot.type).toBe('VM-snapshot')
|
||||
expect(snapshot.name_label).toBe('snapshot')
|
||||
expect(snapshot.$snapshot_of).toBe(vmId)
|
||||
|
||||
almostEqual(snapshot, $vm, [
|
||||
'$snapshot_of',
|
||||
'$VBDs',
|
||||
'id',
|
||||
'installTime',
|
||||
'name_label',
|
||||
'snapshot_time',
|
||||
'snapshots',
|
||||
'type',
|
||||
'uuid',
|
||||
'VIFs',
|
||||
])
|
||||
})
|
||||
})
|
||||
|
||||
describe('.revert()', () => {
|
||||
let createdSnapshotId
|
||||
|
||||
it('reverts a snapshot to its parent VM', async () => {
|
||||
await xo.call('vm.set', {
|
||||
id: vmId,
|
||||
name_label: 'vmRenamed',
|
||||
})
|
||||
await waitObjectState(xo, vmId, vm => {
|
||||
if (vm.name_label !== 'vmRenamed') throw new Error('retry')
|
||||
})
|
||||
|
||||
await xo.call('vm.revert', { id: snapshotId })
|
||||
|
||||
await waitObjectState(xo, vmId, vm => {
|
||||
expect(size(vm.current_operations)).toBe(0)
|
||||
expect(vm.name_label).toBe('vmTest')
|
||||
expect(size(vm.snapshots)).toBe(2)
|
||||
map(vm.snapshots, snapshot => {
|
||||
if (snapshot !== snapshotId) createdSnapshotId = snapshot
|
||||
})
|
||||
})
|
||||
|
||||
const createdSnapshot = await xo.getOrWaitObject(createdSnapshotId)
|
||||
expect(createdSnapshot.name_label).toBe('vmRenamed')
|
||||
})
|
||||
})
|
||||
})
|
||||
114
packages/xo-server-test/src/user/__snapshots__/user.spec.js.snap
Normal file
114
packages/xo-server-test/src/user/__snapshots__/user.spec.js.snap
Normal file
@@ -0,0 +1,114 @@
|
||||
// Jest Snapshot v1, https://goo.gl/fbAQLP
|
||||
|
||||
exports[`user .changePassword() : changes the actual user password 1`] = `true`;
|
||||
|
||||
exports[`user .changePassword() : changes the actual user password 2`] = `[JsonRpcError: invalid credentials]`;
|
||||
|
||||
exports[`user .changePassword() : fails trying to change the password with invalid oldPassword 1`] = `[JsonRpcError: invalid credentials]`;
|
||||
|
||||
exports[`user .create() : creates a user with permission 1`] = `
|
||||
Object {
|
||||
"email": "wayne2@vates.fr",
|
||||
"groups": Array [],
|
||||
"id": Any<String>,
|
||||
"permission": "user",
|
||||
"preferences": Object {},
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`user .create() : creates a user without permission 1`] = `
|
||||
Object {
|
||||
"email": "wayne1@vates.fr",
|
||||
"groups": Array [],
|
||||
"id": Any<String>,
|
||||
"preferences": Object {},
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`user .create() : fails trying to create a user with an email already used 1`] = `[JsonRpcError: unknown error from the peer]`;
|
||||
|
||||
exports[`user .create() : fails trying to create a user without email 1`] = `[JsonRpcError: invalid parameters]`;
|
||||
|
||||
exports[`user .create() : fails trying to create a user without password 1`] = `[JsonRpcError: invalid parameters]`;
|
||||
|
||||
exports[`user .delete() : fails trying to delete a user with a nonexistent user 1`] = `[JsonRpcError: no such user nonexistentId]`;
|
||||
|
||||
exports[`user .delete() : fails trying to delete itself 1`] = `[JsonRpcError: a user cannot delete itself]`;
|
||||
|
||||
exports[`user .getAll() : gets all the users created 1`] = `
|
||||
Object {
|
||||
"email": "wayne4@vates.fr",
|
||||
"groups": Array [],
|
||||
"id": Any<String>,
|
||||
"permission": "user",
|
||||
"preferences": Object {},
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`user .getAll() : gets all the users created 2`] = `
|
||||
Object {
|
||||
"email": "wayne5@vates.fr",
|
||||
"groups": Array [],
|
||||
"id": Any<String>,
|
||||
"permission": "user",
|
||||
"preferences": Object {},
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`user .set() : fails trying to set a password with a non admin user connection 1`] = `[JsonRpcError: this properties can only changed by an administrator]`;
|
||||
|
||||
exports[`user .set() : fails trying to set a permission with a non admin user connection 1`] = `[JsonRpcError: this properties can only changed by an administrator]`;
|
||||
|
||||
exports[`user .set() : fails trying to set a property of a nonexistant user 1`] = `[JsonRpcError: no such user non-existent-id]`;
|
||||
|
||||
exports[`user .set() : fails trying to set an email with a non admin user connection 1`] = `[JsonRpcError: this properties can only changed by an administrator]`;
|
||||
|
||||
exports[`user .set() : fails trying to set its own permission as a non admin user 1`] = `[JsonRpcError: this properties can only changed by an administrator]`;
|
||||
|
||||
exports[`user .set() : fails trying to set its own permission as an admin 1`] = `[JsonRpcError: a user cannot change its own permission]`;
|
||||
|
||||
exports[`user .set() : sets a password 1`] = `
|
||||
Object {
|
||||
"email": "wayne3@vates.fr",
|
||||
"groups": Array [],
|
||||
"id": Any<String>,
|
||||
"permission": "none",
|
||||
"preferences": Object {},
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`user .set() : sets a permission 1`] = `
|
||||
Object {
|
||||
"email": "wayne3@vates.fr",
|
||||
"groups": Array [],
|
||||
"id": Any<String>,
|
||||
"permission": "user",
|
||||
"preferences": Object {},
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`user .set() : sets a preference 1`] = `
|
||||
Object {
|
||||
"email": "wayne3@vates.fr",
|
||||
"groups": Array [],
|
||||
"id": Any<String>,
|
||||
"permission": "none",
|
||||
"preferences": Object {
|
||||
"filters": Object {
|
||||
"VM": Object {
|
||||
"test": "name_label: test",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`user .set() : sets an email 1`] = `
|
||||
Object {
|
||||
"email": "wayne_modified@vates.fr",
|
||||
"groups": Array [],
|
||||
"id": Any<String>,
|
||||
"permission": "none",
|
||||
"preferences": Object {},
|
||||
}
|
||||
`;
|
||||
261
packages/xo-server-test/src/user/user.spec.js
Normal file
261
packages/xo-server-test/src/user/user.spec.js
Normal file
@@ -0,0 +1,261 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
import { forOwn, keyBy } from 'lodash'
|
||||
|
||||
import xo, { testConnection, testWithOtherConnection } from '../_xoConnection'
|
||||
|
||||
const SIMPLE_USER = {
|
||||
email: 'wayne3@vates.fr',
|
||||
password: 'batman',
|
||||
}
|
||||
|
||||
const ADMIN_USER = {
|
||||
email: 'admin2@admin.net',
|
||||
password: 'admin',
|
||||
permission: 'admin',
|
||||
}
|
||||
|
||||
const withData = (data, fn) =>
|
||||
forOwn(data, (data, title) => {
|
||||
it(title, () => fn(data))
|
||||
})
|
||||
|
||||
describe('user', () => {
|
||||
describe('.create() :', () => {
|
||||
withData(
|
||||
{
|
||||
'creates a user without permission': {
|
||||
email: 'wayne1@vates.fr',
|
||||
password: 'batman1',
|
||||
},
|
||||
'creates a user with permission': {
|
||||
email: 'wayne2@vates.fr',
|
||||
password: 'batman2',
|
||||
permission: 'user',
|
||||
},
|
||||
},
|
||||
async data => {
|
||||
const userId = await xo.createTempUser(data)
|
||||
expect(typeof userId).toBe('string')
|
||||
expect(await xo.getUser(userId)).toMatchSnapshot({
|
||||
id: expect.any(String),
|
||||
})
|
||||
await testConnection({
|
||||
credentials: {
|
||||
email: data.email,
|
||||
password: data.password,
|
||||
},
|
||||
})
|
||||
}
|
||||
)
|
||||
|
||||
withData(
|
||||
{
|
||||
'fails trying to create a user without email': { password: 'batman' },
|
||||
'fails trying to create a user without password': {
|
||||
email: 'wayne@vates.fr',
|
||||
},
|
||||
},
|
||||
async data => {
|
||||
await expect(xo.createTempUser(data)).rejects.toMatchSnapshot()
|
||||
}
|
||||
)
|
||||
|
||||
it('fails trying to create a user with an email already used', async () => {
|
||||
await xo.createTempUser(SIMPLE_USER)
|
||||
await expect(xo.createTempUser(SIMPLE_USER)).rejects.toMatchSnapshot()
|
||||
})
|
||||
})
|
||||
|
||||
describe('.changePassword() :', () => {
|
||||
it('changes the actual user password', async () => {
|
||||
const user = {
|
||||
email: 'wayne7@vates.fr',
|
||||
password: 'batman',
|
||||
}
|
||||
const newPassword = 'newpwd'
|
||||
|
||||
await xo.createTempUser(user)
|
||||
await testWithOtherConnection(user, xo =>
|
||||
expect(
|
||||
xo.call('user.changePassword', {
|
||||
oldPassword: user.password,
|
||||
newPassword,
|
||||
})
|
||||
).resolves.toMatchSnapshot()
|
||||
)
|
||||
|
||||
await testConnection({
|
||||
credentials: {
|
||||
email: user.email,
|
||||
password: newPassword,
|
||||
},
|
||||
})
|
||||
|
||||
await expect(
|
||||
testConnection({
|
||||
credentials: user,
|
||||
})
|
||||
).rejects.toMatchSnapshot()
|
||||
})
|
||||
|
||||
it('fails trying to change the password with invalid oldPassword', async () => {
|
||||
await xo.createTempUser(SIMPLE_USER)
|
||||
await testWithOtherConnection(SIMPLE_USER, xo =>
|
||||
expect(
|
||||
xo.call('user.changePassword', {
|
||||
oldPassword: 'falsepwd',
|
||||
newPassword: 'newpwd',
|
||||
})
|
||||
).rejects.toMatchSnapshot()
|
||||
)
|
||||
})
|
||||
})
|
||||
|
||||
describe('.getAll() :', () => {
|
||||
it('gets all the users created', async () => {
|
||||
const userId1 = await xo.createTempUser({
|
||||
email: 'wayne4@vates.fr',
|
||||
password: 'batman',
|
||||
permission: 'user',
|
||||
})
|
||||
const userId2 = await xo.createTempUser({
|
||||
email: 'wayne5@vates.fr',
|
||||
password: 'batman',
|
||||
permission: 'user',
|
||||
})
|
||||
let users = await xo.call('user.getAll')
|
||||
expect(Array.isArray(users)).toBe(true)
|
||||
users = keyBy(users, 'id')
|
||||
expect(users[userId1]).toMatchSnapshot({ id: expect.any(String) })
|
||||
expect(users[userId2]).toMatchSnapshot({ id: expect.any(String) })
|
||||
})
|
||||
})
|
||||
|
||||
describe('.set() :', () => {
|
||||
withData(
|
||||
{
|
||||
'sets an email': { email: 'wayne_modified@vates.fr' },
|
||||
'sets a password': { password: 'newPassword' },
|
||||
'sets a permission': { permission: 'user' },
|
||||
'sets a preference': {
|
||||
preferences: {
|
||||
filters: {
|
||||
VM: {
|
||||
test: 'name_label: test',
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
async data => {
|
||||
data.id = await xo.createTempUser(SIMPLE_USER)
|
||||
expect(await xo.call('user.set', data)).toBe(true)
|
||||
expect(await xo.getUser(data.id)).toMatchSnapshot({
|
||||
id: expect.any(String),
|
||||
})
|
||||
|
||||
await testConnection({
|
||||
credentials: {
|
||||
email: data.email === undefined ? SIMPLE_USER.email : data.email,
|
||||
password:
|
||||
data.password === undefined
|
||||
? SIMPLE_USER.password
|
||||
: data.password,
|
||||
},
|
||||
})
|
||||
}
|
||||
)
|
||||
|
||||
withData(
|
||||
{
|
||||
'fails trying to set an email with a non admin user connection': {
|
||||
email: 'wayne_modified@vates.fr',
|
||||
},
|
||||
'fails trying to set a password with a non admin user connection': {
|
||||
password: 'newPassword',
|
||||
},
|
||||
'fails trying to set a permission with a non admin user connection': {
|
||||
permission: 'user',
|
||||
},
|
||||
},
|
||||
async data => {
|
||||
data.id = await xo.createTempUser({
|
||||
email: 'wayne8@vates.fr',
|
||||
password: 'batman8',
|
||||
})
|
||||
await xo.createTempUser(SIMPLE_USER)
|
||||
|
||||
await testWithOtherConnection(SIMPLE_USER, xo =>
|
||||
expect(xo.call('user.set', data)).rejects.toMatchSnapshot()
|
||||
)
|
||||
}
|
||||
)
|
||||
|
||||
withData(
|
||||
{
|
||||
'fails trying to set its own permission as a non admin user': SIMPLE_USER,
|
||||
'fails trying to set its own permission as an admin': {
|
||||
email: 'admin2@admin.net',
|
||||
password: 'batman',
|
||||
permission: 'admin',
|
||||
},
|
||||
},
|
||||
async data => {
|
||||
const id = await xo.createTempUser(data)
|
||||
const { email, password } = data
|
||||
await testWithOtherConnection({ email, password }, xo =>
|
||||
expect(
|
||||
xo.call('user.set', { id, permission: 'user' })
|
||||
).rejects.toMatchSnapshot()
|
||||
)
|
||||
}
|
||||
)
|
||||
|
||||
it('fails trying to set a property of a nonexistant user', async () => {
|
||||
await expect(
|
||||
xo.call('user.set', {
|
||||
id: 'non-existent-id',
|
||||
password: SIMPLE_USER.password,
|
||||
})
|
||||
).rejects.toMatchSnapshot()
|
||||
})
|
||||
|
||||
it.skip('fails trying to set an email already used', async () => {
|
||||
await xo.createTempUser(SIMPLE_USER)
|
||||
const userId2 = await xo.createTempUser({
|
||||
email: 'wayne6@vates.fr',
|
||||
password: 'batman',
|
||||
})
|
||||
|
||||
await expect(
|
||||
xo.call('user.set', {
|
||||
id: userId2,
|
||||
email: SIMPLE_USER.email,
|
||||
})
|
||||
).rejects.toMatchSnapshot()
|
||||
})
|
||||
})
|
||||
|
||||
describe('.delete() :', () => {
|
||||
it('deletes a user successfully with id', async () => {
|
||||
const userId = await xo.call('user.create', SIMPLE_USER)
|
||||
expect(await xo.call('user.delete', { id: userId })).toBe(true)
|
||||
expect(await xo.getUser(userId)).toBe(undefined)
|
||||
})
|
||||
|
||||
it('fails trying to delete a user with a nonexistent user', async () => {
|
||||
await expect(
|
||||
xo.call('user.delete', { id: 'nonexistentId' })
|
||||
).rejects.toMatchSnapshot()
|
||||
})
|
||||
|
||||
it('fails trying to delete itself', async () => {
|
||||
const id = await xo.createTempUser(ADMIN_USER)
|
||||
const { email, password } = ADMIN_USER
|
||||
await testWithOtherConnection({ email, password }, xo =>
|
||||
expect(xo.call('user.delete', { id })).rejects.toMatchSnapshot()
|
||||
)
|
||||
})
|
||||
})
|
||||
})
|
||||
146
packages/xo-server-test/src/util.js
Normal file
146
packages/xo-server-test/src/util.js
Normal file
@@ -0,0 +1,146 @@
import expect from 'must'
import { find, forEach, map, cloneDeep } from 'lodash'

import config from './_config'

export const rejectionOf = promise =>
  promise.then(
    value => {
      throw value
    },
    reason => reason
  )

// =================================================================

async function getAllUsers(xo) {
  return xo.call('user.getAll')
}

export async function getUser(xo, id) {
  const users = await getAllUsers(xo)
  return find(users, { id })
}

export async function createUser(xo, userIds, params) {
  const userId = await xo.call('user.create', params)
  userIds.push(userId)
  return userId
}

export async function deleteUsers(xo, userIds) {
  await Promise.all(
    map(userIds, userId => xo.call('user.delete', { id: userId }))
  )
}

// ==================================================================

export function getAllHosts(xo) {
  return xo.objects.indexes.type.host
}

export function getOneHost(xo) {
  const hosts = getAllHosts(xo)
  for (const id in hosts) {
    return hosts[id]
  }

  throw new Error('no hosts found')
}

// ==================================================================

export async function getNetworkId(xo) {
  const networks = xo.objects.indexes.type.network
  const network = find(networks, { name_label: config.network })
  return network.id
}

// ==================================================================

export async function getVmXoTestPvId(xo) {
  const vms = xo.objects.indexes.type.VM
  const vm = find(vms, { name_label: config.pvVm })
  return vm.id
}

export async function getVmToMigrateId(xo) {
  const vms = xo.objects.indexes.type.VM
  const vm = find(vms, { name_label: config.vmToMigrate })
  return vm.id
}

// ==================================================================

export async function getSrId(xo) {
  const host = getOneHost(xo)
  const pool = await xo.getOrWaitObject(host.$poolId)
  return pool.default_SR
}

// ==================================================================

export async function jobTest(xo) {
  const vmId = await getVmXoTestPvId(xo)
  const jobId = await xo.call('job.create', {
    job: {
      type: 'call',
      key: 'snapshot',
      method: 'vm.snapshot',
      paramsVector: {
        type: 'cross product',
        items: [
          {
            type: 'set',
            values: [
              {
                id: vmId,
                name: 'snapshot',
              },
            ],
          },
        ],
      },
    },
  })
  return jobId
}

export async function scheduleTest(xo, jobId) {
  const schedule = await xo.call('schedule.create', {
    jobId: jobId,
    cron: '* * * * * *',
    enabled: false,
  })
  return schedule
}

export async function getSchedule(xo, id) {
  const schedule = xo.call('schedule.get', { id: id })
  return schedule
}

// ==================================================================

export function deepDelete(obj, path) {
  const lastIndex = path.length - 1
  for (let i = 0; i < lastIndex; i++) {
    obj = obj[path[i]]

    if (typeof obj !== 'object' || obj === null) {
      return
    }
  }
  delete obj[path[lastIndex]]
}

export function almostEqual(actual, expected, ignoredAttributes) {
  actual = cloneDeep(actual)
  expected = cloneDeep(expected)
  forEach(ignoredAttributes, ignoredAttribute => {
    deepDelete(actual, ignoredAttribute.split('.'))
    deepDelete(expected, ignoredAttribute.split('.'))
  })
  expect(actual).to.be.eql(expected)
}
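
These helpers are meant to be composed inside the specs: rejectionOf turns an expected rejection into a resolved value, and almostEqual compares two objects while ignoring the attributes listed as dotted paths (via deepDelete). A short usage sketch inside an async test — the vm.start call, the ids and the ignored attributes are illustrative only:

// Capture the rejection reason instead of letting the promise throw.
const error = await rejectionOf(xo.call('vm.start', { id: 'nonexistent-id' }))
// `error` can now be inspected or snapshotted.

// Compare two VM objects while ignoring fields that legitimately differ.
almostEqual(actualVm, expectedVm, ['id', 'power_state', 'current_operations'])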
@@ -34,7 +34,7 @@
|
||||
"dependencies": {
|
||||
"nodemailer": "^6.1.0",
|
||||
"nodemailer-markdown": "^1.0.1",
|
||||
"promise-toolbox": "^0.12.1"
|
||||
"promise-toolbox": "^0.13.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/cli": "^7.0.0",
|
||||
|
||||
@@ -33,7 +33,7 @@
|
||||
"node": ">=6"
|
||||
},
|
||||
"dependencies": {
|
||||
"promise-toolbox": "^0.12.1",
|
||||
"promise-toolbox": "^0.13.0",
|
||||
"slack-node": "^0.1.8"
|
||||
},
|
||||
"devDependencies": {
|
||||
|
||||
@@ -42,7 +42,7 @@
|
||||
"html-minifier": "^4.0.0",
|
||||
"human-format": "^0.10.0",
|
||||
"lodash": "^4.17.4",
|
||||
"promise-toolbox": "^0.12.1"
|
||||
"promise-toolbox": "^0.13.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/cli": "^7.0.0",
|
||||
|
||||
@@ -29,6 +29,9 @@ guessVhdSizeOnImport = false
# be turned for investigation by the administrator.
verboseApiLogsOnErrors = false

# if no events could be fetched during this delay, the server will be marked as disconnected
xapiMarkDisconnectedDelay = '5 minutes'

# https://github.com/websockets/ws#websocket-compression
[apiWebSocketOptions]
perMessageDeflate = { threshold = 524288 } # 512kiB
@@ -49,6 +52,11 @@ maxTokenValidity = '0.5 year'
# Delay for which backups listing on a remote is cached
listingDebounce = '1 min'

# Duration for which we can wait for the backup size before returning
#
# It should be short to avoid blocking the display of the available backups.
vmBackupSizeTimeout = '2 seconds'

# Helmet handles HTTP security via headers
#
# https://helmetjs.github.io/docs/
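
Both additions are human-readable durations. Elsewhere in this diff (the backups-ng hunk) vmBackupSizeTimeout is converted to milliseconds with xo-server's parseDuration helper before being handed to promise-toolbox's timeout(); a minimal sketch of that conversion, with the import path shortened for the example:

// Sketch — parseDuration wraps the ms-style parsing already used by xo-server.
import parseDuration from './_parseDuration'

const timeoutMs = parseDuration('2 seconds') // 2000
const disconnectDelayMs = parseDuration('5 minutes') // 300000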
@@ -1,7 +1,7 @@
|
||||
{
|
||||
"private": true,
|
||||
"name": "xo-server",
|
||||
"version": "5.42.0",
|
||||
"version": "5.46.0",
|
||||
"license": "AGPL-3.0",
|
||||
"description": "Server part of Xen-Orchestra",
|
||||
"keywords": [
|
||||
@@ -38,7 +38,7 @@
|
||||
"@xen-orchestra/cron": "^1.0.3",
|
||||
"@xen-orchestra/defined": "^0.0.0",
|
||||
"@xen-orchestra/emit-async": "^0.0.0",
|
||||
"@xen-orchestra/fs": "^0.9.0",
|
||||
"@xen-orchestra/fs": "^0.10.1",
|
||||
"@xen-orchestra/log": "^0.1.4",
|
||||
"@xen-orchestra/mixin": "^0.0.0",
|
||||
"ajv": "^6.1.1",
|
||||
@@ -102,7 +102,7 @@
|
||||
"passport": "^0.4.0",
|
||||
"passport-local": "^1.0.0",
|
||||
"pretty-format": "^24.0.0",
|
||||
"promise-toolbox": "^0.12.1",
|
||||
"promise-toolbox": "^0.13.0",
|
||||
"proxy-agent": "^3.0.0",
|
||||
"pug": "^2.0.0-rc.4",
|
||||
"pump": "^3.0.0",
|
||||
@@ -123,7 +123,7 @@
|
||||
"value-matcher": "^0.2.0",
|
||||
"vhd-lib": "^0.7.0",
|
||||
"ws": "^6.0.0",
|
||||
"xen-api": "^0.25.1",
|
||||
"xen-api": "^0.27.1",
|
||||
"xml2js": "^0.4.19",
|
||||
"xo-acl-resolver": "^0.4.1",
|
||||
"xo-collection": "^0.4.1",
|
||||
|
||||
@@ -123,10 +123,14 @@ getJob.params = {
export async function runJob({
id,
schedule,
settings,
vm,
vms = vm !== undefined ? [vm] : undefined,
}) {
return this.runJobSequence([id], await this.getSchedule(schedule), vms)
return this.runJobSequence([id], await this.getSchedule(schedule), {
settings,
vms,
})
}

runJob.permission = 'admin'
@@ -138,6 +142,13 @@ runJob.params = {
schedule: {
type: 'string',
},
settings: {
type: 'object',
properties: {
'*': { type: 'object' },
},
optional: true,
},
vm: {
type: 'string',
optional: true,
@@ -183,6 +194,7 @@ getLogs.params = {
after: { type: ['number', 'string'], optional: true },
before: { type: ['number', 'string'], optional: true },
limit: { type: 'number', optional: true },
'*': { type: 'any' },
}

// -----------------------------------------------------------------------------
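
job.runJob now accepts per-run settings and an explicit vms list on top of the schedule. A hedged example of the resulting client call — the ids are illustrative and the settings keys follow the same convention as the job's own settings (a VM id, or '' for run-wide defaults), which is an assumption based on how they are merged in the backups-ng hunk further down:

// Sketch — trigger a one-off run of a backup job restricted to a single VM.
await xo.call('job.runJob', {
  id: jobId,
  schedule: scheduleId,
  vms: [vmId],
  settings: {
    '': { concurrency: 1 }, // run-wide default
    [vmId]: { snapshotRetention: 1 }, // per-VM override
  },
})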
@@ -4,29 +4,34 @@ import { format, JsonRpcError } from 'json-rpc-peer'
|
||||
|
||||
export async function set({
|
||||
host,
|
||||
multipathing,
|
||||
|
||||
// TODO: use camel case.
|
||||
iscsiIqn,
|
||||
multipathing,
|
||||
name_label: nameLabel,
|
||||
name_description: nameDescription,
|
||||
}) {
|
||||
const xapi = this.getXapi(host)
|
||||
const hostId = host._xapiId
|
||||
host = this.getXapiObject(host)
|
||||
|
||||
if (multipathing !== undefined) {
|
||||
await xapi.setHostMultipathing(hostId, multipathing)
|
||||
}
|
||||
|
||||
return xapi.setHostProperties(hostId, {
|
||||
nameLabel,
|
||||
nameDescription,
|
||||
})
|
||||
await Promise.all([
|
||||
iscsiIqn !== undefined &&
|
||||
(host.iscsi_iqn !== undefined
|
||||
? host.set_iscsi_iqn(iscsiIqn)
|
||||
: host.update_other_config(
|
||||
'iscsi_iqn',
|
||||
iscsiIqn === '' ? null : iscsiIqn
|
||||
)),
|
||||
nameDescription !== undefined && host.set_name_description(nameDescription),
|
||||
nameLabel !== undefined && host.set_name_label(nameLabel),
|
||||
multipathing !== undefined &&
|
||||
host.$xapi.setHostMultipathing(host.$id, multipathing),
|
||||
])
|
||||
}
|
||||
|
||||
set.description = 'changes the properties of an host'
|
||||
|
||||
set.params = {
|
||||
id: { type: 'string' },
|
||||
iscsiIqn: { type: 'string', optional: true },
|
||||
name_label: {
|
||||
type: 'string',
|
||||
optional: true,
|
||||
@@ -215,6 +220,25 @@ emergencyShutdownHost.resolve = {
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
export async function isHostServerTimeConsistent({ host }) {
|
||||
try {
|
||||
await this.getXapi(host).assertConsistentHostServerTime(host._xapiRef)
|
||||
return true
|
||||
} catch (e) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
isHostServerTimeConsistent.params = {
|
||||
host: { type: 'string' },
|
||||
}
|
||||
|
||||
isHostServerTimeConsistent.resolve = {
|
||||
host: ['host', 'host', 'administrate'],
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
export function stats({ host, granularity }) {
|
||||
return this.getXapiHostStats(host._xapiId, granularity)
|
||||
}
|
||||
@@ -269,3 +293,19 @@ installSupplementalPack.params = {
|
||||
installSupplementalPack.resolve = {
|
||||
host: ['host', 'host', 'admin'],
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------

export function isHyperThreadingEnabled({ host }) {
return this.getXapi(host).isHyperThreadingEnabled(host._xapiId)
}

isHyperThreadingEnabled.description = 'get hyper-threading information'

isHyperThreadingEnabled.params = {
id: { type: 'string' },
}

isHyperThreadingEnabled.resolve = {
host: ['id', 'host', 'administrate'],
}
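
A hedged usage example for the new call; per the XAPI-side implementation near the end of this diff, it resolves to a boolean, or to null when the hyperthreading.py plugin is missing on the host:

// Sketch — hostId is illustrative.
const hyperThreading = await xo.call('host.isHyperThreadingEnabled', { id: hostId })
if (hyperThreading === null) {
  console.log('hyperthreading.py plugin is not available on this host')
}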
@@ -85,18 +85,26 @@ createBonded.description =
|
||||
// ===================================================================
|
||||
|
||||
export async function set({
|
||||
network,
|
||||
|
||||
automatic,
|
||||
defaultIsLocked,
|
||||
name_description: nameDescription,
|
||||
name_label: nameLabel,
|
||||
network,
|
||||
}) {
|
||||
await this.getXapi(network).setNetworkProperties(network._xapiId, {
|
||||
automatic,
|
||||
defaultIsLocked,
|
||||
nameDescription,
|
||||
nameLabel,
|
||||
})
|
||||
network = this.getXapiObject(network)
|
||||
|
||||
await Promise.all([
|
||||
automatic !== undefined &&
|
||||
network.update_other_config('automatic', automatic ? 'true' : null),
|
||||
defaultIsLocked !== undefined &&
|
||||
network.set_default_locking_mode(
|
||||
defaultIsLocked ? 'disabled' : 'unlocked'
|
||||
),
|
||||
nameDescription !== undefined &&
|
||||
network.set_name_description(nameDescription),
|
||||
nameLabel !== undefined && network.set_name_label(nameLabel),
|
||||
])
|
||||
}
|
||||
|
||||
set.params = {
|
||||
|
||||
@@ -1,5 +1,7 @@
// TODO: too low level, move into host.

import { filter, find } from 'lodash'

import { IPV4_CONFIG_MODES, IPV6_CONFIG_MODES } from '../xapi'

export function getIpv4ConfigurationModes() {
@@ -15,7 +17,17 @@ export function getIpv6ConfigurationModes() {

async function delete_({ pif }) {
// TODO: check if PIF is attached before
await this.getXapi(pif).callAsync('PIF.destroy', pif._xapiRef)
const xapi = this.getXapi(pif)

const tunnels = filter(xapi.objects.all, { $type: 'tunnel' })
const tunnel = find(tunnels, { access_PIF: pif._xapiRef })
if (tunnel != null) {
await xapi.callAsync('PIF.unplug', pif._xapiRef)
await xapi.callAsync('tunnel.destroy', tunnel.$ref)
return
}

await xapi.callAsync('PIF.destroy', pif._xapiRef)
}
export { delete_ as delete }

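
pif.delete now detects whether the PIF is the access PIF of a tunnel and, in that case, unplugs it and destroys the tunnel object instead of destroying the PIF itself. Client usage is unchanged; a hedged example — the exact parameter name (pif vs id) is declared outside this hunk, so it is an assumption here:

// Sketch — delete a PIF; tunnel access PIFs are now handled transparently.
await xo.call('pif.delete', { pif: pifId })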
@@ -5,14 +5,15 @@ import { format, JsonRPcError } from 'json-rpc-peer'
|
||||
export async function set({
|
||||
pool,
|
||||
|
||||
// TODO: use camel case.
|
||||
name_description: nameDescription,
|
||||
name_label: nameLabel,
|
||||
}) {
|
||||
await this.getXapi(pool).setPoolProperties({
|
||||
nameDescription,
|
||||
nameLabel,
|
||||
})
|
||||
pool = this.getXapiObject(pool)
|
||||
|
||||
await Promise.all([
|
||||
nameDescription !== undefined && pool.set_name_description(nameDescription),
|
||||
nameLabel !== undefined && pool.set_name_label(nameLabel),
|
||||
])
|
||||
}
|
||||
|
||||
set.params = {
|
||||
@@ -161,45 +162,30 @@ getPatchesDifference.resolve = {
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
export async function mergeInto({ source, target, force }) {
|
||||
const sourceHost = this.getObject(source.master)
|
||||
const targetHost = this.getObject(target.master)
|
||||
|
||||
if (sourceHost.productBrand !== targetHost.productBrand) {
|
||||
throw new Error(
|
||||
`a ${sourceHost.productBrand} pool cannot be merged into a ${
|
||||
targetHost.productBrand
|
||||
} pool`
|
||||
)
|
||||
}
|
||||
|
||||
const counterDiff = this.getPatchesDifference(source.master, target.master)
|
||||
if (counterDiff.length > 0) {
|
||||
const targetXapi = this.getXapi(target)
|
||||
await targetXapi.installPatches({
|
||||
patches: await targetXapi.findPatches(counterDiff),
|
||||
})
|
||||
}
|
||||
|
||||
const diff = this.getPatchesDifference(target.master, source.master)
|
||||
if (diff.length > 0) {
|
||||
const sourceXapi = this.getXapi(source)
|
||||
await sourceXapi.installPatches({
|
||||
patches: await sourceXapi.findPatches(diff),
|
||||
})
|
||||
}
|
||||
|
||||
await this.mergeXenPools(source._xapiId, target._xapiId, force)
|
||||
export async function mergeInto({ source, sources = [source], target, force }) {
|
||||
await this.checkPermissions(
|
||||
this.user.id,
|
||||
sources.map(source => [source, 'administrate'])
|
||||
)
|
||||
return this.mergeInto({
|
||||
force,
|
||||
sources,
|
||||
target,
|
||||
})
|
||||
}
|
||||
|
||||
mergeInto.params = {
|
||||
force: { type: 'boolean', optional: true },
|
||||
source: { type: 'string' },
|
||||
source: { type: 'string', optional: true },
|
||||
sources: {
|
||||
type: 'array',
|
||||
items: { type: 'string' },
|
||||
optional: true,
|
||||
},
|
||||
target: { type: 'string' },
|
||||
}
|
||||
|
||||
mergeInto.resolve = {
|
||||
source: ['source', 'pool', 'administrate'],
|
||||
target: ['target', 'pool', 'administrate'],
|
||||
}
|
||||
|
||||
|
||||
@@ -100,20 +100,24 @@ set.params = {
|
||||
optional: true,
|
||||
type: 'boolean',
|
||||
},
|
||||
readOnly: {
|
||||
optional: true,
|
||||
type: 'boolean',
|
||||
},
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
export async function connect({ id }) {
|
||||
export async function enable({ id }) {
|
||||
this.updateXenServer(id, { enabled: true })::ignoreErrors()
|
||||
await this.connectXenServer(id)
|
||||
}
|
||||
|
||||
connect.description = 'connect a Xen server'
|
||||
enable.description = 'enable a Xen server'
|
||||
|
||||
connect.permission = 'admin'
|
||||
enable.permission = 'admin'
|
||||
|
||||
connect.params = {
|
||||
enable.params = {
|
||||
id: {
|
||||
type: 'string',
|
||||
},
|
||||
@@ -121,16 +125,16 @@ connect.params = {
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
export async function disconnect({ id }) {
|
||||
export async function disable({ id }) {
|
||||
this.updateXenServer(id, { enabled: false })::ignoreErrors()
|
||||
await this.disconnectXenServer(id)
|
||||
}
|
||||
|
||||
disconnect.description = 'disconnect a Xen server'
|
||||
disable.description = 'disable a Xen server'
|
||||
|
||||
disconnect.permission = 'admin'
|
||||
disable.permission = 'admin'
|
||||
|
||||
disconnect.params = {
|
||||
disable.params = {
|
||||
id: {
|
||||
type: 'string',
|
||||
},
|
||||
|
||||
@@ -10,14 +10,15 @@ import { forEach, parseXml } from '../utils'
|
||||
export async function set({
|
||||
sr,
|
||||
|
||||
// TODO: use camel case.
|
||||
name_description: nameDescription,
|
||||
name_label: nameLabel,
|
||||
}) {
|
||||
await this.getXapi(sr).setSrProperties(sr._xapiId, {
|
||||
nameDescription,
|
||||
nameLabel,
|
||||
})
|
||||
sr = this.getXapiObject(sr)
|
||||
|
||||
await Promise.all([
|
||||
nameDescription !== undefined && sr.set_name_description(nameDescription),
|
||||
nameLabel !== undefined && sr.set_name_label(nameLabel),
|
||||
])
|
||||
}
|
||||
|
||||
set.params = {
|
||||
@@ -179,6 +180,35 @@ createIso.resolve = {
|
||||
host: ['host', 'host', 'administrate'],
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------

export async function createFile({
host,
nameLabel,
nameDescription,
location,
}) {
const xapi = this.getXapi(host)
return xapi.createSr({
hostRef: host._xapiRef,
name_label: nameLabel,
name_description: nameDescription,
type: 'file',
device_config: { location },
})
}

createFile.params = {
host: { type: 'string' },
nameLabel: { type: 'string' },
nameDescription: { type: 'string' },
location: { type: 'string' },
}

createFile.resolve = {
host: ['host', 'host', 'administrate'],
}
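
createFile exposes XAPI's file-type SR driver, which backs an SR with a plain directory on the host, and resolves to the new SR's uuid (see the createSr helper added to the xapi mixins further down). A hedged example — the host id and path are illustrative:

// Sketch — create a directory-backed SR on a given host.
const srUuid = await xo.call('sr.createFile', {
  host: hostId,
  nameLabel: 'local-file-sr',
  nameDescription: 'directory-backed SR created through xo-server',
  location: '/srv/xo-file-sr',
})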
// -------------------------------------------------------------------
|
||||
// NFS SR
|
||||
|
||||
@@ -361,6 +391,58 @@ createExt.resolve = {
|
||||
host: ['host', 'host', 'administrate'],
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
// This function helps to detect all ZFS pools
|
||||
// Return a dict of pools with their parameters { <poolname>: {<paramdict>}}
|
||||
// example output (the parameter mountpoint is of interest):
|
||||
// {"tank":
|
||||
// {
|
||||
// "setuid": "on", "relatime": "off", "referenced": "24K", "written": "24K", "zoned": "off", "primarycache": "all",
|
||||
// "logbias": "latency", "creation": "Mon May 27 17:24 2019", "sync": "standard", "snapdev": "hidden",
|
||||
// "dedup": "off", "sharenfs": "off", "usedbyrefreservation": "0B", "sharesmb": "off", "createtxg": "1",
|
||||
// "canmount": "on", "mountpoint": "/tank", "casesensitivity": "sensitive", "utf8only": "off", "xattr": "on",
|
||||
// "dnodesize": "legacy", "mlslabel": "none", "objsetid": "54", "defcontext": "none", "rootcontext": "none",
|
||||
// "mounted": "yes", "compression": "off", "overlay": "off", "logicalused": "47K", "usedbysnapshots": "0B",
|
||||
// "filesystem_count": "none", "copies": "1", "snapshot_limit": "none", "aclinherit": "restricted",
|
||||
// "compressratio": "1.00x", "readonly": "off", "version": "5", "normalization": "none", "filesystem_limit": "none",
|
||||
// "type": "filesystem", "secondarycache": "all", "refreservation": "none", "available": "17.4G", "used": "129K",
|
||||
// "exec": "on", "refquota": "none", "refcompressratio": "1.00x", "quota": "none", "keylocation": "none",
|
||||
// "snapshot_count": "none", "fscontext": "none", "vscan": "off", "reservation": "none", "atime": "on",
|
||||
// "recordsize": "128K", "usedbychildren": "105K", "usedbydataset": "24K", "guid": "656061077639704004",
|
||||
// "pbkdf2iters": "0", "checksum": "on", "special_small_blocks": "0", "redundant_metadata": "all",
|
||||
// "volmode": "default", "devices": "on", "keyformat": "none", "logicalreferenced": "12K", "acltype": "off",
|
||||
// "nbmand": "off", "context": "none", "encryption": "off", "snapdir": "hidden"}}
|
||||
export async function probeZfs({ host }) {
|
||||
const xapi = this.getXapi(host)
|
||||
try {
|
||||
const result = await xapi.call(
|
||||
'host.call_plugin',
|
||||
host._xapiRef,
|
||||
'zfs.py',
|
||||
'list_zfs_pools',
|
||||
{}
|
||||
)
|
||||
return JSON.parse(result)
|
||||
} catch (error) {
|
||||
if (
|
||||
error.code === 'XENAPI_MISSING_PLUGIN' ||
|
||||
error.code === 'UNKNOWN_XENAPI_PLUGIN_FUNCTION'
|
||||
) {
|
||||
return {}
|
||||
} else {
|
||||
throw error
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
probeZfs.params = {
|
||||
host: { type: 'string' },
|
||||
}
|
||||
|
||||
probeZfs.resolve = {
|
||||
host: ['host', 'host', 'administrate'],
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
// This function helps to detect all NFS shares (exports) on a NFS server
|
||||
// Return a table of exports with their paths and ACLs
|
||||
|
||||
@@ -1,5 +1,5 @@
export async function add({ tag, object }) {
await this.getXapi(object).addTag(object._xapiId, tag)
await this.getXapiObject(object).add_tags(tag)
}

add.description = 'add a new tag to an object'
@@ -16,7 +16,7 @@ add.params = {
// -------------------------------------------------------------------

export async function remove({ tag, object }) {
await this.getXapi(object).removeTag(object._xapiId, tag)
await this.getXapiObject(object).remove_tags(tag)
}

remove.description = 'remove an existing tag from an object'
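
Both methods now go through the typed XAPI object proxies (add_tags / remove_tags) instead of the generic addTag/removeTag wrappers, which are dropped from the Xapi class later in this diff. Client usage is unchanged; a hedged example, assuming the usual id/tag parameters declared outside this hunk:

// Sketch — tag an object, then remove the tag; the id is illustrative.
await xo.call('tag.add', { id: vmId, tag: 'needs-review' })
await xo.call('tag.remove', { id: vmId, tag: 'needs-review' })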
@@ -1,3 +1,6 @@
|
||||
import assert from 'assert'
|
||||
import { fromEvent } from 'promise-toolbox'
|
||||
|
||||
export function getPermissionsForUser({ userId }) {
|
||||
return this.getPermissionsForUser(userId)
|
||||
}
|
||||
@@ -86,3 +89,35 @@ copyVm.resolve = {
|
||||
vm: ['vm', 'VM'],
|
||||
sr: ['sr', 'SR'],
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
export async function changeConnectedXapiHostname({
|
||||
hostname,
|
||||
newObject,
|
||||
oldObject,
|
||||
}) {
|
||||
const xapi = this.getXapi(oldObject)
|
||||
const { pool: currentPool } = xapi
|
||||
|
||||
xapi._setUrl({ ...xapi._url, hostname })
|
||||
await fromEvent(xapi.objects, 'finish')
|
||||
if (xapi.pool.$id === currentPool.$id) {
|
||||
await fromEvent(xapi.objects, 'finish')
|
||||
}
|
||||
|
||||
assert(xapi.pool.$id !== currentPool.$id)
|
||||
assert.doesNotThrow(() => this.getXapi(newObject))
|
||||
assert.throws(() => this.getXapi(oldObject))
|
||||
}
|
||||
|
||||
changeConnectedXapiHostname.description =
|
||||
'change the connected XAPI hostname and check if the pool and the local cache are updated'
|
||||
|
||||
changeConnectedXapiHostname.permission = 'admin'
|
||||
|
||||
changeConnectedXapiHostname.params = {
|
||||
hostname: { type: 'string' },
|
||||
newObject: { type: 'string', description: "new connection's XO object" },
|
||||
oldObject: { type: 'string', description: "current connection's XO object" },
|
||||
}
|
||||
|
||||
@@ -320,6 +320,11 @@ create.params = {
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
hvmBootFirmware: { type: 'string', optional: true },
|
||||
|
||||
// other params are passed to `editVm`
|
||||
'*': { type: 'any' },
|
||||
}
|
||||
|
||||
create.resolve = {
|
||||
@@ -560,6 +565,8 @@ set.params = {
|
||||
// Identifier of the VM to update.
|
||||
id: { type: 'string' },
|
||||
|
||||
auto_poweron: { type: 'boolean', optional: true },
|
||||
|
||||
name_label: { type: 'string', optional: true },
|
||||
|
||||
name_description: { type: 'string', optional: true },
|
||||
@@ -621,6 +628,11 @@ set.params = {

// set the VM network interface controller
nicType: { type: ['string', 'null'], optional: true },

// set the VM boot firmware mode
hvmBootFirmware: { type: ['string', 'null'], optional: true },

virtualizationMode: { type: 'string', optional: true },
}

set.resolve = {
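
Together with the hvmBootFirmware property added to editVm further down, this lets a client switch a guest's boot firmware through vm.set. A hedged example — the id is illustrative and 'uefi' mirrors the HVM_boot_params.firmware values accepted by recent XCP-ng/XenServer releases:

// Sketch — switch an HVM guest to UEFI boot; pass null to clear the override.
await xo.call('vm.set', { id: vmId, hvmBootFirmware: 'uefi' })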
@@ -1124,7 +1136,10 @@ resume.resolve = {
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
export function revert({ snapshot, snapshotBefore }) {
|
||||
export async function revert({ snapshot, snapshotBefore }) {
|
||||
await this.checkPermissions(this.user.id, [
|
||||
[snapshot.$snapshot_of, 'operate'],
|
||||
])
|
||||
return this.getXapi(snapshot).revertVm(snapshot._xapiId, snapshotBefore)
|
||||
}
|
||||
|
||||
@@ -1134,7 +1149,7 @@ revert.params = {
|
||||
}
|
||||
|
||||
revert.resolve = {
|
||||
snapshot: ['snapshot', 'VM-snapshot', 'administrate'],
|
||||
snapshot: ['snapshot', 'VM-snapshot', 'view'],
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
@@ -1360,9 +1375,7 @@ createInterface.resolve = {
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
export async function attachPci({ vm, pciId }) {
|
||||
const xapi = this.getXapi(vm)
|
||||
|
||||
await xapi.call('VM.add_to_other_config', vm._xapiRef, 'pci', pciId)
|
||||
await this.getXapiObject(vm).update_other_config('pci', pciId)
|
||||
}
|
||||
|
||||
attachPci.params = {
|
||||
@@ -1377,9 +1390,7 @@ attachPci.resolve = {
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
export async function detachPci({ vm }) {
|
||||
const xapi = this.getXapi(vm)
|
||||
|
||||
await xapi.call('VM.remove_from_other_config', vm._xapiRef, 'pci')
|
||||
await this.getXapiObject(vm).update_other_config('pci', null)
|
||||
}
|
||||
|
||||
detachPci.params = {
|
||||
@@ -1416,7 +1427,7 @@ export async function setBootOrder({ vm, order }) {
|
||||
throw invalidParameters('You can only set the boot order on a HVM guest')
|
||||
}
|
||||
|
||||
await this.getXapiObject(vm).set_HVM_boot_params({ order })
|
||||
await this.getXapiObject(vm).update_HVM_boot_params('order', order)
|
||||
}
|
||||
|
||||
setBootOrder.params = {
|
||||
|
||||
@@ -55,6 +55,7 @@ getAllObjects.description = 'Returns all XO objects'
|
||||
getAllObjects.params = {
|
||||
filter: { type: 'object', optional: true },
|
||||
limit: { type: 'number', optional: true },
|
||||
ndjson: { type: 'boolean', optional: true },
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
@@ -446,9 +446,7 @@ const createNetworkAndInsertHosts = defer(async function(
|
||||
})
|
||||
if (result.exit !== 0) {
|
||||
throw invalidParameters(
|
||||
`Could not ping ${master.name_label}->${
|
||||
address.pif.$host.name_label
|
||||
} (${address.address}) \n${result.stdout}`
|
||||
`Could not ping ${master.name_label}->${address.pif.$host.name_label} (${address.address}) \n${result.stdout}`
|
||||
)
|
||||
}
|
||||
})
|
||||
@@ -887,10 +885,10 @@ async function createVDIOnLVMWithoutSizeLimit(xapi, lvmSr, diskSize) {
|
||||
await xapi.callAsync('SR.scan', xapi.getObject(lvmSr).$ref)
|
||||
const vdi = find(xapi.getObject(lvmSr).$VDIs, vdi => vdi.uuid === uuid)
|
||||
if (vdi != null) {
|
||||
await xapi.setSrProperties(vdi.$ref, {
|
||||
nameLabel: 'xosan_data',
|
||||
nameDescription: 'Created by XO',
|
||||
})
|
||||
await Promise.all([
|
||||
vdi.set_name_description('Created by XO'),
|
||||
vdi.set_name_label('xosan_data'),
|
||||
])
|
||||
return vdi
|
||||
}
|
||||
}
|
||||
@@ -1050,9 +1048,7 @@ export async function replaceBrick({
|
||||
CURRENT_POOL_OPERATIONS[poolId] = { ...OPERATION_OBJECT, state: 1 }
|
||||
await glusterCmd(
|
||||
glusterEndpoint,
|
||||
`volume replace-brick xosan ${previousBrick} ${
|
||||
addressAndHost.brickName
|
||||
} commit force`
|
||||
`volume replace-brick xosan ${previousBrick} ${addressAndHost.brickName} commit force`
|
||||
)
|
||||
await glusterCmd(glusterEndpoint, 'peer detach ' + previousIp)
|
||||
data.nodes.splice(nodeIndex, 1, {
|
||||
@@ -1124,11 +1120,9 @@ async function _prepareGlusterVm(
|
||||
}
|
||||
}
|
||||
}
|
||||
await xapi.addTag(newVM.$id, 'XOSAN')
|
||||
await newVM.add_tags('XOSAN')
|
||||
await xapi.editVm(newVM, {
|
||||
name_label: `XOSAN - ${lvmSr.name_label} - ${
|
||||
host.name_label
|
||||
} ${labelSuffix}`,
|
||||
name_label: `XOSAN - ${lvmSr.name_label} - ${host.name_label} ${labelSuffix}`,
|
||||
name_description: 'Xosan VM storage',
|
||||
memory: memorySize,
|
||||
})
|
||||
|
||||
@@ -13,7 +13,6 @@ import includes from 'lodash/includes'
|
||||
import proxyConsole from './proxy-console'
|
||||
import pw from 'pw'
|
||||
import serveStatic from 'serve-static'
|
||||
import startsWith from 'lodash/startsWith'
|
||||
import stoppable from 'stoppable'
|
||||
import WebServer from 'http-server-plus'
|
||||
import WebSocket from 'ws'
|
||||
@@ -332,7 +331,7 @@ async function registerPluginsInPath(path) {
|
||||
|
||||
await Promise.all(
|
||||
mapToArray(files, name => {
|
||||
if (startsWith(name, PLUGIN_PREFIX)) {
|
||||
if (name.startsWith(PLUGIN_PREFIX)) {
|
||||
return registerPluginWrapper.call(
|
||||
this,
|
||||
`${path}/${name}`,
|
||||
@@ -428,7 +427,7 @@ const setUpProxies = (express, opts, xo) => {
|
||||
const { url } = req
|
||||
|
||||
for (const prefix in opts) {
|
||||
if (startsWith(url, prefix)) {
|
||||
if (url.startsWith(prefix)) {
|
||||
const target = opts[prefix]
|
||||
|
||||
proxy.web(req, res, {
|
||||
@@ -452,7 +451,7 @@ const setUpProxies = (express, opts, xo) => {
|
||||
const { url } = req
|
||||
|
||||
for (const prefix in opts) {
|
||||
if (startsWith(url, prefix)) {
|
||||
if (url.startsWith(prefix)) {
|
||||
const target = opts[prefix]
|
||||
|
||||
proxy.ws(req, socket, head, {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import Collection from '../collection/redis'
|
||||
import Model from '../model'
|
||||
import { forEach } from '../utils'
|
||||
import { forEach, serializeError } from '../utils'
|
||||
|
||||
import { parseProp } from './utils'
|
||||
|
||||
@@ -30,13 +30,28 @@ export class Servers extends Collection {
|
||||
|
||||
// Deserializes
|
||||
forEach(servers, server => {
|
||||
server.allowUnauthorized = server.allowUnauthorized === 'true'
|
||||
server.enabled = server.enabled === 'true'
|
||||
if (server.error) {
|
||||
server.error = parseProp('server', server, 'error', '')
|
||||
} else {
|
||||
delete server.error
|
||||
}
|
||||
server.readOnly = server.readOnly === 'true'
|
||||
})
|
||||
|
||||
return servers
|
||||
}
|
||||
|
||||
_update(servers) {
|
||||
servers.map(server => {
|
||||
server.allowUnauthorized = server.allowUnauthorized ? 'true' : undefined
|
||||
server.enabled = server.enabled ? 'true' : undefined
|
||||
const { error } = server
|
||||
server.error =
|
||||
error != null ? JSON.stringify(serializeError(error)) : undefined
|
||||
server.readOnly = server.readOnly ? 'true' : undefined
|
||||
})
|
||||
return super._update(servers)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -13,9 +13,7 @@ export default function proxyConsole(ws, vmConsole, sessionId) {
|
||||
hostname = address
|
||||
|
||||
log.warn(
|
||||
`host is missing in console (${vmConsole.uuid}) URI (${
|
||||
vmConsole.location
|
||||
}) using host address (${address}) as fallback`
|
||||
`host is missing in console (${vmConsole.uuid}) URI (${vmConsole.location}) using host address (${address}) as fallback`
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
@@ -1,5 +1,3 @@
|
||||
import { startsWith } from 'lodash'
|
||||
|
||||
import ensureArray from './_ensureArray'
|
||||
import {
|
||||
extractProperty,
|
||||
@@ -78,6 +76,7 @@ const TRANSFORMS = {
|
||||
cores: cpuInfo && +cpuInfo.cpu_count,
|
||||
sockets: cpuInfo && +cpuInfo.socket_count,
|
||||
},
|
||||
zstdSupported: obj.restrictions.restrict_zstd_export === 'false',
|
||||
|
||||
// TODO
|
||||
// - ? networks = networksByPool.items[pool.id] (network.$pool.id)
|
||||
@@ -119,7 +118,7 @@ const TRANSFORMS = {
|
||||
size: update.installation_size,
|
||||
}
|
||||
|
||||
if (startsWith(update.name_label, 'XS')) {
|
||||
if (update.name_label.startsWith('XS')) {
|
||||
// It's a patch update but for homogeneity, we're still using pool_patches
|
||||
} else {
|
||||
supplementalPacks.push(formattedUpdate)
|
||||
@@ -143,7 +142,8 @@ const TRANSFORMS = {
|
||||
},
|
||||
current_operations: obj.current_operations,
|
||||
hostname: obj.hostname,
|
||||
iSCSI_name: otherConfig.iscsi_iqn || null,
|
||||
iscsiIqn: obj.iscsi_iqn ?? otherConfig.iscsi_iqn ?? '',
|
||||
zstdSupported: obj.license_params.restrict_zstd_export === 'false',
|
||||
license_params: obj.license_params,
|
||||
license_server: obj.license_server,
|
||||
license_expiry: toTimestamp(obj.license_params.expiry),
|
||||
@@ -265,6 +265,17 @@ const TRANSFORMS = {
|
||||
}
|
||||
}
|
||||
|
||||
// Build a { taskId → operation } map instead of forwarding the
|
||||
// { taskRef → operation } map directly
|
||||
const currentOperations = {}
|
||||
const { $xapi } = obj
|
||||
forEach(obj.current_operations, (operation, ref) => {
|
||||
const task = $xapi.getObjectByRef(ref, undefined)
|
||||
if (task !== undefined) {
|
||||
currentOperations[task.$id] = operation
|
||||
}
|
||||
})
|
||||
|
||||
const vm = {
|
||||
// type is redefined after for controllers/, templates &
|
||||
// snapshots.
|
||||
@@ -281,7 +292,7 @@ const TRANSFORMS = {
|
||||
? +metrics.VCPUs_number
|
||||
: +obj.VCPUs_at_startup,
|
||||
},
|
||||
current_operations: obj.current_operations,
|
||||
current_operations: currentOperations,
|
||||
docker: (function() {
|
||||
const monitor = otherConfig['xscontainer-monitor']
|
||||
if (!monitor) {
|
||||
@@ -519,6 +530,7 @@ const TRANSFORMS = {
|
||||
|
||||
name_description: obj.name_description,
|
||||
name_label: obj.name_label,
|
||||
parent: obj.sm_config['vhd-parent'],
|
||||
size: +obj.virtual_size,
|
||||
snapshots: link(obj, 'snapshots'),
|
||||
tags: obj.tags,
|
||||
|
||||
@@ -4,7 +4,6 @@ import synchronized from 'decorator-synchronized'
|
||||
import { BaseError } from 'make-error'
|
||||
import {
|
||||
defaults,
|
||||
endsWith,
|
||||
findKey,
|
||||
forEach,
|
||||
identity,
|
||||
@@ -184,7 +183,7 @@ const STATS = {
|
||||
transformValue: value => value * 1024,
|
||||
},
|
||||
memory: {
|
||||
test: metricType => endsWith(metricType, 'memory'),
|
||||
test: metricType => metricType.endsWith('memory'),
|
||||
},
|
||||
cpus: {
|
||||
test: /^cpu(\d+)$/,
|
||||
|
||||
@@ -22,8 +22,8 @@ import { forbiddenOperation } from 'xo-common/api-errors'
|
||||
import { Xapi as XapiBase, NULL_REF } from 'xen-api'
|
||||
import {
|
||||
every,
|
||||
find,
|
||||
filter,
|
||||
find,
|
||||
flatMap,
|
||||
flatten,
|
||||
groupBy,
|
||||
@@ -31,7 +31,6 @@ import {
|
||||
isEmpty,
|
||||
noop,
|
||||
omit,
|
||||
startsWith,
|
||||
uniq,
|
||||
} from 'lodash'
|
||||
import { satisfies as versionSatisfies } from 'semver'
|
||||
@@ -247,69 +246,6 @@ export default class Xapi extends XapiBase {
|
||||
)::ignoreErrors()
|
||||
}
|
||||
|
||||
async setHostProperties(id, { nameLabel, nameDescription }) {
|
||||
const host = this.getObject(id)
|
||||
await Promise.all([
|
||||
nameDescription !== undefined &&
|
||||
host.set_name_description(nameDescription),
|
||||
nameLabel !== undefined && host.set_name_label(nameLabel),
|
||||
])
|
||||
}
|
||||
|
||||
async setPoolProperties({ autoPoweron, nameLabel, nameDescription }) {
|
||||
const { pool } = this
|
||||
|
||||
await Promise.all([
|
||||
nameDescription !== undefined &&
|
||||
pool.set_name_description(nameDescription),
|
||||
nameLabel !== undefined && pool.set_name_label(nameLabel),
|
||||
autoPoweron != null &&
|
||||
pool.update_other_config('autoPoweron', autoPoweron ? 'true' : null),
|
||||
])
|
||||
}
|
||||
|
||||
async setSrProperties(id, { nameLabel, nameDescription }) {
|
||||
const sr = this.getObject(id)
|
||||
await Promise.all([
|
||||
nameDescription !== undefined && sr.set_name_description(nameDescription),
|
||||
nameLabel !== undefined && sr.set_name_label(nameLabel),
|
||||
])
|
||||
}
|
||||
|
||||
async setNetworkProperties(
|
||||
id,
|
||||
{ automatic, defaultIsLocked, nameDescription, nameLabel }
|
||||
) {
|
||||
let defaultLockingMode
|
||||
if (defaultIsLocked != null) {
|
||||
defaultLockingMode = defaultIsLocked ? 'disabled' : 'unlocked'
|
||||
}
|
||||
const network = this.getObject(id)
|
||||
await Promise.all([
|
||||
defaultLockingMode !== undefined &&
|
||||
network.set_default_locking_mode(defaultLockingMode),
|
||||
nameDescription !== undefined &&
|
||||
network.set_name_description(nameDescription),
|
||||
nameLabel !== undefined && network.set_name_label(nameLabel),
|
||||
automatic !== undefined &&
|
||||
network.update_other_config('automatic', automatic ? 'true' : null),
|
||||
])
|
||||
}
|
||||
|
||||
// =================================================================
|
||||
|
||||
async addTag(id, tag) {
|
||||
const { $ref: ref, $type: type } = this.getObject(id)
|
||||
|
||||
await this.call(`${type}.add_tags`, ref, tag)
|
||||
}
|
||||
|
||||
async removeTag(id, tag) {
|
||||
const { $ref: ref, $type: type } = this.getObject(id)
|
||||
|
||||
await this.call(`${type}.remove_tags`, ref, tag)
|
||||
}
|
||||
|
||||
// =================================================================
|
||||
|
||||
setDefaultSr(srId) {
|
||||
@@ -893,7 +829,7 @@ export default class Xapi extends XapiBase {
|
||||
}
|
||||
|
||||
// If the VDI name start with `[NOBAK]`, do not export it.
|
||||
if (startsWith(vdi.name_label, '[NOBAK]')) {
|
||||
if (vdi.name_label.startsWith('[NOBAK]')) {
|
||||
// FIXME: find a way to not create the VDI snapshot in the
|
||||
// first time.
|
||||
//
|
||||
@@ -1019,17 +955,21 @@ export default class Xapi extends XapiBase {
|
||||
await this._createVmRecord({
|
||||
...delta.vm,
|
||||
affinity: null,
|
||||
blocked_operations: {
|
||||
...delta.vm.blocked_operations,
|
||||
start: 'Importing…',
|
||||
},
|
||||
ha_always_run: false,
|
||||
is_a_template: false,
|
||||
name_label: `[Importing…] ${name_label}`,
|
||||
other_config: {
|
||||
...delta.vm.other_config,
|
||||
[TAG_COPY_SRC]: delta.vm.uuid,
|
||||
},
|
||||
})
|
||||
)
|
||||
$defer.onFailure(() => this._deleteVm(vm))
|
||||
|
||||
await Promise.all([
|
||||
vm.set_name_label(`[Importing…] ${name_label}`),
|
||||
vm.update_blocked_operations('start', 'Importing…'),
|
||||
vm.update_other_config(TAG_COPY_SRC, delta.vm.uuid),
|
||||
])
|
||||
|
||||
// 2. Delete all VBDs which may have been created by the import.
|
||||
await asyncMap(vm.$VBDs, vbd => this._deleteVbd(vbd))::ignoreErrors()
|
||||
|
||||
@@ -1146,6 +1086,7 @@ export default class Xapi extends XapiBase {
|
||||
}
|
||||
|
||||
await Promise.all([
|
||||
delta.vm.ha_always_run && vm.set_ha_always_run(true),
|
||||
vm.set_name_label(name_label),
|
||||
// FIXME: move
|
||||
vm.update_blocked_operations(
|
||||
@@ -1217,6 +1158,9 @@ export default class Xapi extends XapiBase {
|
||||
{
|
||||
force: 'true',
|
||||
}
|
||||
// FIXME: missing param `vgu_map`, it does not cause issues ATM but it
|
||||
// might need to be changed one day.
|
||||
// {},
|
||||
)::pCatch({ code: 'TOO_MANY_STORAGE_MIGRATES' }, () =>
|
||||
pDelay(1e4).then(loop)
|
||||
)
|
||||
@@ -1376,11 +1320,7 @@ export default class Xapi extends XapiBase {
|
||||
$defer.onFailure(() => this._deleteVm(vm))
|
||||
// Disable start and change the VM name label during import.
|
||||
await Promise.all([
|
||||
this.addForbiddenOperationToVm(
|
||||
vm.$id,
|
||||
'start',
|
||||
'OVA import in progress...'
|
||||
),
|
||||
vm.update_blocked_operations('start', 'OVA import in progress...'),
|
||||
vm.set_name_label(`[Importing...] ${nameLabel}`),
|
||||
])
|
||||
|
||||
@@ -1441,7 +1381,7 @@ export default class Xapi extends XapiBase {
|
||||
|
||||
// Enable start and restore the VM name label after import.
|
||||
await Promise.all([
|
||||
this.removeForbiddenOperationFromVm(vm.$id, 'start'),
|
||||
vm.update_blocked_operations('start', null),
|
||||
vm.set_name_label(nameLabel),
|
||||
])
|
||||
return vm
|
||||
@@ -1671,24 +1611,6 @@ export default class Xapi extends XapiBase {
|
||||
}
|
||||
}
|
||||
|
||||
// vm_operations: http://xapi-project.github.io/xen-api/classes/vm.html
|
||||
async addForbiddenOperationToVm(vmId, operation, reason) {
|
||||
await this.call(
|
||||
'VM.add_to_blocked_operations',
|
||||
this.getObject(vmId).$ref,
|
||||
operation,
|
||||
`[XO] ${reason}`
|
||||
)
|
||||
}
|
||||
|
||||
async removeForbiddenOperationFromVm(vmId, operation) {
|
||||
await this.call(
|
||||
'VM.remove_from_blocked_operations',
|
||||
this.getObject(vmId).$ref,
|
||||
operation
|
||||
)
|
||||
}
|
||||
|
||||
// =================================================================
|
||||
|
||||
async createVbd({
|
||||
@@ -1804,9 +1726,7 @@ export default class Xapi extends XapiBase {
|
||||
}
|
||||
|
||||
log.debug(
|
||||
`Moving VDI ${vdi.name_label} from ${vdi.$SR.name_label} to ${
|
||||
sr.name_label
|
||||
}`
|
||||
`Moving VDI ${vdi.name_label} from ${vdi.$SR.name_label} to ${sr.name_label}`
|
||||
)
|
||||
try {
|
||||
await pRetry(
|
||||
@@ -2216,6 +2136,16 @@ export default class Xapi extends XapiBase {
|
||||
mapToArray(bonds, bond => this.call('Bond.destroy', bond))
|
||||
)
|
||||
|
||||
const tunnels = filter(this.objects.all, { $type: 'tunnel' })
|
||||
await Promise.all(
|
||||
map(pifs, async pif => {
|
||||
const tunnel = find(tunnels, { access_PIF: pif.$ref })
|
||||
if (tunnel != null) {
|
||||
await this.callAsync('tunnel.destroy', tunnel.$ref)
|
||||
}
|
||||
})
|
||||
)
|
||||
|
||||
await this.callAsync('network.destroy', network.$ref)
|
||||
}
|
||||
|
||||
@@ -2416,7 +2346,7 @@ export default class Xapi extends XapiBase {
|
||||
)
|
||||
}
|
||||
|
||||
async _assertConsistentHostServerTime(hostRef) {
|
||||
async assertConsistentHostServerTime(hostRef) {
|
||||
const delta =
|
||||
parseDateTime(await this.call('host.get_servertime', hostRef)).getTime() -
|
||||
Date.now()
|
||||
@@ -2428,4 +2358,27 @@ export default class Xapi extends XapiBase {
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
async isHyperThreadingEnabled(hostId) {
|
||||
try {
|
||||
return (
|
||||
(await this.call(
|
||||
'host.call_plugin',
|
||||
this.getObject(hostId).$ref,
|
||||
'hyperthreading.py',
|
||||
'get_hyperthreading',
|
||||
{}
|
||||
)) !== 'false'
|
||||
)
|
||||
} catch (error) {
|
||||
if (
|
||||
error.code === 'XENAPI_MISSING_PLUGIN' ||
|
||||
error.code === 'UNKNOWN_XENAPI_PLUGIN_FUNCTION'
|
||||
) {
|
||||
return null
|
||||
} else {
|
||||
throw error
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -74,7 +74,6 @@ declare export class Xapi {
|
||||
): Promise<void>;
|
||||
_snapshotVm(cancelToken: mixed, vm: Vm, nameLabel?: string): Promise<Vm>;
|
||||
|
||||
addTag(object: Id, tag: string): Promise<void>;
|
||||
barrier(): Promise<void>;
|
||||
barrier(ref: string): Promise<XapiObject>;
|
||||
deleteVm(vm: Id): Promise<void>;
|
||||
|
||||
@@ -256,16 +256,12 @@ export default {
|
||||
) {
|
||||
if (getAll) {
|
||||
log(
|
||||
`patch ${
|
||||
patch.name
|
||||
} (${id}) conflicts with installed patch ${conflictId}`
|
||||
`patch ${patch.name} (${id}) conflicts with installed patch ${conflictId}`
|
||||
)
|
||||
return
|
||||
}
|
||||
throw new Error(
|
||||
`patch ${
|
||||
patch.name
|
||||
} (${id}) conflicts with installed patch ${conflictId}`
|
||||
`patch ${patch.name} (${id}) conflicts with installed patch ${conflictId}`
|
||||
)
|
||||
}
|
||||
|
||||
@@ -292,9 +288,7 @@ export default {
|
||||
if (!installed[id] && find(installable, { id }) === undefined) {
|
||||
if (requiredPatch.paid && freeHost) {
|
||||
throw new Error(
|
||||
`required patch ${
|
||||
requiredPatch.name
|
||||
} (${id}) requires a XenServer license`
|
||||
`required patch ${requiredPatch.name} (${id}) requires a XenServer license`
|
||||
)
|
||||
}
|
||||
installable.push(requiredPatch)
|
||||
|
||||
@@ -84,4 +84,32 @@ export default {
|
||||
})
|
||||
return unhealthyVdis
|
||||
},
|
||||
|
||||
async createSr({
|
||||
hostRef,
|
||||
|
||||
content_type = 'user', // recommended by Citrix
|
||||
device_config = {},
|
||||
name_description = '',
|
||||
name_label,
|
||||
shared = false,
|
||||
physical_size = 0,
|
||||
sm_config = {},
|
||||
type,
|
||||
}) {
|
||||
const srRef = await this.call(
|
||||
'SR.create',
|
||||
hostRef,
|
||||
device_config,
|
||||
physical_size,
|
||||
name_label,
|
||||
name_description,
|
||||
type,
|
||||
content_type,
|
||||
shared,
|
||||
sm_config
|
||||
)
|
||||
|
||||
return (await this.barrier(srRef)).uuid
|
||||
},
|
||||
}
|
||||
|
||||
@@ -107,15 +107,12 @@ export default {
|
||||
|
||||
if (isHvm) {
|
||||
if (!isEmpty(vdis) || installMethod === 'network') {
|
||||
const { HVM_boot_params: bootParams } = vm
|
||||
let order = bootParams.order
|
||||
if (order) {
|
||||
order = 'n' + order.replace('n', '')
|
||||
} else {
|
||||
order = 'ncd'
|
||||
}
|
||||
const { order } = vm.HVM_boot_params
|
||||
|
||||
vm.set_HVM_boot_params({ ...bootParams, order })
|
||||
vm.update_HVM_boot_params(
|
||||
'order',
|
||||
order ? 'n' + order.replace('n', '') : 'ncd'
|
||||
)
|
||||
}
|
||||
} else {
|
||||
// PV
|
||||
@@ -262,17 +259,14 @@ export default {
|
||||
affinityHost: {
|
||||
get: 'affinity',
|
||||
set: (value, vm) =>
|
||||
vm.set_affinity(value ? this.getObject(value).$ref : NULL_REF),
|
||||
vm.set_affinity(value ? vm.$xapi.getObject(value).$ref : NULL_REF),
|
||||
},
|
||||
|
||||
autoPoweron: {
|
||||
set(value, vm) {
|
||||
return Promise.all([
|
||||
vm.update_other_config('autoPoweron', value ? 'true' : null),
|
||||
value &&
|
||||
this.setPoolProperties({
|
||||
autoPoweron: true,
|
||||
}),
|
||||
vm.update_other_config('auto_poweron', value ? 'true' : null),
|
||||
value && vm.$pool.update_other_config('auto_poweron', 'true'),
|
||||
])
|
||||
},
|
||||
},
|
||||
@@ -312,7 +306,9 @@ export default {
|
||||
get: vm => +vm.VCPUs_at_startup,
|
||||
set: [
|
||||
'VCPUs_at_startup',
|
||||
(value, vm) => isVmRunning(vm) && vm.set_VCPUs_number_live(value),
|
||||
(value, vm) =>
|
||||
isVmRunning(vm) &&
|
||||
vm.$xapi.call('VM.set_VCPUs_number_live', vm.$ref, String(value)),
|
||||
],
|
||||
},
|
||||
|
||||
@@ -455,6 +451,10 @@ export default {
|
||||
get: vm => +vm.start_delay,
|
||||
set: (startDelay, vm) => vm.set_start_delay(startDelay),
|
||||
},
|
||||
|
||||
hvmBootFirmware: {
|
||||
set: (firmware, vm) => vm.update_HVM_boot_params('firmware', firmware),
|
||||
},
|
||||
}),
|
||||
|
||||
async editVm(id, props, checkLimits) {
|
||||
|
||||
@@ -60,8 +60,9 @@ function checkParams(method, params) {
|
||||
|
||||
const result = schemaInspector.validate(
|
||||
{
|
||||
type: 'object',
|
||||
properties: schema,
|
||||
strict: true,
|
||||
type: 'object',
|
||||
},
|
||||
params
|
||||
)
|
||||
@@ -261,11 +262,15 @@ export default class Api {
|
||||
//
|
||||
// The goal here is to standardize the calls by always providing
|
||||
// an id parameter when possible to simplify calls to the API.
|
||||
if (params != null && params.id === undefined) {
|
||||
if (params?.id === undefined) {
|
||||
const namespace = name.slice(0, name.indexOf('.'))
|
||||
const id = params[namespace]
|
||||
if (typeof id === 'string') {
|
||||
params.id = id
|
||||
const spec = method.params
|
||||
if (spec !== undefined && 'id' in spec && !(namespace in spec)) {
|
||||
const id = params[namespace]
|
||||
if (typeof id === 'string') {
|
||||
delete params[namespace]
|
||||
params.id = id
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,10 +1,8 @@
|
||||
import ms from 'ms'
|
||||
import { forEach, isEmpty, iteratee, sortedIndexBy } from 'lodash'
|
||||
import { noSuchObject } from 'xo-common/api-errors'
|
||||
|
||||
const isSkippedError = error =>
|
||||
error.message === 'no disks found' ||
|
||||
noSuchObject.is(error) ||
|
||||
error.message === 'no VMs match this pattern' ||
|
||||
error.message === 'unhealthy VDI chain'
|
||||
|
||||
|
||||
@@ -19,6 +19,7 @@ import {
|
||||
isEmpty,
|
||||
last,
|
||||
mapValues,
|
||||
merge,
|
||||
noop,
|
||||
some,
|
||||
sum,
|
||||
@@ -29,6 +30,7 @@ import {
|
||||
ignoreErrors,
|
||||
pFinally,
|
||||
pFromEvent,
|
||||
timeout,
|
||||
} from 'promise-toolbox'
|
||||
import Vhd, {
|
||||
chainVhd,
|
||||
@@ -41,6 +43,7 @@ import { type CallJob, type Executor, type Job } from '../jobs'
|
||||
import { type Schedule } from '../scheduling'
|
||||
|
||||
import createSizeStream from '../../size-stream'
|
||||
import parseDuration from '../../_parseDuration'
|
||||
import {
|
||||
type DeltaVmExport,
|
||||
type DeltaVmImport,
|
||||
@@ -66,6 +69,7 @@ export type Mode = 'full' | 'delta'
|
||||
export type ReportWhen = 'always' | 'failure' | 'never'
|
||||
|
||||
type Settings = {|
|
||||
bypassVdiChainsCheck?: boolean,
|
||||
concurrency?: number,
|
||||
deleteFirst?: boolean,
|
||||
copyRetention?: number,
|
||||
@@ -137,6 +141,7 @@ const getOldEntries = <T>(retention: number, entries?: T[]): T[] =>
|
||||
: entries
|
||||
|
||||
const defaultSettings: Settings = {
|
||||
bypassVdiChainsCheck: false,
|
||||
concurrency: 0,
|
||||
deleteFirst: false,
|
||||
exportRetention: 0,
|
||||
@@ -286,7 +291,7 @@ const importers: $Dict<
|
||||
xapi.importVm(xva, { srId: sr.$id })
|
||||
)
|
||||
await Promise.all([
|
||||
xapi.addTag(vm.$id, 'restored from backup'),
|
||||
vm.add_tags('restored from backup'),
|
||||
xapi.editVm(vm.$id, {
|
||||
name_label: `${metadata.vm.name_label} (${safeDateFormat(
|
||||
metadata.timestamp
|
||||
@@ -450,7 +455,7 @@ const disableVmHighAvailability = async (xapi: Xapi, vm: Vm) => {
|
||||
|
||||
return Promise.all([
|
||||
vm.set_ha_restart_priority(''),
|
||||
xapi.addTag(vm.$ref, 'HA disabled'),
|
||||
vm.add_tags('HA disabled'),
|
||||
])
|
||||
}
|
||||
|
||||
@@ -507,9 +512,17 @@ const disableVmHighAvailability = async (xapi: Xapi, vm: Vm) => {
|
||||
// │ │ ├─ task.start(message: 'transfer')
|
||||
// │ │ │ ├─ task.warning(message: string)
|
||||
// │ │ │ └─ task.end(result: { size: number })
|
||||
// │ │ │
|
||||
// │ │ │ // in case of full backup, DR and CR
|
||||
// │ │ ├─ task.start(message: 'clean')
|
||||
// │ │ │ ├─ task.warning(message: string)
|
||||
// │ │ │ └─ task.end
|
||||
// │ │ │
|
||||
// │ │ │ // in case of delta backup
|
||||
// │ │ ├─ task.start(message: 'merge')
|
||||
// │ │ │ ├─ task.warning(message: string)
|
||||
// │ │ │ └─ task.end(result: { size: number })
|
||||
// │ │ │
|
||||
// │ │ └─ task.end
|
||||
// │ └─ task.end
|
||||
// └─ job.end
|
||||
@@ -536,17 +549,18 @@ export default class BackupNg {
|
||||
return this._runningRestores
|
||||
}
|
||||
|
||||
constructor(app: any) {
|
||||
constructor(app: any, { backup }) {
|
||||
this._app = app
|
||||
this._logger = undefined
|
||||
this._runningRestores = new Set()
|
||||
this._backupOptions = backup
|
||||
|
||||
app.on('start', async () => {
|
||||
this._logger = await app.getLogger('restore')
|
||||
|
||||
const executor: Executor = async ({
|
||||
cancelToken,
|
||||
data: vmsId,
|
||||
data,
|
||||
job: job_,
|
||||
logger,
|
||||
runJobId,
|
||||
@@ -556,6 +570,8 @@ export default class BackupNg {
|
||||
throw new Error('backup job cannot run without a schedule')
|
||||
}
|
||||
|
||||
let vmsId = data?.vms
|
||||
|
||||
const job: BackupJob = (job_: any)
|
||||
const vmsPattern = job.vms
|
||||
|
||||
@@ -609,7 +625,9 @@ export default class BackupNg {
|
||||
}))
|
||||
)
|
||||
|
||||
const timeout = getSetting(job.settings, 'timeout', [''])
|
||||
const settings = merge(job.settings, data?.settings)
|
||||
|
||||
const timeout = getSetting(settings, 'timeout', [''])
|
||||
if (timeout !== 0) {
|
||||
const source = CancelToken.source([cancelToken])
|
||||
cancelToken = source.token
|
||||
@@ -642,6 +660,7 @@ export default class BackupNg {
|
||||
schedule,
|
||||
logger,
|
||||
taskId,
|
||||
settings,
|
||||
srs,
|
||||
remotes
|
||||
)
|
||||
@@ -649,7 +668,7 @@ export default class BackupNg {
|
||||
// 2018-07-20, JFT: vmTimeout is disabled for the time being until
|
||||
// we figure out exactly how it should behave.
|
||||
//
|
||||
// const vmTimeout: number = getSetting(job.settings, 'vmTimeout', [
|
||||
// const vmTimeout: number = getSetting(settings, 'vmTimeout', [
|
||||
// uuid,
|
||||
// scheduleId,
|
||||
// ])
|
||||
@@ -678,9 +697,7 @@ export default class BackupNg {
|
||||
}
|
||||
}
|
||||
|
||||
const concurrency: number = getSetting(job.settings, 'concurrency', [
|
||||
'',
|
||||
])
|
||||
const concurrency: number = getSetting(settings, 'concurrency', [''])
|
||||
if (concurrency !== 0) {
|
||||
handleVm = limitConcurrency(concurrency)(handleVm)
|
||||
logger.notice('vms', {
|
||||
@@ -919,6 +936,7 @@ export default class BackupNg {
|
||||
schedule: Schedule,
|
||||
logger: any,
|
||||
taskId: string,
|
||||
settings: Settings,
|
||||
srs: any[],
|
||||
remotes: any[]
|
||||
): Promise<void> {
|
||||
@@ -946,7 +964,7 @@ export default class BackupNg {
|
||||
)
|
||||
}
|
||||
|
||||
const { id: jobId, mode, settings } = job
|
||||
const { id: jobId, mode } = job
|
||||
const { id: scheduleId } = schedule
|
||||
|
||||
let exportRetention: number = getSetting(settings, 'exportRetention', [
|
||||
@@ -1007,7 +1025,14 @@ export default class BackupNg {
|
||||
.filter(_ => _.other_config['xo:backup:job'] === jobId)
|
||||
.sort(compareSnapshotTime)
|
||||
|
||||
xapi._assertHealthyVdiChains(vm)
|
||||
const bypassVdiChainsCheck: boolean = getSetting(
|
||||
settings,
|
||||
'bypassVdiChainsCheck',
|
||||
[vmUuid, '']
|
||||
)
|
||||
if (!bypassVdiChainsCheck) {
|
||||
xapi._assertHealthyVdiChains(vm)
|
||||
}
|
||||
|
||||
const offlineSnapshot: boolean = getSetting(settings, 'offlineSnapshot', [
|
||||
vmUuid,
|
||||
@@ -1191,11 +1216,20 @@ export default class BackupNg {
|
||||
)
|
||||
): any)
|
||||
|
||||
const deleteOldBackups = () =>
|
||||
wrapTask(
|
||||
{
|
||||
logger,
|
||||
message: 'clean',
|
||||
parentId: taskId,
|
||||
},
|
||||
this._deleteFullVmBackups(handler, oldBackups)
|
||||
)
|
||||
const deleteFirst = getSetting(settings, 'deleteFirst', [
|
||||
remoteId,
|
||||
])
|
||||
if (deleteFirst) {
|
||||
await this._deleteFullVmBackups(handler, oldBackups)
|
||||
await deleteOldBackups()
|
||||
}
|
||||
|
||||
await wrapTask(
|
||||
@@ -1211,7 +1245,7 @@ export default class BackupNg {
|
||||
await handler.outputFile(metadataFilename, jsonMetadata)
|
||||
|
||||
if (!deleteFirst) {
|
||||
await this._deleteFullVmBackups(handler, oldBackups)
|
||||
await deleteOldBackups()
|
||||
}
|
||||
}
|
||||
)
|
||||
@@ -1242,9 +1276,18 @@ export default class BackupNg {
|
||||
listReplicatedVms(xapi, scheduleId, srId, vmUuid)
|
||||
)
|
||||
|
||||
const deleteOldBackups = () =>
|
||||
wrapTask(
|
||||
{
|
||||
logger,
|
||||
message: 'clean',
|
||||
parentId: taskId,
|
||||
},
|
||||
this._deleteVms(xapi, oldVms)
|
||||
)
|
||||
const deleteFirst = getSetting(settings, 'deleteFirst', [srId])
|
||||
if (deleteFirst) {
|
||||
await this._deleteVms(xapi, oldVms)
|
||||
await deleteOldBackups()
|
||||
}
|
||||
|
||||
const vm = await xapi.barrier(
|
||||
@@ -1266,7 +1309,7 @@ export default class BackupNg {
|
||||
)
|
||||
|
||||
await Promise.all([
|
||||
xapi.addTag(vm.$ref, 'Disaster Recovery'),
|
||||
vm.add_tags('Disaster Recovery'),
|
||||
disableVmHighAvailability(xapi, vm),
|
||||
vm.update_blocked_operations(
|
||||
'start',
|
||||
@@ -1276,7 +1319,7 @@ export default class BackupNg {
|
||||
])
|
||||
|
||||
if (!deleteFirst) {
|
||||
await this._deleteVms(xapi, oldVms)
|
||||
await deleteOldBackups()
|
||||
}
|
||||
}
|
||||
)
|
||||
@@ -1602,9 +1645,19 @@ export default class BackupNg {
|
||||
listReplicatedVms(xapi, scheduleId, srId, vmUuid)
|
||||
)
|
||||
|
||||
const deleteOldBackups = () =>
|
||||
wrapTask(
|
||||
{
|
||||
logger,
|
||||
message: 'clean',
|
||||
parentId: taskId,
|
||||
},
|
||||
this._deleteVms(xapi, oldVms)
|
||||
)
|
||||
|
||||
const deleteFirst = getSetting(settings, 'deleteFirst', [srId])
|
||||
if (deleteFirst) {
|
||||
await this._deleteVms(xapi, oldVms)
|
||||
await deleteOldBackups()
|
||||
}
|
||||
|
||||
const { vm } = await wrapTask(
|
||||
@@ -1624,7 +1677,7 @@ export default class BackupNg {
|
||||
)
|
||||
|
||||
await Promise.all([
|
||||
xapi.addTag(vm.$ref, 'Continuous Replication'),
|
||||
vm.add_tags('Continuous Replication'),
|
||||
disableVmHighAvailability(xapi, vm),
|
||||
vm.update_blocked_operations(
|
||||
'start',
|
||||
@@ -1634,7 +1687,7 @@ export default class BackupNg {
|
||||
])
|
||||
|
||||
if (!deleteFirst) {
|
||||
await this._deleteVms(xapi, oldVms)
|
||||
await deleteOldBackups()
|
||||
}
|
||||
}
|
||||
)
|
||||
@@ -1761,6 +1814,16 @@ export default class BackupNg {
|
||||
const path = `${dir}/${file}`
|
||||
try {
|
||||
const metadata = JSON.parse(String(await handler.readFile(path)))
|
||||
if (metadata.mode === 'full') {
|
||||
metadata.size = await timeout
|
||||
.call(
|
||||
handler.getSize(resolveRelativeFromFile(path, metadata.xva)),
|
||||
parseDuration(this._backupOptions.vmBackupSizeTimeout)
|
||||
)
|
||||
.catch(err => {
|
||||
log.warn(`_listVmBackups, getSize`, { err })
|
||||
})
|
||||
}
|
||||
if (predicate === undefined || predicate(metadata)) {
|
||||
Object.defineProperty(metadata, '_filename', {
|
||||
value: path,
|
||||
|
||||
@@ -10,17 +10,7 @@ import { createReadStream, readdir, stat } from 'fs'
import { satisfies as versionSatisfies } from 'semver'
import { utcFormat } from 'd3-time-format'
import { basename, dirname } from 'path'
import {
  endsWith,
  filter,
  find,
  includes,
  once,
  range,
  sortBy,
  startsWith,
  trim,
} from 'lodash'
import { filter, find, includes, once, range, sortBy, trim } from 'lodash'
import {
  chainVhd,
  createSyntheticStream as createVhdReadStream,
@@ -104,7 +94,7 @@ const getVdiTimestamp = name => {

const getDeltaBackupNameWithoutExt = name =>
  name.slice(0, -DELTA_BACKUP_EXT_LENGTH)
const isDeltaBackup = name => endsWith(name, DELTA_BACKUP_EXT)
const isDeltaBackup = name => name.endsWith(DELTA_BACKUP_EXT)

// -------------------------------------------------------------------

@@ -308,13 +298,13 @@ export default class {
    const handler = await this._xo.getRemoteHandler(remoteId)

    // List backups. (No delta)
    const backupFilter = file => endsWith(file, '.xva')
    const backupFilter = file => file.endsWith('.xva')

    const files = await handler.list('.')
    const backups = filter(files, backupFilter)

    // List delta backups.
    const deltaDirs = filter(files, file => startsWith(file, 'vm_delta_'))
    const deltaDirs = filter(files, file => file.startsWith('vm_delta_'))

    for (const deltaDir of deltaDirs) {
      const files = await handler.list(deltaDir)
@@ -336,12 +326,12 @@ export default class {
    const backups = []

    await asyncMap(handler.list('.'), entry => {
      if (endsWith(entry, '.xva')) {
      if (entry.endsWith('.xva')) {
        backups.push(parseVmBackupPath(entry))
      } else if (startsWith(entry, 'vm_delta_')) {
      } else if (entry.startsWith('vm_delta_')) {
        return handler.list(entry).then(children =>
          asyncMap(children, child => {
            if (endsWith(child, '.json')) {
            if (child.endsWith('.json')) {
              const path = `${entry}/${child}`

              const record = parseVmBackupPath(path)
@@ -372,7 +362,7 @@ export default class {

    const { datetime } = parseVmBackupPath(file)
    await Promise.all([
      xapi.addTag(vm.$id, 'restored from backup'),
      vm.add_tags('restored from backup'),
      xapi.editVm(vm.$id, {
        name_label: `${vm.name_label} (${shortDate(datetime * 1e3)})`,
      }),
@@ -411,9 +401,7 @@ export default class {
      localBaseUuid,
      {
        bypassVdiChainsCheck: force,
        snapshotNameLabel: `XO_DELTA_EXPORT: ${targetSr.name_label} (${
          targetSr.uuid
        })`,
        snapshotNameLabel: `XO_DELTA_EXPORT: ${targetSr.name_label} (${targetSr.uuid})`,
      }
    )
    $defer.onFailure(() => srcXapi.deleteVm(delta.vm.uuid))
@@ -972,12 +960,13 @@ export default class {
      nameLabel: copyName,
    })

    data.vm.update_blocked_operations(
      'start',
      'Start operation for this vm is blocked, clone it if you want to use it.'
    )

    await targetXapi.addTag(data.vm.$id, 'Disaster Recovery')
    await Promise.all([
      data.vm.add_tags('Disaster Recovery'),
      data.vm.update_blocked_operations(
        'start',
        'Start operation for this vm is blocked, clone it if you want to use it.'
      ),
    ])

    if (!deleteOldBackupsFirst) {
      await this._removeVms(targetXapi, vmsToRemove)
@@ -1008,7 +997,7 @@ export default class {
    // Currently, the filenames of the VHD changes over time
    // (delta → full), but the JSON is not updated, therefore the
    // VHD path may need to be fixed.
    return endsWith(vhdPath, '_delta.vhd')
    return vhdPath.endsWith('_delta.vhd')
      ? pFromCallback(cb => stat(vhdPath, cb)).then(
          () => vhdPath,
          error => {
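Note on the last hunk above: the code checks with stat() whether the VHD recorded in the JSON metadata still exists under its original name, because a '_delta.vhd' may have been merged into a full VHD since the metadata was written. The error branch is cut off in this diff, so the sketch below uses a hypothetical resolveMergedPath() as a stand-in for whatever the real fallback does; the sample path in the usage line is made up.

import { stat } from 'fs'

// promisified stat, similar in spirit to pFromCallback(cb => stat(vhdPath, cb))
const pStat = path =>
  new Promise((resolve, reject) =>
    stat(path, (error, stats) => (error ? reject(error) : resolve(stats)))
  )

// hypothetical placeholder: what to return when the recorded delta VHD is gone
const resolveMergedPath = vhdPath => vhdPath

const resolveVhdPath = vhdPath =>
  vhdPath.endsWith('_delta.vhd')
    ? pStat(vhdPath).then(
        () => vhdPath, // the delta file is still there, use it as-is
        () => resolveMergedPath(vhdPath) // otherwise fall back
      )
    : Promise.resolve(vhdPath)

// usage with a made-up path
resolveVhdPath('vm_delta_sample/disk_delta.vhd').then(console.log)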
@@ -43,6 +43,20 @@ type MetadataBackupJob = {
  xoMetadata?: boolean,
}

const logInstantFailureTask = (logger, { data, error, message, parentId }) => {
  const taskId = logger.notice(message, {
    data,
    event: 'task.start',
    parentId,
  })
  logger.error(message, {
    event: 'task.end',
    result: serializeError(error),
    status: 'failure',
    taskId,
  })
}

const createSafeReaddir = (handler, methodName) => (path, options) =>
  handler.list(path, options).catch(error => {
    if (error?.code !== 'ENOENT') {
@@ -97,7 +111,7 @@ const deleteOldBackups = (handler, dir, retention, handleError) =>
// Task logs emitted in a metadata backup execution:
//
// job.start(data: { reportWhen: ReportWhen })
// ├─ task.start(data: { type: 'pool', id: string, pool: <Pool />, poolMaster: <Host /> })
// ├─ task.start(data: { type: 'pool', id: string, pool?: <Pool />, poolMaster?: <Host /> })
// │ ├─ task.start(data: { type: 'remote', id: string })
// │ │ └─ task.end
// │ └─ task.end
@@ -190,9 +204,7 @@ export default class metadataBackup {

    await asyncMap(handlers, async (handler, remoteId) => {
      const subTaskId = logger.notice(
        `Starting XO metadata backup for the remote (${remoteId}). (${
          job.id
        })`,
        `Starting XO metadata backup for the remote (${remoteId}). (${job.id})`,
        {
          data: {
            id: remoteId,
@@ -230,9 +242,7 @@ export default class metadataBackup {
      )

      logger.notice(
        `Backuping XO metadata for the remote (${remoteId}) is a success. (${
          job.id
        })`,
        `Backuping XO metadata for the remote (${remoteId}) is a success. (${job.id})`,
        {
          event: 'task.end',
          status: 'success',
@@ -251,9 +261,7 @@ export default class metadataBackup {
      })

      logger.error(
        `Backuping XO metadata for the remote (${remoteId}) has failed. (${
          job.id
        })`,
        `Backuping XO metadata for the remote (${remoteId}) has failed. (${job.id})`,
        {
          event: 'task.end',
          result: serializeError(error),
@@ -326,9 +334,7 @@ export default class metadataBackup {

    await asyncMap(handlers, async (handler, remoteId) => {
      const subTaskId = logger.notice(
        `Starting metadata backup for the pool (${poolId}) for the remote (${remoteId}). (${
          job.id
        })`,
        `Starting metadata backup for the pool (${poolId}) for the remote (${remoteId}). (${job.id})`,
        {
          data: {
            id: remoteId,
@@ -378,9 +384,7 @@ export default class metadataBackup {
      )

      logger.notice(
        `Backuping pool metadata (${poolId}) for the remote (${remoteId}) is a success. (${
          job.id
        })`,
        `Backuping pool metadata (${poolId}) for the remote (${remoteId}) is a success. (${job.id})`,
        {
          event: 'task.end',
          status: 'success',
@@ -402,9 +406,7 @@ export default class metadataBackup {
      })

      logger.error(
        `Backuping pool metadata (${poolId}) for the remote (${remoteId}) has failed. (${
          job.id
        })`,
        `Backuping pool metadata (${poolId}) for the remote (${remoteId}) has failed. (${job.id})`,
        {
          event: 'task.end',
          result: serializeError(error),
@@ -527,16 +529,15 @@ export default class metadataBackup {
    try {
      xapi = this._app.getXapi(id)
    } catch (error) {
      logger.warning(
        `unable to get the xapi associated to the pool (${id})`,
        {
          event: 'task.warning',
          taskId: runJobId,
          data: {
            error,
          },
        }
      )
      logInstantFailureTask(logger, {
        data: {
          type: 'pool',
          id,
        },
        error,
        message: `unable to get the xapi associated to the pool (${id})`,
        parentId: runJobId,
      })
    }
    if (xapi !== undefined) {
      promises.push(
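Note: the new logInstantFailureTask helper emits a matched task.start / task.end pair, so a failure that happens before a sub-task can really begin (here: getXapi throwing for an unknown pool) still shows up as a failed task in the log tree instead of a bare warning. A self-contained usage sketch follows; makeLogger and the simplified serializeError are stand-ins for the real xo-server job logger and serializeError util.

// console-backed stand-in for the real job logger (notice() returns a task id)
const makeLogger = () => {
  let nextId = 0
  return {
    notice(message, data) {
      const taskId = ++nextId
      console.log('notice', taskId, message, data)
      return taskId
    },
    error(message, data) {
      console.log('error', message, data)
    },
  }
}

// simplified stand-in for the real serializeError
const serializeError = error => ({ message: error.message, stack: error.stack })

// copied from the hunk above
const logInstantFailureTask = (logger, { data, error, message, parentId }) => {
  const taskId = logger.notice(message, { data, event: 'task.start', parentId })
  logger.error(message, {
    event: 'task.end',
    result: serializeError(error),
    status: 'failure',
    taskId,
  })
}

// usage, mirroring the getXapi catch block above (ids are made up)
const logger = makeLogger()
try {
  throw new Error('no such pool')
} catch (error) {
  logInstantFailureTask(logger, {
    data: { type: 'pool', id: 'pool-id' },
    error,
    message: 'unable to get the xapi associated to the pool (pool-id)',
    parentId: 'run-job-id',
  })
}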
packages/xo-server/src/xo-mixins/pool.js (new file, 99 lines)
@@ -0,0 +1,99 @@
import { difference, flatten, isEmpty, uniq } from 'lodash'

export default class Pools {
  constructor(xo) {
    this._xo = xo
  }

  async mergeInto({ sources: sourceIds, target, force }) {
    const { _xo } = this
    const targetHost = _xo.getObject(target.master)
    const sources = []
    const sourcePatches = {}

    // Check hosts compatibility.
    for (const sourceId of sourceIds) {
      const source = _xo.getObject(sourceId)
      const sourceHost = _xo.getObject(source.master)
      if (sourceHost.productBrand !== targetHost.productBrand) {
        throw new Error(
          `a ${sourceHost.productBrand} pool cannot be merged into a ${targetHost.productBrand} pool`
        )
      }
      if (sourceHost.version !== targetHost.version) {
        throw new Error('The hosts are not compatible')
      }
      sources.push(source)
      sourcePatches[sourceId] = sourceHost.patches
    }

    // Find missing patches on the target.
    const targetRequiredPatches = uniq(
      flatten(
        await Promise.all(
          sources.map(({ master }) =>
            _xo.getPatchesDifference(master, target.master)
          )
        )
      )
    )

    // Find missing patches on the sources.
    const allRequiredPatches = targetRequiredPatches.concat(
      targetHost.patches.map(patchId => _xo.getObject(patchId).name)
    )
    const sourceRequiredPatches = {}
    for (const sourceId of sourceIds) {
      const _sourcePatches = sourcePatches[sourceId].map(
        patchId => _xo.getObject(patchId).name
      )
      const requiredPatches = difference(allRequiredPatches, _sourcePatches)
      if (requiredPatches.length > 0) {
        sourceRequiredPatches[sourceId] = requiredPatches
      }
    }

    // On XCP-ng, "installPatches" installs *all* the patches
    // whatever the patches argument is.
    // So we must not call it if there are no patches to install.
    if (targetRequiredPatches.length > 0 || !isEmpty(sourceRequiredPatches)) {
      // Find patches in parallel.
      const findPatchesPromises = []
      const sourceXapis = {}
      const targetXapi = _xo.getXapi(target)
      for (const sourceId of sourceIds) {
        const sourceXapi = (sourceXapis[sourceId] = _xo.getXapi(sourceId))
        findPatchesPromises.push(
          sourceXapi.findPatches(sourceRequiredPatches[sourceId] ?? [])
        )
      }
      const patchesName = await Promise.all([
        targetXapi.findPatches(targetRequiredPatches),
        ...findPatchesPromises,
      ])

      // Install patches in parallel.
      const installPatchesPromises = []
      installPatchesPromises.push(
        targetXapi.installPatches({
          patches: patchesName[0],
        })
      )
      let i = 1
      for (const sourceId of sourceIds) {
        installPatchesPromises.push(
          sourceXapis[sourceId].installPatches({
            patches: patchesName[i++],
          })
        )
      }

      await Promise.all(installPatchesPromises)
    }

    // Merge the sources into the target sequentially to be safe.
    for (const source of sources) {
      await _xo.mergeXenPools(source._xapiId, target._xapiId, force)
    }
  }
}
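Note: the new Pools mixin merges pools in three phases: check that every source master matches the target master's product brand and version, level the patch sets on both sides (skipping installPatches entirely when nothing is missing, because of the XCP-ng behaviour noted in the code), then call mergeXenPools for each source sequentially. The sketch below isolates just the patch-levelling arithmetic with plain patch-name arrays; the sample data is made up, and the target-side computation is approximated with a lodash difference where the mixin actually calls _xo.getPatchesDifference.

import { difference, flatten, isEmpty, uniq } from 'lodash'

// patches installed on each host, by name (made-up sample data)
const targetPatches = ['XS80E001', 'XS80E002']
const sourcePatches = {
  'pool-a': ['XS80E001', 'XS80E003'],
  'pool-b': ['XS80E001'],
}

// patches the target is missing: anything a source has that the target lacks
const targetRequiredPatches = uniq(
  flatten(
    Object.values(sourcePatches).map(patches =>
      difference(patches, targetPatches)
    )
  )
)

// patches each source is missing: the combined set minus what it already has
const allRequiredPatches = targetRequiredPatches.concat(targetPatches)
const sourceRequiredPatches = {}
for (const [poolId, patches] of Object.entries(sourcePatches)) {
  const required = difference(allRequiredPatches, patches)
  if (required.length > 0) {
    sourceRequiredPatches[poolId] = required
  }
}

console.log({ targetRequiredPatches, sourceRequiredPatches })
// → { targetRequiredPatches: ['XS80E003'],
//     sourceRequiredPatches: { 'pool-a': ['XS80E002'], 'pool-b': ['XS80E003', 'XS80E002'] } }

// installPatches would only be called when one of these is non-empty,
// because on XCP-ng it installs all pending patches regardless of arguments
if (targetRequiredPatches.length > 0 || !isEmpty(sourceRequiredPatches)) {
  // install the missing patches on the target and on each source here
}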
Some files were not shown because too many files have changed in this diff.