Compare commits
149 Commits
pierre-fix...xo-server-
| Author | SHA1 | Date |
|---|---|---|
|  | 6b3d334e76 |  |
|  | 14f5fd8f73 |  |
|  | 5f73aee0df |  |
|  | f8666ba367 |  |
|  | 9e80f76dd8 |  |
|  | c76a5eaf67 |  |
|  | cd378f0168 |  |
|  | 7d51ff0cf5 |  |
|  | 47819ea956 |  |
|  | c7e3560c98 |  |
|  | b24400b21d |  |
|  | 6c1d651687 |  |
|  | e7757b53e7 |  |
|  | a6d182e92d |  |
|  | 925eca1463 |  |
|  | 8b454f0d39 |  |
|  | 7c4d110353 |  |
|  | 6df55523b6 |  |
|  | 3ec6a24634 |  |
|  | 164b4218c4 |  |
|  | 56df8a6477 |  |
|  | 47a83b312d |  |
|  | 41a28ae088 |  |
|  | 436a8755ae |  |
|  | 960b179d95 |  |
|  | 0f0d0e1076 |  |
|  | a8bd0d8075 |  |
|  | 986d3af685 |  |
|  | 1833f9ffdf |  |
|  | 30a6877f8a |  |
|  | aaae2583c7 |  |
|  | 7f24afc2e7 |  |
|  | 0040923e12 |  |
|  | 844efb88d8 |  |
|  | 9efc3dd1fb |  |
|  | 67853bad8e |  |
|  | faa8e1441a |  |
|  | 5c54611d1b |  |
|  | dcf55e4385 |  |
|  | 2b0f1b6aab |  |
|  | ae6cc8eea3 |  |
|  | 5279fa49a7 |  |
|  | dcd8a62784 |  |
|  | 8c197b0e1a |  |
|  | aed824b200 |  |
|  | 036b30212e |  |
|  | 3451ab3f50 |  |
|  | 0d0a92c2b1 |  |
|  | aa19bc7bf5 |  |
|  | 347759b2e7 |  |
|  | 352230446c |  |
|  | 3eff8102e1 |  |
|  | 6693d845d9 |  |
|  | 4d79c462db |  |
|  | c44ef6a1dc |  |
|  | f0996fcfa7 |  |
|  | 54bc384d37 |  |
|  | 504fc1efe8 |  |
|  | f4179b93fb |  |
|  | 564252c198 |  |
|  | 802a7a4463 |  |
|  | 3b3d6ba13c |  |
|  | 7350bf58e2 |  |
|  | d37e29afc6 |  |
|  | 40de8c9e23 |  |
|  | c81eac13c8 |  |
|  | a6e1860f0d |  |
|  | 03eb2d81f0 |  |
|  | 171710b5e8 |  |
|  | bed76429c2 |  |
|  | d19f9b5062 |  |
|  | 38081d9822 |  |
|  | 54e278d3f7 |  |
|  | 181ed1b1a5 |  |
|  | fb2d325ccb |  |
|  | 5f94a52537 |  |
|  | c69b50c5d2 |  |
|  | 1c72f89178 |  |
|  | 14bd16da14 |  |
|  | 11a57f4618 |  |
|  | 57f35aff90 |  |
|  | 60e63a307f |  |
|  | 175e878ea6 |  |
|  | 5c960a3213 |  |
|  | 5dfb299e37 |  |
|  | 3890d4d9d1 |  |
|  | 77c62d6e7d |  |
|  | ba54b53194 |  |
|  | b4ef7352f2 |  |
|  | 1ce3368530 |  |
|  | a4b32f3cb7 |  |
|  | ee9cc05ae0 |  |
|  | b8ccf2b0d6 |  |
|  | 886b499b94 |  |
|  | 07924d5621 |  |
|  | 43f3367ae4 |  |
|  | 454c73f42f |  |
|  | 041df698d5 |  |
|  | 97081f1219 |  |
|  | f6792bf080 |  |
|  | 88635f31d6 |  |
|  | abd0f115fc |  |
|  | e9766c76c1 |  |
|  | 570506b324 |  |
|  | 11889880eb |  |
|  | a86abde893 |  |
|  | 2cfe3360d8 |  |
|  | 60d75cb8ee |  |
|  | 68838e310a |  |
|  | 161de6cb7c |  |
|  | af5a9b644b |  |
|  | 785426eab5 |  |
|  | 9267aef498 |  |
|  | ae27a07578 |  |
|  | 131b2a35aa |  |
|  | 5a89601b24 |  |
|  | 2528bbc552 |  |
|  | 7c3a480003 |  |
|  | 80eac8443d |  |
|  | a97234c48d |  |
|  | 53ea58c2f6 |  |
|  | d867524c6b |  |
|  | 5edf9bde78 |  |
|  | 770ea55872 |  |
|  | 4eb0101c5b |  |
|  | 5d7af94abf |  |
|  | b729b8f7c8 |  |
|  | 064e69d943 |  |
|  | d880931951 |  |
|  | f24741cd32 |  |
|  | 45c7017e83 |  |
|  | 7cfb891e6b |  |
|  | fc8604e896 |  |
|  | 6b5e94103d |  |
|  | aee4679ae5 |  |
|  | 2c2c930fce |  |
|  | 3f309e4db5 |  |
|  | d26be402db |  |
|  | a571e83005 |  |
|  | 10d5228eb2 |  |
|  | 7ed49b476f |  |
|  | 5396b90695 |  |
|  | a6983d4e7b |  |
|  | a3d1c76f67 |  |
|  | 15fab226b7 |  |
|  | 5a065d5a05 |  |
|  | de81f3ffbb |  |
|  | 9103369cf6 |  |
|  | 7be36e6d0d |  |
@@ -38,6 +38,8 @@ module.exports = {
// disabled because XAPI objects are using camel case
camelcase: ['off'],

'react/jsx-handler-names': 'off',

'no-console': ['error', { allow: ['warn', 'error'] }],
'no-var': 'error',
'node/no-extraneous-import': 'error',

@@ -37,7 +37,7 @@
"@babel/preset-flow": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^5.1.3",
"rimraf": "^2.6.2"
"rimraf": "^3.0.0"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

@@ -16,7 +16,7 @@
},
"dependencies": {
"golike-defer": "^0.4.1",
"xen-api": "^0.27.1"
"xen-api": "^0.27.2"
},
"scripts": {
"postversion": "npm publish"

@@ -1,6 +1,6 @@
{
"name": "@xen-orchestra/cron",
"version": "1.0.3",
"version": "1.0.4",
"license": "ISC",
"description": "Focused, well maintained, cron parser/scheduler",
"keywords": [

@@ -47,7 +47,7 @@
"@babel/preset-env": "^7.0.0",
"@babel/preset-flow": "^7.0.0",
"cross-env": "^5.1.3",
"rimraf": "^2.6.2"
"rimraf": "^3.0.0"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

@@ -5,9 +5,16 @@ import parse from './parse'

const MAX_DELAY = 2 ** 31 - 1

function nextDelay(schedule) {
const now = schedule._createDate()
return next(schedule._schedule, now) - now
}

class Job {
constructor(schedule, fn) {
const wrapper = () => {
this._isRunning = true

let result
try {
result = fn()

@@ -22,23 +29,34 @@ class Job {
}
}
const scheduleNext = () => {
const delay = schedule._nextDelay()
this._timeout =
delay < MAX_DELAY
? setTimeout(wrapper, delay)
: setTimeout(scheduleNext, MAX_DELAY)
this._isRunning = false

if (this._isEnabled) {
const delay = nextDelay(schedule)
this._timeout =
delay < MAX_DELAY
? setTimeout(wrapper, delay)
: setTimeout(scheduleNext, MAX_DELAY)
}
}

this._isEnabled = false
this._isRunning = false
this._scheduleNext = scheduleNext
this._timeout = undefined
}

start() {
this.stop()
this._scheduleNext()

this._isEnabled = true
if (!this._isRunning) {
this._scheduleNext()
}
}

stop() {
this._isEnabled = false
clearTimeout(this._timeout)
}
}

@@ -68,11 +86,6 @@ class Schedule {
return dates
}

_nextDelay() {
const now = this._createDate()
return next(this._schedule, now) - now
}

startJob(fn) {
const job = this.createJob(fn)
job.start()
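The hunks above move scheduling behind an `_isEnabled` flag that `start()` sets and `stop()` clears, so a job whose callback is still pending when `stop()` is called no longer queues another run once that callback settles. As a minimal sketch of the intended behaviour, using only the public API that appears in the spec below (`doSomeAsyncWork` is a hypothetical placeholder for the job's actual work):

```js
import { createSchedule } from '@xen-orchestra/cron'

// hypothetical async task, stands in for whatever the job actually does
const doSomeAsyncWork = () => new Promise(resolve => setTimeout(resolve, 10e3))

const job = createSchedule('* * * * *').createJob(() => doSomeAsyncWork())

job.start()
// ... later, possibly while doSomeAsyncWork() is still pending:
job.stop() // with the _isEnabled guard, no further run is scheduled when it settles
```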
@xen-orchestra/cron/src/index.spec.js (new file, 62 lines)
@@ -0,0 +1,62 @@
/* eslint-env jest */

import { createSchedule } from './'

describe('issues', () => {
test('stop during async execution', async () => {
let nCalls = 0
let resolve, promise

const job = createSchedule('* * * * *').createJob(() => {
++nCalls

// eslint-disable-next-line promise/param-names
promise = new Promise(r => {
resolve = r
})
return promise
})

job.start()
jest.runAllTimers()

expect(nCalls).toBe(1)

job.stop()

resolve()
await promise

jest.runAllTimers()
expect(nCalls).toBe(1)
})

test('stop then start during async job execution', async () => {
let nCalls = 0
let resolve, promise

const job = createSchedule('* * * * *').createJob(() => {
++nCalls

// eslint-disable-next-line promise/param-names
promise = new Promise(r => {
resolve = r
})
return promise
})

job.start()
jest.runAllTimers()

expect(nCalls).toBe(1)

job.stop()
job.start()

resolve()
await promise

jest.runAllTimers()
expect(nCalls).toBe(2)
})
})
@@ -35,7 +35,7 @@
"@babel/preset-flow": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^5.1.3",
"rimraf": "^2.6.2"
"rimraf": "^3.0.0"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

@@ -34,7 +34,7 @@
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^5.1.3",
"rimraf": "^2.6.2"
"rimraf": "^3.0.0"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

@@ -49,7 +49,7 @@
"cross-env": "^5.1.3",
"dotenv": "^8.0.0",
"index-modules": "^0.3.0",
"rimraf": "^2.6.2"
"rimraf": "^3.0.0"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

@@ -40,7 +40,7 @@
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^5.1.3",
"index-modules": "^0.3.0",
"rimraf": "^2.6.2"
"rimraf": "^3.0.0"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

@@ -37,7 +37,7 @@
"babel-plugin-dev": "^1.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^5.1.3",
"rimraf": "^2.6.2"
"rimraf": "^3.0.0"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
@xen-orchestra/template/.babelrc.js (new file, 3 lines)
@@ -0,0 +1,3 @@
module.exports = require('../../@xen-orchestra/babel-config')(
require('./package.json')
)

@xen-orchestra/template/README.md (new file, 62 lines)
@@ -0,0 +1,62 @@
# @xen-orchestra/template [![Build Status](https://travis-ci.org/vatesfr/xen-orchestra.png?branch=master)](https://travis-ci.org/vatesfr/xen-orchestra)

## Install

Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/template):

```
> npm install --save @xen-orchestra/template
```

## Usage

Create a string replacer based on a pattern and a list of rules.

```js
const myReplacer = compileTemplate('{name}_COPY_\{name}_{id}_%\%', {
'{name}': vm => vm.name_label,
'{id}': vm => vm.id,
'%': (_, i) => i
})

const newString = myReplacer({
name_label: 'foo',
id: 42,
}, 32)

newString === 'foo_COPY_{name}_42_32%' // true
```

## Development

```
# Install dependencies
> yarn

# Run the tests
> yarn test

# Continuously compile
> yarn dev

# Continuously run the tests
> yarn dev-test

# Build for production (automatically called by npm install)
> yarn build
```

## Contributions

Contributions are *very* welcomed, either on the documentation or on
the code.

You may:

- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
you've encountered;
- fork and create a pull request.

## License

ISC © [Vates SAS](https://vates.fr)

@xen-orchestra/template/package.json (new file, 46 lines)
@@ -0,0 +1,46 @@
{
"name": "@xen-orchestra/template",
"version": "0.1.0",
"license": "ISC",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/template",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "@xen-orchestra/template",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"author": {
"name": "Julien Fontanet",
"email": "julien.fontanet@vates.fr"
},
"preferGlobal": false,
"main": "dist/",
"files": [
"dist/"
],
"browserslist": [
">2%"
],
"engines": {
"node": ">=6"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"cross-env": "^5.1.3",
"rimraf": "^3.0.0"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
"clean": "rimraf dist/",
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build",
"postversion": "npm publish --access public"
},
"dependencies": {
"lodash": "^4.17.15"
}
}

@xen-orchestra/template/src/index.js (new file, 19 lines)
@@ -0,0 +1,19 @@
import escapeRegExp from 'lodash/escapeRegExp'

const compareLengthDesc = (a, b) => b.length - a.length

export function compileTemplate(pattern, rules) {
const matches = Object.keys(rules)
.sort(compareLengthDesc)
.map(escapeRegExp)
.join('|')
const regExp = new RegExp(`\\\\(?:\\\\|${matches})|${matches}`, 'g')
return (...params) =>
pattern.replace(regExp, match => {
if (match[0] === '\\') {
return match.slice(1)
}
const rule = rules[match]
return typeof rule === 'function' ? rule(...params) : rule
})
}

@xen-orchestra/template/src/index.spec.js (new file, 14 lines)
@@ -0,0 +1,14 @@
/* eslint-env jest */
import { compileTemplate } from '.'

it("correctly replaces the template's variables", () => {
const replacer = compileTemplate(
'{property}_\\{property}_\\\\{property}_{constant}_%_FOO',
{
'{property}': obj => obj.name,
'{constant}': 1235,
'%': (_, i) => i,
}
)
expect(replacer({ name: 'bar' }, 5)).toBe('bar_{property}_\\bar_1235_5_FOO')
})
CHANGELOG.md (103 changes)
@@ -4,16 +4,105 @@

### Enhancements

- [SR/new] Clarify address formats [#4450](https://github.com/vatesfr/xen-orchestra/issues/4450) (PR [#4460](https://github.com/vatesfr/xen-orchestra/pull/4460))
- [Backup NG/New] Show warning if zstd compression is not supported on a VM [#3892](https://github.com/vatesfr/xen-orchestra/issues/3892) (PRs [#4411](https://github.com/vatesfr/xen-orchestra/pull/4411))
- [VM/disks] Don't hide disks that are attached to the same VM twice [#4400](https://github.com/vatesfr/xen-orchestra/issues/4400) (PR [#4414](https://github.com/vatesfr/xen-orchestra/pull/4414))
- [VM/console] Add a button to connect to the VM via the local SSH client (PR [#4415](https://github.com/vatesfr/xen-orchestra/pull/4415))
- [SDN Controller] Add possibility to encrypt private networks (PR [#4441](https://github.com/vatesfr/xen-orchestra/pull/4441))
- [SDN Controller] Ability to configure MTU for private networks (PR [#4491](https://github.com/vatesfr/xen-orchestra/pull/4491))
- [VM Export] Filenames are now prefixed with datetime [#4503](https://github.com/vatesfr/xen-orchestra/issues/4503)
- [Backups] Improve performance by caching VM backups listing (PR [#4509](https://github.com/vatesfr/xen-orchestra/pull/4509))

### Bug fixes

- [PBD] Obfuscate cifs password from device config [#4384](https://github.com/vatesfr/xen-orchestra/issues/4384) (PR [#4401](https://github.com/vatesfr/xen-orchestra/pull/4401))
- [XOSAN] Fix "invalid parameters" error on creating a SR (PR [#4478](https://github.com/vatesfr/xen-orchestra/pull/4478))
- [Patching] Avoid overloading XCP-ng by reducing the frequency of yum update checks [#4358](https://github.com/vatesfr/xen-orchestra/issues/4358) (PR [#4477](https://github.com/vatesfr/xen-orchestra/pull/4477))
- [Network] Fix inability to create a bonded network (PR [#4489](https://github.com/vatesfr/xen-orchestra/pull/4489))
- [Backup restore & Replication] Don't copy `sm_config` to new VDIs which might leads to useless coalesces [#4482](https://github.com/vatesfr/xen-orchestra/issues/4482) (PR [#4484](https://github.com/vatesfr/xen-orchestra/pull/4484))
- [Home] Fix intermediary "no results" display showed on filtering items [#4420](https://github.com/vatesfr/xen-orchestra/issues/4420) (PR [#4456](https://github.com/vatesfr/xen-orchestra/pull/4456)
- [Backup NG/New schedule] Properly show user errors in the form [#3831](https://github.com/vatesfr/xen-orchestra/issues/3831) (PR [#4131](https://github.com/vatesfr/xen-orchestra/pull/4131))
- [VM/Advanced] Fix `"vm.set_domain_type" is not a function` error on switching virtualization mode (PV/HVM) [#4348](https://github.com/vatesfr/xen-orchestra/issues/4348) (PR [#4504](https://github.com/vatesfr/xen-orchestra/pull/4504))
- [Backup NG/logs] Show warning when zstd compression is selected but not supported [#3892](https://github.com/vatesfr/xen-orchestra/issues/3892) (PR [#4375](https://github.com/vatesfr/xen-orchestra/pull/4375)
- [Patches] Fix patches installation for CH 8.0 (PR [#4511](https://github.com/vatesfr/xen-orchestra/pull/4511))
- [Network] Fix inability to set a network name [#4514](https://github.com/vatesfr/xen-orchestra/issues/4514) (PR [4510](https://github.com/vatesfr/xen-orchestra/pull/4510))
- [Backup NG] Fix race conditions that could lead to disabled jobs still running (PR [4510](https://github.com/vatesfr/xen-orchestra/pull/4510))
- [XOA] Remove "Updates" and "Licenses" tabs for non admin users (PR [#4526](https://github.com/vatesfr/xen-orchestra/pull/4526))
- [New VM] Ability to escape [cloud config template](https://xen-orchestra.com/blog/xen-orchestra-5-21/#cloudconfigtemplates) variables [#4486](https://github.com/vatesfr/xen-orchestra/issues/4486) (PR [#4501](https://github.com/vatesfr/xen-orchestra/pull/4501))
- [Backup NG] Properly log and report if job is already running [#4497](https://github.com/vatesfr/xen-orchestra/issues/4497) (PR [4534](https://github.com/vatesfr/xen-orchestra/pull/4534))

### Released packages

- @xen-orchestra/cron v1.0.4
- xo-server-sdn-controller v0.3.0
- @xen-orchestra/template v0.1.0
- xo-server v5.50.0
- xo-web v5.50.0

## **5.38.0** (2019-08-29)

![Channel: stable](https://badgen.net/badge/channel/stable/green)

### Enhancements

- [VM/Attach disk] Display confirmation modal when VDI is already attached [#3381](https://github.com/vatesfr/xen-orchestra/issues/3381) (PR [#4366](https://github.com/vatesfr/xen-orchestra/pull/4366))
- [Zstd]
- [VM/copy, VM/export] Only show zstd option when it's supported [#3892](https://github.com/vatesfr/xen-orchestra/issues/3892) (PRs [#4326](https://github.com/vatesfr/xen-orchestra/pull/4326) [#4368](https://github.com/vatesfr/xen-orchestra/pull/4368))
- [VM/Bulk copy] Show warning if zstd compression is not supported on a VM [#3892](https://github.com/vatesfr/xen-orchestra/issues/3892) (PR [#4346](https://github.com/vatesfr/xen-orchestra/pull/4346))
- [VM import & Continuous Replication] Enable `guessVhdSizeOnImport` by default, this fix some `VDI_IO_ERROR` with XenServer 7.1 and XCP-ng 8.0 (PR [#4436](https://github.com/vatesfr/xen-orchestra/pull/4436))
- [SDN Controller] Add possibility to create multiple GRE networks and VxLAN networks within a same pool (PR [#4435](https://github.com/vatesfr/xen-orchestra/pull/4435))
- [SDN Controller] Add possibility to create cross-pool private networks (PR [#4405](https://github.com/vatesfr/xen-orchestra/pull/4405))

### Bug fixes

- [SR/General] Display VDI VM name in SR usage graph (PR [#4370](https://github.com/vatesfr/xen-orchestra/pull/4370))
- [VM/Attach disk] Fix checking VDI mode (PR [#4373](https://github.com/vatesfr/xen-orchestra/pull/4373))
- [VM revert] Snapshot before: add admin ACLs on created snapshot [#4331](https://github.com/vatesfr/xen-orchestra/issues/4331) (PR [#4391](https://github.com/vatesfr/xen-orchestra/pull/4391))
- [Network] Fixed "invalid parameters" error when creating bonded network [#4425](https://github.com/vatesfr/xen-orchestra/issues/4425) (PR [#4429](https://github.com/vatesfr/xen-orchestra/pull/4429))

### Released packages

- xo-server-sdn-controller v0.2.0
- xo-server-usage-report v0.7.3
- xo-server v5.48.0
- xo-web v5.48.1

## **5.37.1** (2019-08-06)

![Channel: stable](https://badgen.net/badge/channel/stable/green)

### Enhancements

- [SDN Controller] Let the user choose on which PIF to create a private network (PR [#4379](https://github.com/vatesfr/xen-orchestra/pull/4379))

### Bug fixes

- [SDN Controller] Better detect host shutting down to adapt network topology (PR [#4314](https://github.com/vatesfr/xen-orchestra/pull/4314))
- [SDN Controller] Add new hosts to pool's private networks (PR [#4382](https://github.com/vatesfr/xen-orchestra/pull/4382))

### Released packages

- xo-server-sdn-controller v0.1.2

## **5.37.0** (2019-07-25)

### Highlights

- [Pool] Ability to add multiple hosts on the pool [#2402](https://github.com/vatesfr/xen-orchestra/issues/2402) (PR [#3716](https://github.com/vatesfr/xen-orchestra/pull/3716))
- [SR/General] Improve SR usage graph [#3608](https://github.com/vatesfr/xen-orchestra/issues/3608) (PR [#3830](https://github.com/vatesfr/xen-orchestra/pull/3830))
- [VM] Permission to revert to any snapshot for VM operators [#3928](https://github.com/vatesfr/xen-orchestra/issues/3928) (PR [#4247](https://github.com/vatesfr/xen-orchestra/pull/4247))
- [Backup NG] Ability to bypass unhealthy VDI chains check [#4324](https://github.com/vatesfr/xen-orchestra/issues/4324) (PR [#4340](https://github.com/vatesfr/xen-orchestra/pull/4340))
- [VM/console] Multiline copy/pasting [#4261](https://github.com/vatesfr/xen-orchestra/issues/4261) (PR [#4341](https://github.com/vatesfr/xen-orchestra/pull/4341))

### Enhancements

- [Stats] Ability to display last day stats [#4160](https://github.com/vatesfr/xen-orchestra/issues/4160) (PR [#4168](https://github.com/vatesfr/xen-orchestra/pull/4168))
- [Settings/servers] Display servers connection issues [#4300](https://github.com/vatesfr/xen-orchestra/issues/4300) (PR [#4310](https://github.com/vatesfr/xen-orchestra/pull/4310))
- [VM] Permission to revert to any snapshot for VM operators [#3928](https://github.com/vatesfr/xen-orchestra/issues/3928) (PR [#4247](https://github.com/vatesfr/xen-orchestra/pull/4247))
- [VM] Show current operations and progress [#3811](https://github.com/vatesfr/xen-orchestra/issues/3811) (PR [#3982](https://github.com/vatesfr/xen-orchestra/pull/3982))
- [SR/General] Improve SR usage graph [#3608](https://github.com/vatesfr/xen-orchestra/issues/3608) (PR [#3830](https://github.com/vatesfr/xen-orchestra/pull/3830))
- [Backup NG/New] Generate default schedule if no schedule is specified [#4036](https://github.com/vatesfr/xen-orchestra/issues/4036) (PR [#4183](https://github.com/vatesfr/xen-orchestra/pull/4183))
- [Host/Advanced] Ability to edit iSCSI IQN [#4048](https://github.com/vatesfr/xen-orchestra/issues/4048) (PR [#4208](https://github.com/vatesfr/xen-orchestra/pull/4208))
- [Backup NG] Ability to bypass unhealthy VDI chains check [#4324](https://github.com/vatesfr/xen-orchestra/issues/4324) (PR [#4340](https://github.com/vatesfr/xen-orchestra/pull/4340))
- [Pool] Ability to add multiple hosts on the pool [#2402](https://github.com/vatesfr/xen-orchestra/issues/2402) (PR [#3716](https://github.com/vatesfr/xen-orchestra/pull/3716))
- [VM/console] Multiline copy/pasting [#4261](https://github.com/vatesfr/xen-orchestra/issues/4261) (PR [#4341](https://github.com/vatesfr/xen-orchestra/pull/4341))
- [VM,host] Improved state icons/pills (colors and tooltips) (PR [#4363](https://github.com/vatesfr/xen-orchestra/pull/4363))

### Bug fixes

@@ -39,8 +128,6 @@

## **5.36.0** (2019-06-27)

![Channel: stable](https://badgen.net/badge/channel/stable/green)

### Highlights

- [SR/new] Create ZFS storage [#4260](https://github.com/vatesfr/xen-orchestra/issues/4260) (PR [#4266](https://github.com/vatesfr/xen-orchestra/pull/4266))

@@ -78,8 +165,6 @@

## **5.35.0** (2019-05-29)

![Channel: stable](https://badgen.net/badge/channel/stable/green)

### Enhancements

- [VM/general] Display 'Started... ago' instead of 'Halted... ago' for paused state [#3750](https://github.com/vatesfr/xen-orchestra/issues/3750) (PR [#4170](https://github.com/vatesfr/xen-orchestra/pull/4170))

@@ -7,13 +7,18 @@

> Users must be able to say: “Nice enhancement, I'm eager to test it”

- [VM/copy] Only show zstd option when it's supported [#3892](https://github.com/vatesfr/xen-orchestra/issues/3892) (PR [#4326](https://github.com/vatesfr/xen-orchestra/pull/4326))
- [Settings/Logs] Differenciate XS/XCP-ng errors from XO errors [#4101](https://github.com/vatesfr/xen-orchestra/issues/4101) (PR [#4385](https://github.com/vatesfr/xen-orchestra/pull/4385))
- [Backups] Improve performance by caching logs consolidation (PR [#4541](https://github.com/vatesfr/xen-orchestra/pull/4541))
- [New VM] Cloud Init available for all plans (PR [#4543](https://github.com/vatesfr/xen-orchestra/pull/4543))
- [Servers] IPv6 addresses can be used [#4520](https://github.com/vatesfr/xen-orchestra/issues/4520) (PR [#4521](https://github.com/vatesfr/xen-orchestra/pull/4521)) \
Note: They must enclosed in brackets to differentiate with the port, e.g.: `[2001:db8::7334]` or `[ 2001:db8::7334]:4343`
- [HUB] VM template store [#1918](https://github.com/vatesfr/xen-orchestra/issues/1918) (PR [#4442](https://github.com/vatesfr/xen-orchestra/pull/4442))

### Bug fixes

> Users must be able to say: “I had this issue, happy to know it's fixed”

- [SDN Controller] Better detect host shutting down to adapt network topology (PR [#4314](https://github.com/vatesfr/xen-orchestra/pull/4314))
- [Host] Fix an issue where host was wrongly reporting time inconsistency (PR [#4540](https://github.com/vatesfr/xen-orchestra/pull/4540))

### Released packages

@@ -22,6 +27,7 @@
>
> Rule of thumb: add packages on top.

- xo-server-sdn-controller v0.1.2
- xo-server v5.47.0
- xo-web v5.47.0
- xen-api v0.27.2
- xo-server-cloud v0.3.0
- xo-server v5.51.0
- xo-web v5.51.0
@@ -1,14 +1,19 @@
### Check list

> Check items when done or if not relevant
> Check if done.
>
> Strikethrough if not relevant: ~~example~~ ([doc](https://help.github.com/en/articles/basic-writing-and-formatting-syntax)).

- [ ] PR reference the relevant issue (e.g. `Fixes #007`)
- [ ] PR reference the relevant issue (e.g. `Fixes #007` or `See xoa-support#42`)
- [ ] if UI changes, a screenshot has been added to the PR
- [ ] `CHANGELOG.unreleased.md`:
- enhancement/bug fix entry added
- list of packages to release updated (`${name} v${new version}`)
- [ ] documentation updated
- [ ] **I have tested added/updated features** (and impacted code)
- `CHANGELOG.unreleased.md`:
- [ ] enhancement/bug fix entry added
- [ ] list of packages to release updated (`${name} v${new version}`)
- **I have tested added/updated features** (and impacted code)
- [ ] unit tests (e.g. [`cron/parse.spec.js`](https://github.com/vatesfr/xen-orchestra/blob/b24400b21de1ebafa1099c56bac1de5c988d9202/%40xen-orchestra/cron/src/parse.spec.js))
- [ ] if `xo-server` API changes, the corresponding test has been added to/updated on [`xo-server-test`](https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-server-test)
- [ ] at least manual testing

### Process

@@ -16,3 +21,10 @@
1. mark it as `WiP:` (Work in Progress) if not ready to be merged
1. when you want a review, add a reviewer (and only one)
1. if necessary, update your PR, and re- add a reviewer

From [_the Four Agreements_](https://en.wikipedia.org/wiki/Don_Miguel_Ruiz#The_Four_Agreements):

1. Be impeccable with your word.
1. Don't take anything personally.
1. Don't make assumptions.
1. Always do your best.

@@ -13,11 +13,11 @@ It aims to be easy to use on any device supporting modern web technologies (HTML

## XOA quick deploy

SSH to your XenServer, and execute the following:
Log in to your account and use the deploy form available on [this page](https://xen-orchestra.com/#!/xoa)

```
bash -c "$(curl -s http://xoa.io/deploy)"
```
> **Note:** no data will be sent to our servers, it's running only between your browser and your host!

[![Deploy XOA](./docs/assets/deploy_form.png)](https://xen-orchestra.com/#!/xoa)

### XOA credentials

@@ -55,6 +55,7 @@
* [Emergency Shutdown](emergency_shutdown.md)
* [Auto scalability](auto_scalability.md)
* [Forecaster](forecaster.md)
* [SDN Controller](sdn_controller.md)
* [Recipes](recipes.md)
* [Reverse proxy](reverse_proxy.md)
* [How to contribute?](contributing.md)

docs/assets/deploy_form.png (binary, new file): Binary file not shown. After: 41 KiB
docs/assets/sdn-controller.png (binary, new file): Binary file not shown. After: 85 KiB

@@ -15,5 +15,6 @@ We've made multiple categories to help you to find what you need:
* [Job Manager](scheduler.html)
* [Alerts](alerts.html)
* [Load balancing](load_balancing.html)
* [SDN Controller](sdn_controller.html)

![](./assets/visualizationdashboard.png)

@@ -1,14 +1,10 @@

# Installation

SSH to your XenServer/XCP-ng host and execute the following:
Log in to your account and use the deploy form available on [this page](https://xen-orchestra.com/#!/xoa)

```
bash -c "$(curl -s http://xoa.io/deploy)"
```
![](./assets/deploy_form.png)

This will automatically download/import/start the XOA appliance. Nothing is changed on your host itself, it's 100% safe.

## [More on XOA](xoa.md)
## [More on XOA and alternate deploy](xoa.md)

![](./assets/xoa-started.png)

docs/sdn_controller.md (new file, 60 lines)
@@ -0,0 +1,60 @@
# SDN Controller

> SDN Controller is available in XOA 5.44 and higher

The SDN Controller enables a user to **create pool-wide and cross-pool** (since XOA 5.48.1) **private networks**.

![](./assets/sdn-controller.png)

## How does it work?

Please read the [dedicated devblog on the SDN Controller](https://xen-orchestra.com/blog/xo-sdn-controller/) and its [extension for cross-pool private networks](https://xen-orchestra.com/blog/devblog-3-extending-the-sdn-controller/).

## Usage

### Network creation

In the network creation view:
- Select a `pool`
- Select `Private network`
- Select an interface on which to create the network's tunnels
- Select the encapsulation: a choice is offered between `GRE` and `VxLAN`, if `VxLAN` is chosen, then port 4789 must be open for UDP traffic on all the network's hosts (see [the requirements](#vxlan))
- Choose if the network should be encrypted or not (see [the requirements](#encryption) to use encryption)
- Select other `pool`s to add them to the network if desired
- For each added `pool`: select an interface on which to create the tunnels
- Create the network
- Have fun! ☺

***NB:***
- All hosts in a private network must be able to reach the other hosts' management interface.
> The term ‘management interface’ is used to indicate the IP-enabled NIC that carries the management traffic.
- Only 1 encrypted GRE network and 1 encrypted VxLAN network per pool can exist at a time due to Open vSwitch limitation.

### Configuration

Like all other xo-server plugins, it can be configured directly via
the web interface, see [the plugin documentation](https://xen-orchestra.com/docs/plugins.html). A sketch of the two settings is shown after this list.

The plugin's configuration contains:
- `cert-dir`: The path where the plugin will look for the certificates to create SSL connections with the hosts.
If none is provided, the plugin will create its own self-signed certificates.
- `override-certs`: Enable to uninstall the existing SDN controller CA certificate in order to replace it with the plugin's one.
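For illustration only, the two settings described above could look like the following once filled in. The keys come from this documentation, but the object literal and the example path are assumptions; in practice the values are entered through the xo-server plugins page.

```js
// Hypothetical values for the sdn-controller plugin settings described above.
const sdnControllerSettings = {
  // assumed example path; leave unset to let the plugin generate self-signed certificates
  'cert-dir': '/var/lib/xo-server/sdn-controller-certs',
  // set to true to replace an existing SDN controller CA certificate with the plugin's one
  'override-certs': false,
}
```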
## Requirements

### VxLAN

- On XCP-ng prior to 7.6:
- To be able to use `VxLAN`, the following line needs to be added, if not already present, in `/etc/sysconfig/iptables` of all the hosts where `VxLAN` is wanted: `-A xapi-INPUT -p udp -m conntrack --ctstate NEW -m udp --dport 4789 -j ACCEPT`

### Encryption

> Encryption is not available prior to 8.0.

- On XCP-ng 8.0:
- To be able to encrypt the networks, `openvswitch-ipsec` package must be installed on all the hosts:
- `yum install openvswitch-ipsec --enablerepo=xcp-ng-testing`
- `systemctl enable ipsec`
- `systemctl enable openvswitch-ipsec`
- `systemctl start ipsec`
- `systemctl start openvswitch-ipsec`

@@ -110,16 +110,17 @@ $ systemctl restart xo-server

### Behind a transparent proxy

If your are behind a transparent proxy, you'll probably have issues with the updater (SSL/TLS issues).
If you're behind a transparent proxy, you'll probably have issues with the updater (SSL/TLS issues).

First, run the following commands:
Run the following commands to allow the updater to work:

```
$ sudo -s
$ echo NODE_TLS_REJECT_UNAUTHORIZED=0 >> /etc/xo-appliance/env
$ npm config -g set strict-ssl=false
$ systemctl restart xoa-updater
```

Then, restart the updater with `systemctl restart xoa-updater`.
Now try running an update again.

### Updating SSL self-signed certificate
docs/xoa.md (18 changes)
@@ -22,26 +22,36 @@ For use on huge infrastructure (more than 500+ VMs), feel free to increase the R

### The quickest way

The fastest way to install Xen Orchestra is to use our appliance deploy script. You can deploy it by connecting to your XenServer host and executing the following:
The **fastest and most secure way** to install Xen Orchestra is to use our web deploy page. Go on https://xen-orchestra.com/#!/xoa and follow instructions.

> **Note:** no data will be sent to our servers, it's running only between your browser and your host!

![](./assets/deploy_form.png)

### Via a bash script

Alternatively, you can deploy it by connecting to your XenServer host and executing the following:

```
bash -c "$(curl -s http://xoa.io/deploy)"
```
**Note:** This won't write or modify anything on your XenServer host: it will just import the XOA VM into your default storage repository.

Now follow the instructions:
> **Note:** This won't write or modify anything on your XenServer host: it will just import the XOA VM into your default storage repository.

Follow the instructions:

* Your IP configuration will be requested: it's set to **DHCP by default**, otherwise you can enter a fixed IP address (eg `192.168.0.10`)
* If DHCP is selected, the script will continue automatically. Otherwise a netmask, gateway, and DNS should be provided.
* XOA will be deployed on your default storage repository. You can move it elsewhere anytime after.

### The alternative
### Via download the XVA

Download XOA from xen-orchestra.com. Once you've got the XVA file, you can import it with `xe vm-import filename=xoa_unified.xva` or via XenCenter.

After the VM is imported, you just need to start it with `xe vm-start vm="XOA"` or with XenCenter.

## First Login

Once you have started the VM, you can access the web UI by putting the IP you configured during deployment into your web browser. If you did not configure an IP or are unsure, try one of the following methods to find it:

* Run `xe vm-list params=name-label,networks | grep -A 1 XOA` on your host
@@ -8,8 +8,8 @@
"benchmark": "^2.1.4",
"eslint": "^6.0.1",
"eslint-config-prettier": "^6.0.0",
"eslint-config-standard": "12.0.0",
"eslint-config-standard-jsx": "^6.0.2",
"eslint-config-standard": "14.1.0",
"eslint-config-standard-jsx": "^8.1.0",
"eslint-plugin-eslint-comments": "^3.1.1",
"eslint-plugin-import": "^2.8.0",
"eslint-plugin-node": "^9.0.1",

@@ -17,7 +17,7 @@
"eslint-plugin-react": "^7.6.1",
"eslint-plugin-standard": "^4.0.0",
"exec-promise": "^0.7.0",
"flow-bin": "^0.102.0",
"flow-bin": "^0.106.3",
"globby": "^10.0.0",
"husky": "^3.0.0",
"jest": "^24.1.0",

@@ -42,9 +42,11 @@
"testEnvironment": "node",
"testPathIgnorePatterns": [
"/dist/",
"/xo-server-test/",
"/xo-web/"
],
"testRegex": "\\.spec\\.js$",
"timers": "fake",
"transform": {
"\\.jsx?$": "babel-jest"
}

@@ -36,7 +36,7 @@
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^5.1.1",
"rimraf": "^2.6.2"
"rimraf": "^3.0.0"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

@@ -34,7 +34,7 @@
"@babel/preset-env": "^7.0.0",
"@babel/preset-flow": "^7.0.0",
"cross-env": "^5.1.3",
"rimraf": "^2.6.2"
"rimraf": "^3.0.0"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

@@ -28,7 +28,7 @@
},
"dependencies": {
"@xen-orchestra/fs": "^0.10.1",
"cli-progress": "^2.0.0",
"cli-progress": "^3.1.0",
"exec-promise": "^0.7.0",
"getopts": "^2.2.3",
"struct-fu": "^1.2.0",

@@ -43,7 +43,7 @@
"execa": "^2.0.2",
"index-modules": "^0.3.0",
"promise-toolbox": "^0.13.0",
"rimraf": "^2.6.1",
"rimraf": "^3.0.0",
"tmp": "^0.1.0"
},
"scripts": {

@@ -43,7 +43,7 @@
"get-stream": "^5.1.0",
"index-modules": "^0.3.0",
"readable-stream": "^3.0.6",
"rimraf": "^2.6.2",
"rimraf": "^3.0.0",
"tmp": "^0.1.0"
},
"scripts": {

@@ -41,7 +41,7 @@
"human-format": "^0.10.0",
"lodash": "^4.17.4",
"pw": "^0.0.4",
"xen-api": "^0.27.1"
"xen-api": "^0.27.2"
},
"devDependencies": {
"@babel/cli": "^7.1.5",

@@ -49,7 +49,7 @@
"@babel/preset-env": "^7.1.5",
"babel-plugin-lodash": "^3.2.11",
"cross-env": "^5.1.4",
"rimraf": "^2.6.1"
"rimraf": "^3.0.0"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
packages/xen-api/examples/package-lock.json (generated, new file, 189 lines)
@@ -0,0 +1,189 @@
{
"requires": true,
"lockfileVersion": 1,
"dependencies": {
"core-util-is": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz",
"integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac="
},
"debug": {
"version": "2.6.9",
"resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
"integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
"requires": {
"ms": "2.0.0"
}
},
"event-loop-delay": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/event-loop-delay/-/event-loop-delay-1.0.0.tgz",
"integrity": "sha512-8YtyeIWHXrvTqlAhv+fmtaGGARmgStbvocERYzrZ3pwhnQULe5PuvMUTjIWw/emxssoaftfHZsJtkeY8xjiXCg==",
"requires": {
"napi-macros": "^1.8.2",
"node-gyp-build": "^3.7.0"
}
},
"getopts": {
"version": "2.2.5",
"resolved": "https://registry.npmjs.org/getopts/-/getopts-2.2.5.tgz",
"integrity": "sha512-9jb7AW5p3in+IiJWhQiZmmwkpLaR/ccTWdWQCtZM66HJcHHLegowh4q4tSD7gouUyeNvFWRavfK9GXosQHDpFA=="
},
"golike-defer": {
"version": "0.4.1",
"resolved": "https://registry.npmjs.org/golike-defer/-/golike-defer-0.4.1.tgz",
"integrity": "sha512-x8cq/Fvu32T8cnco3CBDRF+/M2LFmfSIysKfecX09uIK3cFdHcEKBTPlPnEO6lwrdxfjkOIU6dIw3EIlEJeS1A=="
},
"human-format": {
"version": "0.10.1",
"resolved": "https://registry.npmjs.org/human-format/-/human-format-0.10.1.tgz",
"integrity": "sha512-UzCHToSw3HI9MxH9tYzMr1JbHJbgzr6o0hZCun7sruv59S1leps21bmgpBkkwEvQon5n/2OWKH1iU7BEko02cg=="
},
"inherits": {
"version": "2.0.4",
"resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
"integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="
},
"isarray": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz",
"integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE="
},
"make-error": {
"version": "1.3.5",
"resolved": "https://registry.npmjs.org/make-error/-/make-error-1.3.5.tgz",
"integrity": "sha512-c3sIjNUow0+8swNwVpqoH4YCShKNFkMaw6oH1mNS2haDZQqkeZFlHS3dhoeEbKKmJB4vXpJucU6oH75aDYeE9g=="
},
"ms": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
"integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g="
},
"napi-macros": {
"version": "1.8.2",
"resolved": "https://registry.npmjs.org/napi-macros/-/napi-macros-1.8.2.tgz",
"integrity": "sha512-Tr0DNY4RzTaBG2W2m3l7ZtFuJChTH6VZhXVhkGGjF/4cZTt+i8GcM9ozD+30Lmr4mDoZ5Xx34t2o4GJqYWDGcg=="
},
"node-gyp-build": {
"version": "3.9.0",
"resolved": "https://registry.npmjs.org/node-gyp-build/-/node-gyp-build-3.9.0.tgz",
"integrity": "sha512-zLcTg6P4AbcHPq465ZMFNXx7XpKKJh+7kkN699NiQWisR2uWYOWNWqRHAmbnmKiL4e9aLSlmy5U7rEMUXV59+A=="
},
"prettier-bytes": {
"version": "1.0.4",
"resolved": "https://registry.npmjs.org/prettier-bytes/-/prettier-bytes-1.0.4.tgz",
"integrity": "sha1-mUsCqkb2mcULYle1+qp/4lV+YtY="
},
"process-nextick-args": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz",
"integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag=="
},
"process-top": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/process-top/-/process-top-1.0.0.tgz",
"integrity": "sha512-er8iSmBMslOt5cgIHg9m6zilTPsuUqpEb1yfQ4bDmO80zr/e/5hNn+Tay3CJM/FOBnJo8Bt3fFiDDH6GvIgeAg==",
"requires": {
"event-loop-delay": "^1.0.0",
"prettier-bytes": "^1.0.4"
}
},
"progress-stream": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/progress-stream/-/progress-stream-2.0.0.tgz",
"integrity": "sha1-+sY6Cz0R3qy7CWmrzJOyFLzhntU=",
"requires": {
"speedometer": "~1.0.0",
"through2": "~2.0.3"
}
},
"promise-toolbox": {
"version": "0.13.0",
"resolved": "https://registry.npmjs.org/promise-toolbox/-/promise-toolbox-0.13.0.tgz",
"integrity": "sha512-Z6u7EL9/QyY1zZqeqpEiKS7ygKwZyl0JL0ouno/en6vMliZZc4AmM0aFCrDAVxEyKqj2f3SpkW0lXEfAZsNWiQ==",
"requires": {
"make-error": "^1.3.2"
}
},
"readable-stream": {
"version": "3.4.0",
"resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.4.0.tgz",
"integrity": "sha512-jItXPLmrSR8jmTRmRWJXCnGJsfy85mB3Wd/uINMXA65yrnFo0cPClFIUWzo2najVNSl+mx7/4W8ttlLWJe99pQ==",
"requires": {
"inherits": "^2.0.3",
"string_decoder": "^1.1.1",
"util-deprecate": "^1.0.1"
}
},
"safe-buffer": {
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz",
"integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g=="
},
"speedometer": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/speedometer/-/speedometer-1.0.0.tgz",
"integrity": "sha1-zWccsGdSwivKM3Di8zREC+T8YuI="
},
"stream-parser": {
"version": "0.3.1",
"resolved": "https://registry.npmjs.org/stream-parser/-/stream-parser-0.3.1.tgz",
"integrity": "sha1-FhhUhpRCACGhGC/wrxkRwSl2F3M=",
"requires": {
"debug": "2"
}
},
"string_decoder": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz",
"integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==",
"requires": {
"safe-buffer": "~5.1.0"
}
},
"throttle": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/throttle/-/throttle-1.0.3.tgz",
"integrity": "sha1-ijLkoV8XY9mXlIMXxevjrYpB5Lc=",
"requires": {
"readable-stream": ">= 0.3.0",
"stream-parser": ">= 0.0.2"
}
},
"through2": {
"version": "2.0.5",
"resolved": "https://registry.npmjs.org/through2/-/through2-2.0.5.tgz",
"integrity": "sha512-/mrRod8xqpA+IHSLyGCQ2s8SPHiCDEeQJSep1jqLYeEUClOFG2Qsh+4FU6G9VeqpZnGW/Su8LQGc4YKni5rYSQ==",
"requires": {
"readable-stream": "~2.3.6",
"xtend": "~4.0.1"
},
"dependencies": {
"readable-stream": {
"version": "2.3.6",
"resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.6.tgz",
"integrity": "sha512-tQtKA9WIAhBF3+VLAseyMqZeBjW0AHJoxOtYqSUZNJxauErmLbVm2FW1y+J/YA9dUrAC39ITejlZWhVIwawkKw==",
"requires": {
"core-util-is": "~1.0.0",
"inherits": "~2.0.3",
"isarray": "~1.0.0",
"process-nextick-args": "~2.0.0",
"safe-buffer": "~5.1.1",
"string_decoder": "~1.1.1",
"util-deprecate": "~1.0.1"
}
}
}
},
"util-deprecate": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
"integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8="
},
"xtend": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz",
"integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ=="
}
}
}
@@ -5,7 +5,7 @@
"human-format": "^0.10.1",
"process-top": "^1.0.0",
"progress-stream": "^2.0.0",
"promise-toolbox": "^0.11.0",
"promise-toolbox": "^0.13.0",
"readable-stream": "^3.1.1",
"throttle": "^1.0.3"
}
@@ -1,179 +0,0 @@
# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
# yarn lockfile v1

core-util-is@~1.0.0:
version "1.0.2"
resolved "https://registry.yarnpkg.com/core-util-is/-/core-util-is-1.0.2.tgz#b5fd54220aa2bc5ab57aab7140c940754503c1a7"
integrity sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=

debug@2:
version "2.6.9"
resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.9.tgz#5d128515df134ff327e90a4c93f4e077a536341f"
integrity sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==
dependencies:
ms "2.0.0"

event-loop-delay@^1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/event-loop-delay/-/event-loop-delay-1.0.0.tgz#5af6282549494fd0d868c499cbdd33e027978b8c"
integrity sha512-8YtyeIWHXrvTqlAhv+fmtaGGARmgStbvocERYzrZ3pwhnQULe5PuvMUTjIWw/emxssoaftfHZsJtkeY8xjiXCg==
dependencies:
napi-macros "^1.8.2"
node-gyp-build "^3.7.0"

getopts@^2.2.3:
version "2.2.3"
resolved "https://registry.yarnpkg.com/getopts/-/getopts-2.2.3.tgz#11d229775e2ec2067ed8be6fcc39d9b4bf39cf7d"
integrity sha512-viEcb8TpgeG05+Nqo5EzZ8QR0hxdyrYDp6ZSTZqe2M/h53Bk036NmqG38Vhf5RGirC/Of9Xql+v66B2gp256SQ==

golike-defer@^0.4.1:
version "0.4.1"
resolved "https://registry.yarnpkg.com/golike-defer/-/golike-defer-0.4.1.tgz#7a1cd435d61e461305805d980b133a0f3db4e1cc"

human-format@^0.10.1:
version "0.10.1"
resolved "https://registry.yarnpkg.com/human-format/-/human-format-0.10.1.tgz#107793f355912e256148d5b5dcf66a0230187ee9"
integrity sha512-UzCHToSw3HI9MxH9tYzMr1JbHJbgzr6o0hZCun7sruv59S1leps21bmgpBkkwEvQon5n/2OWKH1iU7BEko02cg==

inherits@^2.0.3, inherits@~2.0.3:
version "2.0.3"
resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.3.tgz#633c2c83e3da42a502f52466022480f4208261de"
integrity sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4=

isarray@~1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/isarray/-/isarray-1.0.0.tgz#bb935d48582cba168c06834957a54a3e07124f11"
integrity sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=

make-error@^1.3.2:
version "1.3.5"
resolved "https://registry.yarnpkg.com/make-error/-/make-error-1.3.5.tgz#efe4e81f6db28cadd605c70f29c831b58ef776c8"
integrity sha512-c3sIjNUow0+8swNwVpqoH4YCShKNFkMaw6oH1mNS2haDZQqkeZFlHS3dhoeEbKKmJB4vXpJucU6oH75aDYeE9g==

ms@2.0.0:
version "2.0.0"
resolved "https://registry.yarnpkg.com/ms/-/ms-2.0.0.tgz#5608aeadfc00be6c2901df5f9861788de0d597c8"
integrity sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=

napi-macros@^1.8.2:
version "1.8.2"
resolved "https://registry.yarnpkg.com/napi-macros/-/napi-macros-1.8.2.tgz#299265c1d8aa401351ad0675107d751228c03eda"
integrity sha512-Tr0DNY4RzTaBG2W2m3l7ZtFuJChTH6VZhXVhkGGjF/4cZTt+i8GcM9ozD+30Lmr4mDoZ5Xx34t2o4GJqYWDGcg==

node-gyp-build@^3.7.0:
version "3.7.0"
resolved "https://registry.yarnpkg.com/node-gyp-build/-/node-gyp-build-3.7.0.tgz#daa77a4f547b9aed3e2aac779eaf151afd60ec8d"
integrity sha512-L/Eg02Epx6Si2NXmedx+Okg+4UHqmaf3TNcxd50SF9NQGcJaON3AtU++kax69XV7YWz4tUspqZSAsVofhFKG2w==

prettier-bytes@^1.0.4:
version "1.0.4"
resolved "https://registry.yarnpkg.com/prettier-bytes/-/prettier-bytes-1.0.4.tgz#994b02aa46f699c50b6257b5faaa7fe2557e62d6"
integrity sha1-mUsCqkb2mcULYle1+qp/4lV+YtY=

process-nextick-args@~2.0.0:
version "2.0.0"
resolved "https://registry.yarnpkg.com/process-nextick-args/-/process-nextick-args-2.0.0.tgz#a37d732f4271b4ab1ad070d35508e8290788ffaa"
integrity sha512-MtEC1TqN0EU5nephaJ4rAtThHtC86dNN9qCuEhtshvpVBkAW5ZO7BASN9REnF9eoXGcRub+pFuKEpOHE+HbEMw==

process-top@^1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/process-top/-/process-top-1.0.0.tgz#52892bedb581c5abf0df2d0aa5c429e34275cc7e"
integrity sha512-er8iSmBMslOt5cgIHg9m6zilTPsuUqpEb1yfQ4bDmO80zr/e/5hNn+Tay3CJM/FOBnJo8Bt3fFiDDH6GvIgeAg==
dependencies:
event-loop-delay "^1.0.0"
prettier-bytes "^1.0.4"

progress-stream@^2.0.0:
version "2.0.0"
resolved "https://registry.yarnpkg.com/progress-stream/-/progress-stream-2.0.0.tgz#fac63a0b3d11deacbb0969abcc93b214bce19ed5"
integrity sha1-+sY6Cz0R3qy7CWmrzJOyFLzhntU=
dependencies:
speedometer "~1.0.0"
through2 "~2.0.3"

promise-toolbox@^0.11.0:
version "0.11.0"
resolved "https://registry.yarnpkg.com/promise-toolbox/-/promise-toolbox-0.11.0.tgz#9ed928355355395072dace3f879879504e07d1bc"
integrity sha512-bjHk0kq+Ke3J3zbkbbJH6kXCyQZbFHwOTrE/Et7vS0uS0tluoV+PLqU/kEyxl8aARM7v04y2wFoDo/wWAEPvjA==
dependencies:
make-error "^1.3.2"

"readable-stream@>= 0.3.0", readable-stream@^3.1.1:
version "3.1.1"
resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-3.1.1.tgz#ed6bbc6c5ba58b090039ff18ce670515795aeb06"
integrity sha512-DkN66hPyqDhnIQ6Jcsvx9bFjhw214O4poMBcIMgPVpQvNy9a0e0Uhg5SqySyDKAmUlwt8LonTBz1ezOnM8pUdA==
dependencies:
inherits "^2.0.3"
string_decoder "^1.1.1"
util-deprecate "^1.0.1"

readable-stream@~2.3.6:
version "2.3.6"
resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.3.6.tgz#b11c27d88b8ff1fbe070643cf94b0c79ae1b0aaf"
integrity sha512-tQtKA9WIAhBF3+VLAseyMqZeBjW0AHJoxOtYqSUZNJxauErmLbVm2FW1y+J/YA9dUrAC39ITejlZWhVIwawkKw==
dependencies:
core-util-is "~1.0.0"
inherits "~2.0.3"
isarray "~1.0.0"
process-nextick-args "~2.0.0"
safe-buffer "~5.1.1"
string_decoder "~1.1.1"
util-deprecate "~1.0.1"

safe-buffer@~5.1.0, safe-buffer@~5.1.1:
version "5.1.2"
resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.1.2.tgz#991ec69d296e0313747d59bdfd2b745c35f8828d"
integrity sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==

speedometer@~1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/speedometer/-/speedometer-1.0.0.tgz#cd671cb06752c22bca3370e2f334440be4fc62e2"
integrity sha1-zWccsGdSwivKM3Di8zREC+T8YuI=

"stream-parser@>= 0.0.2":
version "0.3.1"
resolved "https://registry.yarnpkg.com/stream-parser/-/stream-parser-0.3.1.tgz#1618548694420021a1182ff0af1911c129761773"
integrity sha1-FhhUhpRCACGhGC/wrxkRwSl2F3M=
dependencies:
debug "2"

string_decoder@^1.1.1:
version "1.2.0"
resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-1.2.0.tgz#fe86e738b19544afe70469243b2a1ee9240eae8d"
integrity sha512-6YqyX6ZWEYguAxgZzHGL7SsCeGx3V2TtOTqZz1xSTSWnqsbWwbptafNyvf/ACquZUXV3DANr5BDIwNYe1mN42w==
dependencies:
safe-buffer "~5.1.0"

string_decoder@~1.1.1:
version "1.1.1"
resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-1.1.1.tgz#9cf1611ba62685d7030ae9e4ba34149c3af03fc8"
integrity sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==
dependencies:
safe-buffer "~5.1.0"

throttle@^1.0.3:
version "1.0.3"
resolved "https://registry.yarnpkg.com/throttle/-/throttle-1.0.3.tgz#8a32e4a15f1763d997948317c5ebe3ad8a41e4b7"
integrity sha1-ijLkoV8XY9mXlIMXxevjrYpB5Lc=
dependencies:
readable-stream ">= 0.3.0"
stream-parser ">= 0.0.2"

through2@~2.0.3:
version "2.0.5"
resolved "https://registry.yarnpkg.com/through2/-/through2-2.0.5.tgz#01c1e39eb31d07cb7d03a96a70823260b23132cd"
integrity sha512-/mrRod8xqpA+IHSLyGCQ2s8SPHiCDEeQJSep1jqLYeEUClOFG2Qsh+4FU6G9VeqpZnGW/Su8LQGc4YKni5rYSQ==
dependencies:
readable-stream "~2.3.6"
xtend "~4.0.1"

util-deprecate@^1.0.1, util-deprecate@~1.0.1:
version "1.0.2"
resolved "https://registry.yarnpkg.com/util-deprecate/-/util-deprecate-1.0.2.tgz#450d4dc9fa70de732762fbd2d4a28981419a0ccf"
integrity sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=

xtend@~4.0.1:
version "4.0.1"
resolved "https://registry.yarnpkg.com/xtend/-/xtend-4.0.1.tgz#a5c6d532be656e23db820efb943a1f04998d63af"
integrity sha1-pcbVMr5lbiPbgg77lDofBJmNY68=
@@ -1,6 +1,6 @@
{
  "name": "xen-api",
  "version": "0.27.1",
  "version": "0.27.2",
  "license": "ISC",
  "description": "Connector to the Xen API",
  "keywords": [
@@ -61,7 +61,7 @@
    "@babel/preset-env": "^7.0.0",
    "babel-plugin-lodash": "^3.3.2",
    "cross-env": "^5.1.3",
    "rimraf": "^2.6.1"
    "rimraf": "^3.0.0"
  },
  "scripts": {
    "build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

@@ -1,4 +1,4 @@
const URL_RE = /^(?:(https?:)\/*)?(?:([^:]+):([^@]+)@)?([^/]+?)(?::([0-9]+))?\/?$/
const URL_RE = /^(?:(https?:)\/*)?(?:([^:]+):([^@]+)@)?(?:\[([^\]]+)\]|([^:/]+))(?::([0-9]+))?\/?$/

export default url => {
  const matches = URL_RE.exec(url)
@@ -6,7 +6,15 @@ export default url => {
    throw new Error('invalid URL: ' + url)
  }

  const [, protocol = 'https:', username, password, hostname, port] = matches
  const [
    ,
    protocol = 'https:',
    username,
    password,
    ipv6,
    hostname = ipv6,
    port,
  ] = matches
  const parsedUrl = { protocol, hostname, port }
  if (username !== undefined) {
    parsedUrl.username = decodeURIComponent(username)

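The regex change above adds a bracketed-IPv6 alternative to `URL_RE`, captured in its own group so a plain hostname and an IPv6 literal can be told apart. As a quick, hedged illustration (the standalone snippet and the sample URL are not part of the commit), the new pattern and destructuring behave roughly like this:

```js
// Minimal sketch of the new URL_RE against an IPv6 URL; the sample URL is made up.
const URL_RE = /^(?:(https?:)\/*)?(?:([^:]+):([^@]+)@)?(?:\[([^\]]+)\]|([^:/]+))(?::([0-9]+))?\/?$/

const [, protocol = 'https:', username, password, ipv6, hostname = ipv6, port] =
  URL_RE.exec('https://root:secret@[2001:db8::1]:443/')

console.log({ protocol, username, password, hostname, port })
// → { protocol: 'https:', username: 'root', password: 'secret',
//     hostname: '2001:db8::1', port: '443' }
```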
@@ -57,7 +57,7 @@
    "@babel/preset-flow": "^7.0.0",
    "babel-plugin-lodash": "^3.3.2",
    "cross-env": "^5.1.3",
    "rimraf": "^2.6.2"
    "rimraf": "^3.0.0"
  },
  "scripts": {
    "build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

@@ -199,7 +199,18 @@ function main(args) {
    return exports[fnName](args.slice(1))
  }

  return exports.call(args)
  return exports.call(args).catch(error => {
    if (!(error != null && error.code === 10 && 'errors' in error.data)) {
      throw error
    }

    const lines = [error.message]
    const { errors } = error.data
    errors.forEach(error => {
      lines.push(` property ${error.property}: ${error.message}`)
    })
    throw lines.join('\n')
  })
}
exports = module.exports = main

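The new catch block above turns an "invalid parameters" JSON-RPC error (code 10, carrying per-property validation errors in `error.data.errors`) into a readable multi-line message instead of a raw stack. A hedged sketch, with a made-up error payload, of what that formatting produces:

```js
// Not part of the commit: simulate an error shaped like the one the catch
// block expects and apply the same formatting logic.
const error = {
  code: 10,
  message: 'invalid parameters',
  data: {
    errors: [
      { property: '.vm', message: 'should be string' },
      { property: '.sr', message: 'is required' },
    ],
  },
}

const lines = [error.message]
error.data.errors.forEach(({ property, message }) => {
  lines.push(` property ${property}: ${message}`)
})
console.log(lines.join('\n'))
// invalid parameters
//  property .vm: should be string
//  property .sr: is required
```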
@@ -36,7 +36,7 @@
    "@babel/preset-env": "^7.0.0",
    "cross-env": "^5.1.3",
    "event-to-promise": "^0.8.0",
    "rimraf": "^2.6.1"
    "rimraf": "^3.0.0"
  },
  "scripts": {
    "build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

@@ -37,7 +37,7 @@
    "@babel/preset-env": "^7.0.0",
    "babel-plugin-lodash": "^3.3.2",
    "cross-env": "^5.1.3",
    "rimraf": "^2.6.1"
    "rimraf": "^3.0.0"
  },
  "scripts": {
    "build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

@@ -42,7 +42,7 @@
    "@babel/preset-env": "^7.0.0",
    "babel-plugin-lodash": "^3.3.2",
    "cross-env": "^5.1.3",
    "rimraf": "^2.6.1"
    "rimraf": "^3.0.0"
  },
  "scripts": {
    "build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

@@ -34,7 +34,7 @@
    "babel-plugin-lodash": "^3.3.2",
    "cross-env": "^5.1.3",
    "deep-freeze": "^0.0.1",
    "rimraf": "^2.6.1"
    "rimraf": "^3.0.0"
  },
  "scripts": {
    "build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

@@ -41,7 +41,7 @@
    "@babel/preset-env": "^7.0.0",
    "babel-plugin-lodash": "^3.3.2",
    "cross-env": "^5.1.3",
    "rimraf": "^2.6.1"
    "rimraf": "^3.0.0"
  },
  "scripts": {
    "build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

@@ -36,7 +36,7 @@
  "dependencies": {
    "event-to-promise": "^0.8.0",
    "exec-promise": "^0.7.0",
    "inquirer": "^6.0.0",
    "inquirer": "^7.0.0",
    "ldapjs": "^1.0.1",
    "lodash": "^4.17.4",
    "promise-toolbox": "^0.13.0"
@@ -47,7 +47,7 @@
    "@babel/preset-env": "^7.0.0",
    "babel-plugin-lodash": "^3.3.2",
    "cross-env": "^5.1.3",
    "rimraf": "^2.6.1"
    "rimraf": "^3.0.0"
  },
  "scripts": {
    "build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

@@ -41,7 +41,7 @@
    "@babel/preset-env": "^7.0.0",
    "babel-preset-env": "^1.6.1",
    "cross-env": "^5.1.3",
    "rimraf": "^2.6.1"
    "rimraf": "^3.0.0"
  },
  "scripts": {
    "build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

@@ -49,7 +49,7 @@
    "@babel/preset-env": "^7.0.0",
    "babel-plugin-lodash": "^3.3.2",
    "cross-env": "^5.1.3",
    "rimraf": "^2.6.1"
    "rimraf": "^3.0.0"
  },
  "scripts": {
    "build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

@@ -1,6 +1,6 @@
{
  "name": "xo-server-cloud",
  "version": "0.2.4",
  "version": "0.3.0",
  "license": "ISC",
  "description": "",
  "keywords": [
@@ -40,7 +40,7 @@
    "@babel/core": "^7.0.0",
    "@babel/preset-env": "^7.0.0",
    "cross-env": "^5.1.3",
    "rimraf": "^2.5.4"
    "rimraf": "^3.0.0"
  },
  "scripts": {
    "build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

@@ -20,9 +20,13 @@ class XoServerCloud {
  }

  async load() {
    const getResourceCatalog = () => this._getCatalog()
    getResourceCatalog.description = 'Get the list of all available resources'
    const getResourceCatalog = this._getCatalog.bind(this)
    getResourceCatalog.description =
      "Get the list of user's available resources"
    getResourceCatalog.permission = 'admin'
    getResourceCatalog.params = {
      filters: { type: 'object', optional: true },
    }

    const registerResource = ({ namespace }) =>
      this._registerResource(namespace)
@@ -34,8 +38,29 @@ class XoServerCloud {
    }
    registerResource.permission = 'admin'

    const downloadAndInstallResource = this._downloadAndInstallResource.bind(
      this
    )

    downloadAndInstallResource.description =
      'Download and install a resource via cloud plugin'

    downloadAndInstallResource.params = {
      id: { type: 'string' },
      namespace: { type: 'string' },
      version: { type: 'string' },
      sr: { type: 'string' },
    }

    downloadAndInstallResource.resolve = {
      sr: ['sr', 'SR', 'administrate'],
    }

    downloadAndInstallResource.permission = 'admin'

    this._unsetApiMethods = this._xo.addApiMethods({
      cloud: {
        downloadAndInstallResource,
        getResourceCatalog,
        registerResource,
      },
@@ -66,8 +91,8 @@ class XoServerCloud {

  // ----------------------------------------------------------------

  async _getCatalog() {
    const catalog = await this._updater.call('getResourceCatalog')
  async _getCatalog({ filters } = {}) {
    const catalog = await this._updater.call('getResourceCatalog', { filters })

    if (!catalog) {
      throw new Error('cannot get catalog')
@@ -90,6 +115,26 @@ class XoServerCloud {

  // ----------------------------------------------------------------

  async _downloadAndInstallResource({ id, namespace, sr, version }) {
    const stream = await this._requestResource({
      hub: true,
      id,
      namespace,
      version,
    })
    const vm = await this._xo.getXapi(sr.$poolId).importVm(stream, {
      srId: sr.id,
      type: 'xva',
    })
    await vm.update_other_config({
      'xo:resource:namespace': namespace,
      'xo:resource:xva:version': version,
      'xo:resource:xva:id': id,
    })
  }

  // ----------------------------------------------------------------

  async _registerResource(namespace) {
    const _namespace = (await this._getNamespaces())[namespace]

@@ -106,8 +151,10 @@ class XoServerCloud {

  // ----------------------------------------------------------------

  async _getNamespaceCatalog(namespace) {
    const namespaceCatalog = (await this._getCatalog())[namespace]
  async _getNamespaceCatalog({ hub, namespace }) {
    const namespaceCatalog = (await this._getCatalog({ filters: { hub } }))[
      namespace
    ]

    if (!namespaceCatalog) {
      throw new Error(`cannot get catalog: ${namespace} not registered`)
@@ -118,14 +165,17 @@ class XoServerCloud {

  // ----------------------------------------------------------------

  async _requestResource(namespace, id, version) {
  async _requestResource({ hub = false, id, namespace, version }) {
    const _namespace = (await this._getNamespaces())[namespace]

    if (!_namespace || !_namespace.registered) {
    if (!hub && (!_namespace || !_namespace.registered)) {
      throw new Error(`cannot get resource: ${namespace} not registered`)
    }

    const { _token: token } = await this._getNamespaceCatalog(namespace)
    const { _token: token } = await this._getNamespaceCatalog({
      hub,
      namespace,
    })

    // 2018-03-20 Extra check: getResourceDownloadToken seems to be called without a token in some cases
    if (token === undefined) {

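The hunks above register a new `cloud.downloadAndInstallResource` API method, with `params` validated server-side and a `resolve` entry that turns the `sr` id into an SR object the caller must be able to administrate. A hedged sketch of invoking it from a client, assuming xo-lib's usual `open`/`signIn` flow (the URL, credentials and ids below are placeholders, not values from the commit):

```js
import Xo from 'xo-lib'

async function main() {
  const xo = new Xo({ url: 'https://xo.example.org' })
  await xo.open()
  await xo.signIn({ email: 'admin@admin.net', password: 'admin' })

  // 'cloud.downloadAndInstallResource' is the method registered by load() above.
  await xo.call('cloud.downloadAndInstallResource', {
    id: 'xva-id',
    namespace: 'hub',
    version: '1.0.0',
    sr: 'sr-uuid', // resolved server-side to an SR with 'administrate' permission
  })
}

main().catch(console.error)
```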
@@ -31,7 +31,7 @@
    "node": ">=6"
  },
  "dependencies": {
    "@xen-orchestra/cron": "^1.0.3",
    "@xen-orchestra/cron": "^1.0.4",
    "lodash": "^4.16.2"
  },
  "devDependencies": {

@@ -21,7 +21,7 @@
    "node": ">=6"
  },
  "dependencies": {
    "@xen-orchestra/cron": "^1.0.3",
    "@xen-orchestra/cron": "^1.0.4",
    "d3-time-format": "^2.1.1",
    "json5": "^2.0.1",
    "lodash": "^4.17.4"
@@ -33,7 +33,7 @@
    "@babel/preset-flow": "^7.0.0",
    "babel-plugin-lodash": "^3.3.2",
    "cross-env": "^5.1.3",
    "rimraf": "^2.6.2"
    "rimraf": "^3.0.0"
  },
  "scripts": {
    "build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

@@ -1,31 +1,14 @@
# xo-server-sdn-controller [](https://travis-ci.org/vatesfr/xen-orchestra)

XO Server plugin that allows the creation of pool-wide private networks.
XO Server plugin that allows the creation of pool-wide and cross-pool private networks.

## Install

For installing XO and the plugins from the sources, please take a look at [the documentation](https://xen-orchestra.com/docs/from_the_sources.html).

## Usage
## Documentation

### Network creation

In the network creation view, select a `pool` and `Private network`.
Create the network.

Choice is offer between `GRE` and `VxLAN`, if `VxLAN` is chosen, then the port 4789 must be open for UDP traffic.
The following line needs to be added, if not already present, in `/etc/sysconfig/iptables` of all the hosts where `VxLAN` is wanted:
`-A xapi-INPUT -p udp -m conntrack --ctstate NEW -m udp --dport 4789 -j ACCEPT`

### Configuration

Like all other xo-server plugins, it can be configured directly via
the web interface, see [the plugin documentation](https://xen-orchestra.com/docs/plugins.html).

The plugin's configuration contains:
- `cert-dir`: A path where to find the certificates to create SSL connections with the hosts.
  If none is provided, the plugin will create its own self-signed certificates.
- `override-certs:` Whether or not to uninstall an already existing SDN controller CA certificate in order to replace it by the plugin's one.
Please see the plugin's [official documentation](https://xen-orchestra.com/docs/sdn_controller.html).

## Contributions

@@ -15,13 +15,14 @@
    "predev": "yarn run prebuild",
    "prepublishOnly": "yarn run build"
  },
  "version": "0.1.1",
  "version": "0.3.0",
  "engines": {
    "node": ">=6"
  },
  "devDependencies": {
    "@babel/cli": "^7.4.4",
    "@babel/core": "^7.4.4",
    "@babel/plugin-proposal-nullish-coalescing-operator": "^7.4.4",
    "@babel/plugin-proposal-optional-chaining": "^7.2.0",
    "@babel/preset-env": "^7.4.4",
    "cross-env": "^5.2.0"
@@ -29,8 +30,9 @@
  "dependencies": {
    "@xen-orchestra/log": "^0.1.4",
    "lodash": "^4.17.11",
    "node-openssl-cert": "^0.0.84",
    "promise-toolbox": "^0.13.0"
    "node-openssl-cert": "^0.0.97",
    "promise-toolbox": "^0.13.0",
    "uuid": "^3.3.2"
  },
  "private": true
}

File diff suppressed because it is too large
@@ -1,8 +1,8 @@
import assert from 'assert'
import createLogger from '@xen-orchestra/log'
import forOwn from 'lodash/forOwn'
import fromEvent from 'promise-toolbox/fromEvent'
import { connect } from 'tls'
import { forOwn, toPairs } from 'lodash'

const log = createLogger('xo:xo-server:sdn-controller:ovsdb-client')

@@ -10,44 +10,57 @@ const OVSDB_PORT = 6640

// =============================================================================

function toMap(object) {
  return ['map', toPairs(object)]
}

// =============================================================================

export class OvsdbClient {
  /*
  Create an SSL connection to an XCP-ng host.
  Interact with the host's OpenVSwitch (OVS) daemon to create and manage the virtual bridges
  corresponding to the private networks with OVSDB (OpenVSwitch DataBase) Protocol.
  See:
  - OVSDB Protocol: https://tools.ietf.org/html/rfc7047
  - OVS Tunneling : http://docs.openvswitch.org/en/latest/howto/tunneling/
  - OVS IPSEC : http://docs.openvswitch.org/en/latest/howto/ipsec/

  Attributes on created OVS ports (corresponds to a XAPI `PIF` or `VIF`):
  - `other_config`:
    - `xo:sdn-controller:cross-pool` : UUID of the remote network connected by the tunnel
    - `xo:sdn-controller:private-pool-wide`: `true` if created (and managed) by a SDN Controller

  Attributes on created OVS interfaces:
  - `options`:
    - `key` : Network's VNI
    - `remote_ip`: Remote IP of the tunnel
  */

  constructor(host, clientKey, clientCert, caCert) {
    this._host = host
    this._numberOfPortAndInterface = 0
    this._requestID = 0
    this._requestId = 0

    this._adding = []

    this.host = host

    this.updateCertificates(clientKey, clientCert, caCert)

    log.debug('New OVSDB client', {
      host: this._host.name_label,
      host: this.host.name_label,
    })
  }

  // ---------------------------------------------------------------------------

  get address() {
    return this._host.address
  }

  get host() {
    return this._host.$ref
  }

  get id() {
    return this._host.$id
  }

  get hostMetricsRef() {
    return this._host.metrics
  }

  updateCertificates(clientKey, clientCert, caCert) {
    this._clientKey = clientKey
    this._clientCert = clientCert
    this._caCert = caCert

    log.debug('Certificates have been updated', {
      host: this._host.name_label,
      host: this.host.name_label,
    })
  }

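The `toMap` helper introduced above wraps a plain object into the OVSDB wire representation (`['map', pairs]`), which the client then passes for columns such as `options` and `other_config` when inserting Interface and Port rows later in this diff. A hedged sketch of what it produces (the values are placeholders):

```js
import { toPairs } from 'lodash'

// Same helper as in the hunk above.
function toMap(object) {
  return ['map', toPairs(object)]
}

// Example shape of the `options` column (remote_ip, key, optional psk).
console.log(toMap({ remote_ip: '10.0.0.2', key: '42' }))
// → [ 'map', [ [ 'remote_ip', '10.0.0.2' ], [ 'key', '42' ] ] ]
```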
@@ -57,19 +70,32 @@ export class OvsdbClient {
|
||||
networkUuid,
|
||||
networkName,
|
||||
remoteAddress,
|
||||
encapsulation
|
||||
encapsulation,
|
||||
key,
|
||||
password,
|
||||
remoteNetwork
|
||||
) {
|
||||
const socket = await this._connect()
|
||||
const index = this._numberOfPortAndInterface
|
||||
++this._numberOfPortAndInterface
|
||||
if (
|
||||
this._adding.find(
|
||||
elem => elem.id === networkUuid && elem.addr === remoteAddress
|
||||
) !== undefined
|
||||
) {
|
||||
return
|
||||
}
|
||||
const adding = { id: networkUuid, addr: remoteAddress }
|
||||
this._adding.push(adding)
|
||||
|
||||
const socket = await this._connect()
|
||||
const [bridgeUuid, bridgeName] = await this._getBridgeUuidForNetwork(
|
||||
networkUuid,
|
||||
networkName,
|
||||
socket
|
||||
)
|
||||
if (bridgeUuid == null) {
|
||||
if (bridgeUuid === undefined) {
|
||||
socket.destroy()
|
||||
this._adding = this._adding.filter(
|
||||
elem => elem.id !== networkUuid || elem.addr !== remoteAddress
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -81,35 +107,47 @@ export class OvsdbClient {
|
||||
)
|
||||
if (alreadyExist) {
|
||||
socket.destroy()
|
||||
return
|
||||
this._adding = this._adding.filter(
|
||||
elem => elem.id !== networkUuid || elem.addr !== remoteAddress
|
||||
)
|
||||
return bridgeName
|
||||
}
|
||||
|
||||
const interfaceName = 'tunnel_iface' + index
|
||||
const portName = 'tunnel_port' + index
|
||||
const index = ++this._numberOfPortAndInterface
|
||||
const interfaceName = bridgeName + '_iface' + index
|
||||
const portName = bridgeName + '_port' + index
|
||||
|
||||
// Add interface and port to the bridge
|
||||
const options = ['map', [['remote_ip', remoteAddress]]]
|
||||
const options = { remote_ip: remoteAddress, key: key }
|
||||
if (password !== undefined) {
|
||||
options.psk = password
|
||||
}
|
||||
const addInterfaceOperation = {
|
||||
op: 'insert',
|
||||
table: 'Interface',
|
||||
row: {
|
||||
type: encapsulation,
|
||||
options: options,
|
||||
options: toMap(options),
|
||||
name: interfaceName,
|
||||
other_config: ['map', [['private_pool_wide', 'true']]],
|
||||
},
|
||||
'uuid-name': 'new_iface',
|
||||
}
|
||||
|
||||
const addPortOperation = {
|
||||
op: 'insert',
|
||||
table: 'Port',
|
||||
row: {
|
||||
name: portName,
|
||||
interfaces: ['set', [['named-uuid', 'new_iface']]],
|
||||
other_config: ['map', [['private_pool_wide', 'true']]],
|
||||
other_config: toMap(
|
||||
remoteNetwork !== undefined
|
||||
? { 'xo:sdn-controller:cross-pool': remoteNetwork }
|
||||
: { 'xo:sdn-controller:private-pool-wide': 'true' }
|
||||
),
|
||||
},
|
||||
'uuid-name': 'new_port',
|
||||
}
|
||||
|
||||
const mutateBridgeOperation = {
|
||||
op: 'mutate',
|
||||
table: 'Bridge',
|
||||
@@ -123,7 +161,11 @@ export class OvsdbClient {
|
||||
mutateBridgeOperation,
|
||||
]
|
||||
const jsonObjects = await this._sendOvsdbTransaction(params, socket)
|
||||
if (jsonObjects == null) {
|
||||
|
||||
this._adding = this._adding.filter(
|
||||
elem => elem.id !== networkUuid || elem.addr !== remoteAddress
|
||||
)
|
||||
if (jsonObjects === undefined) {
|
||||
socket.destroy()
|
||||
return
|
||||
}
|
||||
@@ -134,14 +176,14 @@ export class OvsdbClient {
|
||||
let opResult
|
||||
do {
|
||||
opResult = jsonObjects[0].result[i]
|
||||
if (opResult != null && opResult.error != null) {
|
||||
if (opResult?.error !== undefined) {
|
||||
error = opResult.error
|
||||
details = opResult.details
|
||||
}
|
||||
++i
|
||||
} while (opResult && !error)
|
||||
} while (opResult !== undefined && error === undefined)
|
||||
|
||||
if (error != null) {
|
||||
if (error !== undefined) {
|
||||
log.error('Error while adding port and interface to bridge', {
|
||||
error,
|
||||
details,
|
||||
@@ -149,7 +191,7 @@ export class OvsdbClient {
|
||||
interface: interfaceName,
|
||||
bridge: bridgeName,
|
||||
network: networkName,
|
||||
host: this._host.name_label,
|
||||
host: this.host.name_label,
|
||||
})
|
||||
socket.destroy()
|
||||
return
|
||||
@@ -160,26 +202,32 @@ export class OvsdbClient {
|
||||
interface: interfaceName,
|
||||
bridge: bridgeName,
|
||||
network: networkName,
|
||||
host: this._host.name_label,
|
||||
host: this.host.name_label,
|
||||
})
|
||||
socket.destroy()
|
||||
return bridgeName
|
||||
}
|
||||
|
||||
async resetForNetwork(networkUuid, networkName) {
|
||||
async resetForNetwork(
|
||||
networkUuid,
|
||||
networkName,
|
||||
crossPoolOnly,
|
||||
remoteNetwork
|
||||
) {
|
||||
const socket = await this._connect()
|
||||
const [bridgeUuid, bridgeName] = await this._getBridgeUuidForNetwork(
|
||||
networkUuid,
|
||||
networkName,
|
||||
socket
|
||||
)
|
||||
if (bridgeUuid == null) {
|
||||
if (bridgeUuid === undefined) {
|
||||
socket.destroy()
|
||||
return
|
||||
}
|
||||
|
||||
// Delete old ports created by a SDN controller
|
||||
const ports = await this._getBridgePorts(bridgeUuid, bridgeName, socket)
|
||||
if (ports == null) {
|
||||
if (ports === undefined) {
|
||||
socket.destroy()
|
||||
return
|
||||
}
|
||||
@@ -194,12 +242,25 @@ export class OvsdbClient {
|
||||
where,
|
||||
socket
|
||||
)
|
||||
if (selectResult == null) {
|
||||
if (selectResult === undefined) {
|
||||
continue
|
||||
}
|
||||
|
||||
forOwn(selectResult.other_config[1], config => {
|
||||
if (config[0] === 'private_pool_wide' && config[1] === 'true') {
|
||||
// 2019-09-03
|
||||
// Compatibility code, to be removed in 1 year.
|
||||
const oldShouldDelete =
|
||||
(config[0] === 'private_pool_wide' && !crossPoolOnly) ||
|
||||
(config[0] === 'cross_pool' &&
|
||||
(remoteNetwork === undefined || remoteNetwork === config[1]))
|
||||
|
||||
const shouldDelete =
|
||||
(config[0] === 'xo:sdn-controller:private-pool-wide' &&
|
||||
!crossPoolOnly) ||
|
||||
(config[0] === 'xo:sdn-controller:cross-pool' &&
|
||||
(remoteNetwork === undefined || remoteNetwork === config[1]))
|
||||
|
||||
if (shouldDelete || oldShouldDelete) {
|
||||
portsToDelete.push(['uuid', portUuid])
|
||||
}
|
||||
})
|
||||
@@ -220,15 +281,15 @@ export class OvsdbClient {
|
||||
|
||||
const params = ['Open_vSwitch', mutateBridgeOperation]
|
||||
const jsonObjects = await this._sendOvsdbTransaction(params, socket)
|
||||
if (jsonObjects == null) {
|
||||
if (jsonObjects === undefined) {
|
||||
socket.destroy()
|
||||
return
|
||||
}
|
||||
if (jsonObjects[0].error != null) {
|
||||
log.error('Error while deleting ports from bridge', {
|
||||
error: jsonObjects.error,
|
||||
error: jsonObjects[0].error,
|
||||
bridge: bridgeName,
|
||||
host: this._host.name_label,
|
||||
host: this.host.name_label,
|
||||
})
|
||||
socket.destroy()
|
||||
return
|
||||
@@ -237,7 +298,7 @@ export class OvsdbClient {
|
||||
log.debug('Ports deleted from bridge', {
|
||||
nPorts: jsonObjects[0].result[0].count,
|
||||
bridge: bridgeName,
|
||||
host: this._host.name_label,
|
||||
host: this.host.name_label,
|
||||
})
|
||||
socket.destroy()
|
||||
}
|
||||
@@ -254,9 +315,9 @@ export class OvsdbClient {
|
||||
for (let i = pos; i < data.length; ++i) {
|
||||
const c = data.charAt(i)
|
||||
if (c === '{') {
|
||||
depth++
|
||||
++depth
|
||||
} else if (c === '}') {
|
||||
depth--
|
||||
--depth
|
||||
if (depth === 0) {
|
||||
const object = JSON.parse(buffer + data.substr(0, i + 1))
|
||||
objects.push(object)
|
||||
@@ -276,11 +337,7 @@ export class OvsdbClient {
|
||||
|
||||
async _getBridgeUuidForNetwork(networkUuid, networkName, socket) {
|
||||
const where = [
|
||||
[
|
||||
'external_ids',
|
||||
'includes',
|
||||
['map', [['xs-network-uuids', networkUuid]]],
|
||||
],
|
||||
['external_ids', 'includes', toMap({ 'xs-network-uuids': networkUuid })],
|
||||
]
|
||||
const selectResult = await this._select(
|
||||
'Bridge',
|
||||
@@ -288,12 +345,12 @@ export class OvsdbClient {
|
||||
where,
|
||||
socket
|
||||
)
|
||||
if (selectResult == null) {
|
||||
if (selectResult === undefined) {
|
||||
log.error('No bridge found for network', {
|
||||
network: networkName,
|
||||
host: this._host.name_label,
|
||||
host: this.host.name_label,
|
||||
})
|
||||
return [null, null]
|
||||
return []
|
||||
}
|
||||
|
||||
const bridgeUuid = selectResult._uuid[1]
|
||||
@@ -309,14 +366,14 @@ export class OvsdbClient {
|
||||
socket
|
||||
) {
|
||||
const ports = await this._getBridgePorts(bridgeUuid, bridgeName, socket)
|
||||
if (ports == null) {
|
||||
return
|
||||
if (ports === undefined) {
|
||||
return false
|
||||
}
|
||||
|
||||
for (const port of ports) {
|
||||
const portUuid = port[1]
|
||||
const interfaces = await this._getPortInterfaces(portUuid, socket)
|
||||
if (interfaces == null) {
|
||||
if (interfaces === undefined) {
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -327,7 +384,7 @@ export class OvsdbClient {
|
||||
remoteAddress,
|
||||
socket
|
||||
)
|
||||
if (hasRemote === true) {
|
||||
if (hasRemote) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
@@ -339,8 +396,8 @@ export class OvsdbClient {
|
||||
async _getBridgePorts(bridgeUuid, bridgeName, socket) {
|
||||
const where = [['_uuid', '==', ['uuid', bridgeUuid]]]
|
||||
const selectResult = await this._select('Bridge', ['ports'], where, socket)
|
||||
if (selectResult == null) {
|
||||
return null
|
||||
if (selectResult === undefined) {
|
||||
return
|
||||
}
|
||||
|
||||
return selectResult.ports[0] === 'set'
|
||||
@@ -356,8 +413,8 @@ export class OvsdbClient {
|
||||
where,
|
||||
socket
|
||||
)
|
||||
if (selectResult == null) {
|
||||
return null
|
||||
if (selectResult === undefined) {
|
||||
return
|
||||
}
|
||||
|
||||
return selectResult.interfaces[0] === 'set'
|
||||
@@ -373,7 +430,7 @@ export class OvsdbClient {
|
||||
where,
|
||||
socket
|
||||
)
|
||||
if (selectResult == null) {
|
||||
if (selectResult === undefined) {
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -398,20 +455,20 @@ export class OvsdbClient {
|
||||
|
||||
const params = ['Open_vSwitch', selectOperation]
|
||||
const jsonObjects = await this._sendOvsdbTransaction(params, socket)
|
||||
if (jsonObjects == null) {
|
||||
if (jsonObjects === undefined) {
|
||||
return
|
||||
}
|
||||
const jsonResult = jsonObjects[0].result[0]
|
||||
if (jsonResult.error != null) {
|
||||
if (jsonResult.error !== undefined) {
|
||||
log.error('Error while selecting columns', {
|
||||
error: jsonResult.error,
|
||||
details: jsonResult.details,
|
||||
columns,
|
||||
table,
|
||||
where,
|
||||
host: this._host.name_label,
|
||||
host: this.host.name_label,
|
||||
})
|
||||
return null
|
||||
return
|
||||
}
|
||||
|
||||
if (jsonResult.rows.length === 0) {
|
||||
@@ -419,15 +476,15 @@ export class OvsdbClient {
|
||||
columns,
|
||||
table,
|
||||
where,
|
||||
host: this._host.name_label,
|
||||
host: this.host.name_label,
|
||||
})
|
||||
return null
|
||||
return
|
||||
}
|
||||
|
||||
// For now all select operations should return only 1 row
|
||||
assert(
|
||||
jsonResult.rows.length === 1,
|
||||
`[${this._host.name_label}] There should exactly 1 row when searching: '${columns}' in: '${table}' where: '${where}'`
|
||||
`[${this.host.name_label}] There should be exactly 1 row when searching: '${columns}' in: '${table}' where: '${where}'`
|
||||
)
|
||||
|
||||
return jsonResult.rows[0]
|
||||
@@ -435,9 +492,7 @@ export class OvsdbClient {
|
||||
|
||||
async _sendOvsdbTransaction(params, socket) {
|
||||
const stream = socket
|
||||
|
||||
const requestId = this._requestID
|
||||
++this._requestID
|
||||
const requestId = ++this._requestId
|
||||
const req = {
|
||||
id: requestId,
|
||||
method: 'transact',
|
||||
@@ -449,9 +504,9 @@ export class OvsdbClient {
|
||||
} catch (error) {
|
||||
log.error('Error while writing into stream', {
|
||||
error,
|
||||
host: this._host.name_label,
|
||||
host: this.host.name_label,
|
||||
})
|
||||
return null
|
||||
return
|
||||
}
|
||||
|
||||
let result
|
||||
@@ -463,9 +518,9 @@ export class OvsdbClient {
|
||||
} catch (error) {
|
||||
log.error('Error while waiting for stream data', {
|
||||
error,
|
||||
host: this._host.name_label,
|
||||
host: this.host.name_label,
|
||||
})
|
||||
return null
|
||||
return
|
||||
}
|
||||
|
||||
jsonObjects = this._parseJson(result)
|
||||
@@ -482,7 +537,7 @@ export class OvsdbClient {
|
||||
ca: this._caCert,
|
||||
key: this._clientKey,
|
||||
cert: this._clientCert,
|
||||
host: this._host.address,
|
||||
host: this.host.address,
|
||||
port: OVSDB_PORT,
|
||||
rejectUnauthorized: false,
|
||||
requestCert: false,
|
||||
@@ -495,7 +550,7 @@ export class OvsdbClient {
|
||||
log.error('TLS connection failed', {
|
||||
error,
|
||||
code: error.code,
|
||||
host: this._host.name_label,
|
||||
host: this.host.name_label,
|
||||
})
|
||||
throw error
|
||||
}
|
||||
@@ -504,7 +559,7 @@ export class OvsdbClient {
|
||||
log.error('Socket error', {
|
||||
error,
|
||||
code: error.code,
|
||||
host: this._host.name_label,
|
||||
host: this.host.name_label,
|
||||
})
|
||||
})
|
||||
|
||||
|
||||
@@ -2,6 +2,8 @@

> Test client for Xo-Server

Tests are run sequentially to avoid concurrency issues.

## Adding a test

### Organization
@@ -13,15 +15,20 @@ src
| |  └─ index.spec.js.snap
| └─ index.spec.js
├─ job
| └─ index.spec.js
├─ issues
¦  └─ index.spec.js
¦
¦
├─ _xoConnection.js
└─ util.js
```

The tests can describe xo methods or scenarios:
```javascript
The tests can describe:

- XO methods or scenarios:

  `src/user/index.js`
  ```js
  import xo from "../_xoConnection";

  describe("user", () => {
@@ -44,6 +51,16 @@ describe("user", () => {
    });

  });
  ```
- issues

  `src/issues/index.js`
  ```js
  describe("issue", () => {
    test("5454", () => {
      /* some tests */
    })
  })
  ```

### Best practices
@@ -120,6 +137,13 @@ describe("user", () => {

- You can run only tests related to changed files, and review the failed output by using: `> yarn test --watch`

- ⚠ Warning: snapshots ⚠
  After each run of the tests, check that snapshots are not inadvertently modified (see the sketch below).

- ⚠ Jest known issue ⚠
  If a test timeout is triggered, the next async tests can fail; this is caused by inadvertently modified snapshots.
  As a workaround, you can clean your git working tree and re-run Jest with a large timeout: `> yarn test --testTimeout=100000`

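A hedged illustration of the snapshot advice above: volatile fields are pinned with property matchers so a snapshot is not "inadvertently modified" between runs. The spec files later in this compare use exactly this pattern; the particular test body below (empty `backupNg.getLogs` filter, test names) is made up for illustration.

```js
import xo from "../_xoConnection";

describe("backup log", () => {
  test("root task shape", async () => {
    const [log] = await xo.call("backupNg.getLogs", {});
    // Pin the fields that change on every run so the snapshot stays stable.
    expect(log).toMatchSnapshot({
      end: expect.any(Number),
      id: expect.any(String),
      start: expect.any(Number),
    });
  });
});
```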
## Contributions

Contributions are *very* welcomed, either on the documentation or on

@@ -36,6 +36,7 @@
    "golike-defer": "^0.4.1",
    "jest": "^24.8.0",
    "lodash": "^4.17.11",
    "promise-toolbox": "^0.13.0",
    "xo-collection": "^0.4.1",
    "xo-common": "^0.2.0",
    "xo-lib": "^0.9.0"
@@ -49,6 +50,7 @@
      "<rootDir>/src/old-tests"
    ],
    "testEnvironment": "node",
    "testRegex": "\\.spec\\.js$"
    "testRegex": "\\.spec\\.js$",
    "maxConcurrency": 1
  }
}

@@ -3,16 +3,27 @@
email = ''
password = ''

[pools]
default = ''

[servers]
[servers.default]
username = ''
password = ''
host = ''

[vms]
default = ''
# vmToBackup = ''

[templates]
default = ''
templateWithoutDisks = ''

[srs]
default = ''

# resources created before all tests and deleted at the end.
[preCreatedResources]
[preCreatedResources.remotes]
default = { name = '', url = '' }
[remotes]
default = { name = '', url = '' }
remote1 = { name = '', url = '' }
# remote2 = { name = '', url = '' }

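The sample config above is read through `_config` and consumed by the helpers added to `_xoConnection.js` in this compare (`createTempServer`, `createTempRemote`, `createTempVm`). A hedged sketch of a spec that relies on the `servers` and `remotes` defaults; the test names and assertion are illustrative, not from the diff:

```js
import config from '../_config'
import xo from '../_xoConnection'

describe('remote', () => {
  test('can be created from the configured defaults', async () => {
    // Register the default server, then create a temporary remote;
    // both are cleaned up automatically by the connection's disposers.
    await xo.createTempServer(config.servers.default)
    const remote = await xo.createTempRemote(config.remotes.default)
    expect(remote.id).toBeDefined()
  })
})
```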
@@ -3,16 +3,10 @@ import defer from 'golike-defer'
|
||||
import Xo from 'xo-lib'
|
||||
import XoCollection from 'xo-collection'
|
||||
import { find, forOwn } from 'lodash'
|
||||
import { fromEvent } from 'promise-toolbox'
|
||||
|
||||
import config from './_config'
|
||||
|
||||
const ARGS_BY_TYPE = {
|
||||
remotes: {
|
||||
getCreationArgs: conf => ['remote.create', conf],
|
||||
getDeletionArgs: res => ['remote.delete', { id: res.id }],
|
||||
},
|
||||
}
|
||||
|
||||
const getDefaultCredentials = () => {
|
||||
const { email, password } = config.xoConnection
|
||||
return { email, password }
|
||||
@@ -93,7 +87,7 @@ class XoConnection extends Xo {
|
||||
while (true) {
|
||||
try {
|
||||
await predicate(obj)
|
||||
return
|
||||
return obj
|
||||
} catch (_) {}
|
||||
// If failed, wait for next object state/update and retry.
|
||||
obj = await this.waitObject(id)
|
||||
@@ -122,43 +116,110 @@ class XoConnection extends Xo {
|
||||
return job
|
||||
}
|
||||
|
||||
async createTempVm(params) {
|
||||
const id = await this.call('vm.create', params)
|
||||
this._tempResourceDisposers.push('vm.delete', { id })
|
||||
await this.waitObjectState(id, vm => {
|
||||
if (vm.type !== 'VM') throw new Error('retry')
|
||||
async createTempNetwork(params) {
|
||||
const id = await this.call('network.create', {
|
||||
name: 'XO Test',
|
||||
pool: config.pools.default,
|
||||
...params,
|
||||
})
|
||||
return id
|
||||
this._tempResourceDisposers.push('network.delete', { id })
|
||||
return this.getOrWaitObject(id)
|
||||
}
|
||||
|
||||
async createRequiredResources() {
|
||||
const requiredResources = {}
|
||||
const resourcesToCreate = config.preCreatedResources
|
||||
for (const typeOfResources in resourcesToCreate) {
|
||||
const { getCreationArgs, getDeletionArgs } = ARGS_BY_TYPE[typeOfResources]
|
||||
const resources = resourcesToCreate[typeOfResources]
|
||||
for (const resource in resources) {
|
||||
const result = await this.call(...getCreationArgs(resources[resource]))
|
||||
this._durableResourceDisposers.push(...getDeletionArgs(result))
|
||||
requiredResources[typeOfResources] = {
|
||||
...requiredResources[typeOfResources],
|
||||
[resource]: result,
|
||||
}
|
||||
async createTempVm(params) {
|
||||
const id = await this.call('vm.create', {
|
||||
name_label: 'XO Test',
|
||||
template: config.templates.templateWithoutDisks,
|
||||
...params,
|
||||
})
|
||||
this._tempResourceDisposers.push('vm.delete', { id })
|
||||
return this.waitObjectState(id, vm => {
|
||||
if (vm.type !== 'VM') throw new Error('retry')
|
||||
})
|
||||
}
|
||||
|
||||
async createTempRemote(params) {
|
||||
const remote = await this.call('remote.create', params)
|
||||
this._tempResourceDisposers.push('remote.delete', { id: remote.id })
|
||||
return remote
|
||||
}
|
||||
|
||||
async createTempServer(params) {
|
||||
const servers = await this.call('server.getAll')
|
||||
const server = servers.find(server => server.host === params.host)
|
||||
if (server !== undefined) {
|
||||
if (server.status === 'disconnected') {
|
||||
await this.call('server.enable', { id: server.id })
|
||||
this._durableResourceDisposers.push('server.disable', { id: server.id })
|
||||
await fromEvent(this._objects, 'finish')
|
||||
}
|
||||
return
|
||||
}
|
||||
return requiredResources
|
||||
|
||||
const id = await this.call('server.add', {
|
||||
...params,
|
||||
allowUnauthorized: true,
|
||||
autoConnect: false,
|
||||
})
|
||||
this._durableResourceDisposers.push('server.remove', { id })
|
||||
await this.call('server.enable', { id })
|
||||
await fromEvent(this._objects, 'finish')
|
||||
}
|
||||
|
||||
async getSchedule(predicate) {
|
||||
return find(await this.call('schedule.getAll'), predicate)
|
||||
}
|
||||
|
||||
async runBackupJob(jobId, scheduleId, { remotes, nExecutions = 1 }) {
|
||||
for (let i = 0; i < nExecutions; i++) {
|
||||
await xo.call('backupNg.runJob', { id: jobId, schedule: scheduleId })
|
||||
}
|
||||
const backups = {}
|
||||
if (remotes !== undefined) {
|
||||
const backupsByRemote = await xo.call('backupNg.listVmBackups', {
|
||||
remotes,
|
||||
})
|
||||
forOwn(backupsByRemote, (backupsByVm, remoteId) => {
|
||||
backups[remoteId] = []
|
||||
forOwn(backupsByVm, vmBackups => {
|
||||
vmBackups.forEach(
|
||||
({ jobId: backupJobId, scheduleId: backupScheduleId, id }) => {
|
||||
if (jobId === backupJobId && scheduleId === backupScheduleId) {
|
||||
this._tempResourceDisposers.push('backupNg.deleteVmBackup', {
|
||||
id,
|
||||
})
|
||||
backups[remoteId].push(id)
|
||||
}
|
||||
}
|
||||
)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
forOwn(this.objects.all, (obj, id) => {
|
||||
if (
|
||||
obj.other !== undefined &&
|
||||
obj.other['xo:backup:job'] === jobId &&
|
||||
obj.other['xo:backup:schedule'] === scheduleId
|
||||
) {
|
||||
this._tempResourceDisposers.push('vm.delete', {
|
||||
id,
|
||||
})
|
||||
}
|
||||
})
|
||||
return backups
|
||||
}
|
||||
|
||||
getBackupLogs(filter) {
|
||||
return this.call('backupNg.getLogs', { _forceRefresh: true, ...filter })
|
||||
}
|
||||
|
||||
async _cleanDisposers(disposers) {
|
||||
for (let n = disposers.length - 1; n > 0; ) {
|
||||
const params = disposers[n--]
|
||||
const method = disposers[n--]
|
||||
await this.call(method, params).catch(error => {
|
||||
console.warn('_cleanDisposers', method, params, error)
|
||||
console.warn('deleteTempResources', method, params, error)
|
||||
})
|
||||
}
|
||||
disposers.length = 0
|
||||
@@ -179,10 +240,9 @@ const getConnection = credentials => {
|
||||
}
|
||||
|
||||
let xo
|
||||
let resources
|
||||
beforeAll(async () => {
|
||||
// TOFIX: stop tests if the connection is not established properly and show the error
|
||||
xo = await getConnection()
|
||||
resources = await xo.createRequiredResources()
|
||||
})
|
||||
afterAll(async () => {
|
||||
await xo.deleteDurableResources()
|
||||
@@ -191,7 +251,7 @@ afterAll(async () => {
|
||||
})
|
||||
afterEach(() => xo.deleteTempResources())
|
||||
|
||||
export { xo as default, resources }
|
||||
export { xo as default }
|
||||
|
||||
export const testConnection = ({ credentials }) =>
|
||||
getConnection(credentials).then(connection => connection.close())
|
||||
|
||||
@@ -127,6 +127,375 @@ Object {
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 1`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"mode": "delta",
|
||||
"reportWhen": "never",
|
||||
},
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"jobId": Any<String>,
|
||||
"message": "backup",
|
||||
"scheduleId": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 2`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"id": Any<String>,
|
||||
"type": "VM",
|
||||
},
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 3`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": "snapshot",
|
||||
"result": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 4`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"id": Any<String>,
|
||||
"isFull": true,
|
||||
"type": "remote",
|
||||
},
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 5`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"result": Object {
|
||||
"size": Any<Number>,
|
||||
},
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 6`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"result": Object {
|
||||
"size": Any<Number>,
|
||||
},
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 7`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"id": Any<String>,
|
||||
"isFull": true,
|
||||
"type": "remote",
|
||||
},
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 8`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"result": Object {
|
||||
"size": Any<Number>,
|
||||
},
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 9`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"result": Object {
|
||||
"size": Any<Number>,
|
||||
},
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 10`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"mode": "delta",
|
||||
"reportWhen": "never",
|
||||
},
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"jobId": Any<String>,
|
||||
"message": "backup",
|
||||
"scheduleId": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 11`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"id": Any<String>,
|
||||
"type": "VM",
|
||||
},
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 12`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": "snapshot",
|
||||
"result": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 13`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"id": Any<String>,
|
||||
"isFull": false,
|
||||
"type": "remote",
|
||||
},
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 14`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"result": Object {
|
||||
"size": Any<Number>,
|
||||
},
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 15`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"result": Object {
|
||||
"size": Any<Number>,
|
||||
},
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 16`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"id": Any<String>,
|
||||
"isFull": false,
|
||||
"type": "remote",
|
||||
},
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 17`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"result": Object {
|
||||
"size": Any<Number>,
|
||||
},
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 18`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"result": Object {
|
||||
"size": Any<Number>,
|
||||
},
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 19`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"mode": "delta",
|
||||
"reportWhen": "never",
|
||||
},
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"jobId": Any<String>,
|
||||
"message": "backup",
|
||||
"scheduleId": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 20`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"id": Any<String>,
|
||||
"type": "VM",
|
||||
},
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 21`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": "snapshot",
|
||||
"result": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 22`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"id": Any<String>,
|
||||
"isFull": true,
|
||||
"type": "remote",
|
||||
},
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 23`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"result": Object {
|
||||
"size": Any<Number>,
|
||||
},
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 24`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"result": Object {
|
||||
"size": Any<Number>,
|
||||
},
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 25`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"id": Any<String>,
|
||||
"isFull": true,
|
||||
"type": "remote",
|
||||
},
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 26`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"result": Object {
|
||||
"size": Any<Number>,
|
||||
},
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 27`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"result": Object {
|
||||
"size": Any<Number>,
|
||||
},
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a rolling snapshot with 2 as retention & revert to an old state 1`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
|
||||
@@ -1,16 +1,75 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
import { forOwn } from 'lodash'
|
||||
import { noSuchObject } from 'xo-common/api-errors'
|
||||
|
||||
import config from '../_config'
|
||||
import randomId from '../_randomId'
|
||||
import xo, { resources } from '../_xoConnection'
|
||||
import xo from '../_xoConnection'
|
||||
|
||||
const DEFAULT_SCHEDULE = {
|
||||
name: 'scheduleTest',
|
||||
cron: '0 * * * * *',
|
||||
}
|
||||
|
||||
const validateRootTask = (log, props) =>
|
||||
expect(log).toMatchSnapshot({
|
||||
end: expect.any(Number),
|
||||
id: expect.any(String),
|
||||
jobId: expect.any(String),
|
||||
scheduleId: expect.any(String),
|
||||
start: expect.any(Number),
|
||||
...props,
|
||||
})
|
||||
|
||||
const validateVmTask = (task, vmId, props) => {
|
||||
expect(task).toMatchSnapshot({
|
||||
data: {
|
||||
id: expect.any(String),
|
||||
},
|
||||
end: expect.any(Number),
|
||||
id: expect.any(String),
|
||||
message: expect.any(String),
|
||||
start: expect.any(Number),
|
||||
...props,
|
||||
})
|
||||
expect(task.data.id).toBe(vmId)
|
||||
}
|
||||
|
||||
const validateSnapshotTask = (task, props) =>
|
||||
expect(task).toMatchSnapshot({
|
||||
end: expect.any(Number),
|
||||
id: expect.any(String),
|
||||
result: expect.any(String),
|
||||
start: expect.any(Number),
|
||||
...props,
|
||||
})
|
||||
|
||||
const validateExportTask = (task, srOrRemoteIds, props) => {
|
||||
expect(task).toMatchSnapshot({
|
||||
end: expect.any(Number),
|
||||
id: expect.any(String),
|
||||
message: expect.any(String),
|
||||
start: expect.any(Number),
|
||||
...props,
|
||||
})
|
||||
expect(srOrRemoteIds).toContain(task.data.id)
|
||||
}
|
||||
|
||||
const validateOperationTask = (task, props) => {
|
||||
expect(task).toMatchSnapshot({
|
||||
end: expect.any(Number),
|
||||
id: expect.any(String),
|
||||
message: expect.any(String),
|
||||
start: expect.any(Number),
|
||||
...props,
|
||||
})
|
||||
}
|
||||
|
||||
// Note: `bypassVdiChainsCheck` must be enabled because the XAPI might be not
|
||||
// able to coalesce VDIs as fast as the tests run.
|
||||
//
|
||||
// See https://xen-orchestra.com/docs/backup_troubleshooting.html#vdi-chain-protection
|
||||
describe('backupNg', () => {
|
||||
let defaultBackupNg
|
||||
|
||||
@@ -143,6 +202,7 @@ describe('backupNg', () => {
|
||||
})
|
||||
|
||||
it('fails trying to run a backup job with non-existent vm', async () => {
|
||||
jest.setTimeout(7e3)
|
||||
const scheduleTempId = randomId()
|
||||
const { id: jobId } = await xo.createTempBackupNgJob({
|
||||
...defaultBackupNg,
|
||||
@@ -161,17 +221,19 @@ describe('backupNg', () => {
|
||||
expect(typeof schedule).toBe('object')
|
||||
|
||||
await xo.call('backupNg.runJob', { id: jobId, schedule: schedule.id })
|
||||
const [log] = await xo.call('backupNg.getLogs', {
|
||||
const [log] = await xo.getBackupLogs({
|
||||
scheduleId: schedule.id,
|
||||
})
|
||||
expect(log.warnings).toMatchSnapshot()
|
||||
})
|
||||
|
||||
it('fails trying to run a backup job with a VM without disks', async () => {
|
||||
const vmIdWithoutDisks = await xo.createTempVm({
|
||||
jest.setTimeout(8e3)
|
||||
await xo.createTempServer(config.servers.default)
|
||||
const { id: vmIdWithoutDisks } = await xo.createTempVm({
|
||||
name_label: 'XO Test Without Disks',
|
||||
name_description: 'Creating a vm without disks',
|
||||
template: config.templates.default,
|
||||
template: config.templates.templateWithoutDisks,
|
||||
})
|
||||
|
||||
const scheduleTempId = randomId()
|
||||
@@ -198,7 +260,7 @@ describe('backupNg', () => {
|
||||
tasks: [vmTask],
|
||||
...log
|
||||
},
|
||||
] = await xo.call('backupNg.getLogs', {
|
||||
] = await xo.getBackupLogs({
|
||||
jobId,
|
||||
scheduleId: schedule.id,
|
||||
})
|
||||
@@ -227,11 +289,14 @@ describe('backupNg', () => {
|
||||
})
|
||||
|
||||
it('fails trying to run backup job without retentions', async () => {
|
||||
jest.setTimeout(7e3)
|
||||
const scheduleTempId = randomId()
|
||||
await xo.createTempServer(config.servers.default)
|
||||
const { id: remoteId } = await xo.createTempRemote(config.remotes.default)
|
||||
const { id: jobId } = await xo.createTempBackupNgJob({
|
||||
...defaultBackupNg,
|
||||
remotes: {
|
||||
id: resources.remotes.default.id,
|
||||
id: remoteId,
|
||||
},
|
||||
schedules: {
|
||||
[scheduleTempId]: DEFAULT_SCHEDULE,
|
||||
@@ -254,7 +319,7 @@ describe('backupNg', () => {
|
||||
tasks: [task],
|
||||
...log
|
||||
},
|
||||
] = await xo.call('backupNg.getLogs', {
|
||||
] = await xo.getBackupLogs({
|
||||
jobId,
|
||||
scheduleId: schedule.id,
|
||||
})
|
||||
@@ -284,8 +349,9 @@ describe('backupNg', () => {
|
||||
})
|
||||
|
||||
test('execute three times a rolling snapshot with 2 as retention & revert to an old state', async () => {
|
||||
jest.setTimeout(7e4)
|
||||
const vmId = await xo.createTempVm({
|
||||
jest.setTimeout(6e4)
|
||||
await xo.createTempServer(config.servers.default)
|
||||
let vm = await xo.createTempVm({
|
||||
name_label: 'XO Test Temp',
|
||||
name_description: 'Creating a temporary vm',
|
||||
template: config.templates.default,
|
||||
@@ -302,45 +368,46 @@ describe('backupNg', () => {
|
||||
const { id: jobId } = await xo.createTempBackupNgJob({
|
||||
...defaultBackupNg,
|
||||
vms: {
|
||||
id: vmId,
|
||||
id: vm.id,
|
||||
},
|
||||
schedules: {
|
||||
[scheduleTempId]: DEFAULT_SCHEDULE,
|
||||
},
|
||||
settings: {
|
||||
...defaultBackupNg.settings,
|
||||
'': {
|
||||
bypassVdiChainsCheck: true,
|
||||
reportWhen: 'never',
|
||||
},
|
||||
[scheduleTempId]: { snapshotRetention: 2 },
|
||||
},
|
||||
})
|
||||
|
||||
const schedule = await xo.getSchedule({ jobId })
|
||||
expect(typeof schedule).toBe('object')
|
||||
|
||||
for (let i = 0; i < 3; i++) {
|
||||
const oldSnapshots = xo.objects.all[vmId].snapshots
|
||||
await xo.call('backupNg.runJob', { id: jobId, schedule: schedule.id })
|
||||
await xo.waitObjectState(vmId, ({ snapshots }) => {
|
||||
vm = await xo.waitObjectState(vm.id, ({ snapshots }) => {
|
||||
// Test on updating snapshots.
|
||||
expect(snapshots).not.toEqual(oldSnapshots)
|
||||
expect(snapshots).not.toEqual(vm.snapshots)
|
||||
})
|
||||
}
|
||||
|
||||
const { snapshots, videoram: oldVideoram } = xo.objects.all[vmId]
|
||||
|
||||
// Test on the retention, how many snapshots should be saved.
|
||||
expect(snapshots.length).toBe(2)
|
||||
expect(vm.snapshots.length).toBe(2)
|
||||
|
||||
const newVideoram = 16
|
||||
await xo.call('vm.set', { id: vmId, videoram: newVideoram })
|
||||
await xo.waitObjectState(vmId, ({ videoram }) => {
|
||||
await xo.call('vm.set', { id: vm.id, videoram: newVideoram })
|
||||
await xo.waitObjectState(vm.id, ({ videoram }) => {
|
||||
expect(videoram).toBe(newVideoram.toString())
|
||||
})
|
||||
|
||||
await xo.call('vm.revert', {
|
||||
snapshot: snapshots[0],
|
||||
snapshot: vm.snapshots[0],
|
||||
})
|
||||
|
||||
await xo.waitObjectState(vmId, ({ videoram }) => {
|
||||
expect(videoram).toBe(oldVideoram)
|
||||
await xo.waitObjectState(vm.id, ({ videoram }) => {
|
||||
expect(videoram).toBe(vm.videoram)
|
||||
})
|
||||
|
||||
const [
|
||||
@@ -348,7 +415,7 @@ describe('backupNg', () => {
|
||||
tasks: [{ tasks: subTasks, ...vmTask }],
|
||||
...log
|
||||
},
|
||||
] = await xo.call('backupNg.getLogs', {
|
||||
] = await xo.getBackupLogs({
|
||||
jobId,
|
||||
scheduleId: schedule.id,
|
||||
})
|
||||
@@ -380,6 +447,142 @@ describe('backupNg', () => {
|
||||
message: expect.any(String),
|
||||
start: expect.any(Number),
|
||||
})
|
||||
expect(vmTask.data.id).toBe(vmId)
|
||||
expect(vmTask.data.id).toBe(vm.id)
|
||||
})
|
||||
|
||||
test('execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval', async () => {
|
||||
jest.setTimeout(12e5)
|
||||
const {
|
||||
vms: { default: defaultVm, vmToBackup = defaultVm },
|
||||
remotes: { default: defaultRemote, remote1, remote2 = defaultRemote },
|
||||
servers: { default: defaultServer },
|
||||
} = config
|
||||
|
||||
expect(vmToBackup).not.toBe(undefined)
|
||||
expect(remote1).not.toBe(undefined)
|
||||
expect(remote2).not.toBe(undefined)
|
||||
|
||||
await xo.createTempServer(defaultServer)
|
||||
const { id: remoteId1 } = await xo.createTempRemote(remote1)
|
||||
const { id: remoteId2 } = await xo.createTempRemote(remote2)
|
||||
const remotes = [remoteId1, remoteId2]
|
||||
|
||||
const exportRetention = 2
|
||||
const fullInterval = 2
|
||||
const scheduleTempId = randomId()
|
||||
const { id: jobId } = await xo.createTempBackupNgJob({
|
||||
mode: 'delta',
|
||||
remotes: {
|
||||
id: {
|
||||
__or: remotes,
|
||||
},
|
||||
},
|
||||
schedules: {
|
||||
[scheduleTempId]: DEFAULT_SCHEDULE,
|
||||
},
|
||||
settings: {
|
||||
'': {
|
||||
bypassVdiChainsCheck: true,
|
||||
fullInterval,
|
||||
reportWhen: 'never',
|
||||
},
|
||||
[remoteId1]: { deleteFirst: true },
|
||||
[scheduleTempId]: { exportRetention },
|
||||
},
|
||||
vms: {
|
||||
id: vmToBackup,
|
||||
},
|
||||
})
|
||||
|
||||
const schedule = await xo.getSchedule({ jobId })
|
||||
expect(typeof schedule).toBe('object')
|
||||
|
||||
const nExecutions = 3
|
||||
const backupsByRemote = await xo.runBackupJob(jobId, schedule.id, {
|
||||
remotes,
|
||||
nExecutions,
|
||||
})
|
||||
forOwn(backupsByRemote, backups =>
|
||||
expect(backups.length).toBe(exportRetention)
|
||||
)
|
||||
|
||||
const backupLogs = await xo.getBackupLogs({
|
||||
jobId,
|
||||
scheduleId: schedule.id,
|
||||
})
|
||||
expect(backupLogs.length).toBe(nExecutions)
|
||||
|
||||
backupLogs.forEach(({ tasks = [], ...log }, key) => {
|
||||
validateRootTask(log, {
|
||||
data: {
|
||||
mode: 'delta',
|
||||
reportWhen: 'never',
|
||||
},
|
||||
message: 'backup',
|
||||
status: 'success',
|
||||
})
|
||||
|
||||
const numberOfTasks = {
|
||||
export: 0,
|
||||
merge: 0,
|
||||
snapshot: 0,
|
||||
transfer: 0,
|
||||
vm: 0,
|
||||
}
|
||||
tasks.forEach(({ tasks = [], ...vmTask }) => {
|
||||
if (vmTask.data !== undefined && vmTask.data.type === 'VM') {
|
||||
validateVmTask(vmTask, vmToBackup, { status: 'success' })
|
||||
numberOfTasks.vm++
|
||||
tasks.forEach(({ tasks = [], ...subTask }) => {
|
||||
if (subTask.message === 'snapshot') {
|
||||
validateSnapshotTask(subTask, { status: 'success' })
|
||||
numberOfTasks.snapshot++
|
||||
}
|
||||
if (subTask.message === 'export') {
|
||||
validateExportTask(subTask, remotes, {
|
||||
data: {
|
||||
id: expect.any(String),
|
||||
isFull: key % fullInterval === 0,
|
||||
type: 'remote',
|
||||
},
|
||||
status: 'success',
|
||||
})
|
||||
numberOfTasks.export++
|
||||
let mergeTaskKey, transferTaskKey
|
||||
tasks.forEach((operationTask, key) => {
|
||||
if (
|
||||
operationTask.message === 'transfer' ||
|
||||
operationTask.message === 'merge'
|
||||
) {
|
||||
validateOperationTask(operationTask, {
|
||||
result: { size: expect.any(Number) },
|
||||
status: 'success',
|
||||
})
|
||||
if (operationTask.message === 'transfer') {
|
||||
mergeTaskKey = key
|
||||
numberOfTasks.merge++
|
||||
} else {
|
||||
transferTaskKey = key
|
||||
numberOfTasks.transfer++
|
||||
}
|
||||
}
|
||||
})
|
||||
expect(
|
||||
subTask.data.id === remoteId1
|
||||
? mergeTaskKey > transferTaskKey
|
||||
: mergeTaskKey < transferTaskKey
|
||||
).toBe(true)
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
expect(numberOfTasks).toEqual({
|
||||
export: 2,
|
||||
merge: 2,
|
||||
snapshot: 1,
|
||||
transfer: 2,
|
||||
vm: 1,
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
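The isFull assertion in the export sub-tasks above follows directly from the job's fullInterval setting: with fullInterval = 2, runs 0 and 2 are full exports and run 1 is a delta. A minimal sketch of that expectation, using only plain Node and the test's own numbers:

// Which of the three executions are expected to be full exports (fullInterval = 2).
const fullInterval = 2
const nExecutions = 3
const expectedIsFull = Array.from(
  { length: nExecutions },
  (_, key) => key % fullInterval === 0
)
console.log(expectedIsFull) // → [ true, false, true ]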
51
packages/xo-server-test/src/issues/index.spec.js
Normal file

@@ -0,0 +1,51 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
import config from '../_config'
|
||||
import xo from '../_xoConnection'
|
||||
|
||||
describe('issue', () => {
|
||||
test('4507', async () => {
|
||||
await xo.createTempServer(config.servers.default)
|
||||
|
||||
const props = {
|
||||
coresPerSocket: 1,
|
||||
cpuCap: 1,
|
||||
}
|
||||
const vm = await xo.createTempVm(props)
|
||||
expect(vm).toMatchObject(props)
|
||||
|
||||
await xo.call('vm.set', {
|
||||
coresPerSocket: null,
|
||||
cpuCap: null,
|
||||
id: vm.id,
|
||||
})
|
||||
await xo.waitObjectState(vm.id, vm => {
|
||||
expect(vm.coresPerSocket).toBe(undefined)
|
||||
expect(vm.cpuCap).toBe(undefined)
|
||||
})
|
||||
})
|
||||
|
||||
test('4514', async () => {
|
||||
await xo.createTempServer(config.servers.default)
|
||||
|
||||
const oldName = 'Old XO Test name'
|
||||
const { id, name_label } = await xo.createTempNetwork({ name: oldName })
|
||||
expect(name_label).toBe(oldName)
|
||||
|
||||
const newName = 'New XO Test name'
|
||||
await xo.call('network.set', { id, name_label: newName })
|
||||
await xo.waitObjectState(id, ({ name_label }) => {
|
||||
expect(name_label).toBe(newName)
|
||||
})
|
||||
})
|
||||
|
||||
test('4523', async () => {
|
||||
const id = await xo.call('network.create', {
|
||||
name: 'XO Test',
|
||||
pool: config.pools.default,
|
||||
})
|
||||
expect(typeof id).toBe('string')
|
||||
|
||||
await xo.call('network.delete', { id })
|
||||
})
|
||||
})
|
||||
@@ -40,6 +40,7 @@ describe('job', () => {
|
||||
|
||||
describe('.create() :', () => {
|
||||
it('creates a new job', async () => {
|
||||
jest.setTimeout(6e3)
|
||||
const userId = await xo.createTempUser(ADMIN_USER)
|
||||
const { email, password } = ADMIN_USER
|
||||
await testWithOtherConnection({ email, password }, async xo => {
|
||||
@@ -208,6 +209,8 @@ describe('job', () => {
|
||||
})
|
||||
|
||||
it('runs a job', async () => {
|
||||
jest.setTimeout(7e4)
|
||||
await xo.createTempServer(config.servers.default)
|
||||
const jobId = await xo.createTempJob(defaultJob)
|
||||
const snapshots = xo.objects.all[config.vms.default].snapshots
|
||||
await xo.call('job.runSequence', { idSequence: [jobId] })
|
||||
|
||||
@@ -35,6 +35,7 @@ describe('user', () => {
|
||||
},
|
||||
},
|
||||
async data => {
|
||||
jest.setTimeout(6e3)
|
||||
const userId = await xo.createTempUser(data)
|
||||
expect(typeof userId).toBe('string')
|
||||
expect(await xo.getUser(userId)).toMatchSnapshot({
|
||||
@@ -69,6 +70,7 @@ describe('user', () => {
|
||||
|
||||
describe('.changePassword() :', () => {
|
||||
it('changes the actual user password', async () => {
|
||||
jest.setTimeout(7e3)
|
||||
const user = {
|
||||
email: 'wayne7@vates.fr',
|
||||
password: 'batman',
|
||||
@@ -149,6 +151,7 @@ describe('user', () => {
|
||||
},
|
||||
},
|
||||
async data => {
|
||||
jest.setTimeout(6e3)
|
||||
data.id = await xo.createTempUser(SIMPLE_USER)
|
||||
expect(await xo.call('user.set', data)).toBe(true)
|
||||
expect(await xo.getUser(data.id)).toMatchSnapshot({
|
||||
|
||||
@@ -42,7 +42,7 @@
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^5.1.3",
|
||||
"rimraf": "^2.6.1"
|
||||
"rimraf": "^3.0.0"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
|
||||
@@ -40,7 +40,7 @@
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"babel-preset-env": "^1.5.2",
|
||||
"cross-env": "^5.1.3",
|
||||
"rimraf": "^2.6.1"
|
||||
"rimraf": "^3.0.0"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
|
||||
@@ -41,7 +41,7 @@
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"cross-env": "^5.1.3",
|
||||
"rimraf": "^2.5.4"
|
||||
"rimraf": "^3.0.0"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
|
||||
@@ -41,7 +41,7 @@
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"cross-env": "^5.1.3",
|
||||
"rimraf": "^2.6.1"
|
||||
"rimraf": "^3.0.0"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "xo-server-usage-report",
|
||||
"version": "0.7.2",
|
||||
"version": "0.7.3",
|
||||
"license": "AGPL-3.0",
|
||||
"description": "",
|
||||
"keywords": [
|
||||
@@ -36,7 +36,7 @@
|
||||
},
|
||||
"dependencies": {
|
||||
"@xen-orchestra/async-map": "^0.0.0",
|
||||
"@xen-orchestra/cron": "^1.0.3",
|
||||
"@xen-orchestra/cron": "^1.0.4",
|
||||
"@xen-orchestra/log": "^0.1.4",
|
||||
"handlebars": "^4.0.6",
|
||||
"html-minifier": "^4.0.0",
|
||||
@@ -50,7 +50,7 @@
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^5.1.3",
|
||||
"rimraf": "^2.6.1"
|
||||
"rimraf": "^3.0.0"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
|
||||
@@ -19,7 +19,7 @@ import {
|
||||
values,
|
||||
zipObject,
|
||||
} from 'lodash'
|
||||
import { promisify } from 'promise-toolbox'
|
||||
import { ignoreErrors, promisify } from 'promise-toolbox'
|
||||
import { readFile, writeFile } from 'fs'
|
||||
|
||||
// ===================================================================
|
||||
@@ -759,14 +759,22 @@ class UsageReportPlugin {
|
||||
}
|
||||
|
||||
async _sendReport(storeData) {
|
||||
const xo = this._xo
|
||||
if (xo.sendEmail === undefined) {
|
||||
ignoreErrors.call(xo.unloadPlugin('usage-report'))
|
||||
throw new Error(
|
||||
'The plugin usage-report requires the plugin transport-email to be loaded'
|
||||
)
|
||||
}
|
||||
|
||||
const data = await dataBuilder({
|
||||
xo: this._xo,
|
||||
xo,
|
||||
storedStatsPath: this._storedStatsPath,
|
||||
all: this._conf.all,
|
||||
})
|
||||
|
||||
await Promise.all([
|
||||
this._xo.sendEmail({
|
||||
xo.sendEmail({
|
||||
to: this._conf.emails,
|
||||
subject: `[Xen Orchestra] Xo Report - ${currDate}`,
|
||||
markdown: `Hi there,
|
||||
|
||||
@@ -17,9 +17,11 @@ createUserOnFirstSignin = true
|
||||
# their size just by looking at the beginning of the stream.
|
||||
#
|
||||
# But it is a guess, not a certainty, it depends on how the VHDs are formatted
|
||||
# by XenServer, therefore it's disabled for the moment but can be enabled
|
||||
# specifically for a user if necessary.
|
||||
guessVhdSizeOnImport = false
|
||||
# by XenServer.
|
||||
#
|
||||
# This has been tested for 5 months, therefore it's enabled by default but can
|
||||
# be disabled specifically for a user if necessary.
|
||||
guessVhdSizeOnImport = true
|
||||
|
||||
# Whether API logs should contain the full request/response on
|
||||
# errors.
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
{
|
||||
"private": true,
|
||||
"name": "xo-server",
|
||||
"version": "5.46.0",
|
||||
"version": "5.50.1",
|
||||
"license": "AGPL-3.0",
|
||||
"description": "Server part of Xen-Orchestra",
|
||||
"keywords": [
|
||||
@@ -35,7 +35,7 @@
|
||||
"dependencies": {
|
||||
"@iarna/toml": "^2.2.1",
|
||||
"@xen-orchestra/async-map": "^0.0.0",
|
||||
"@xen-orchestra/cron": "^1.0.3",
|
||||
"@xen-orchestra/cron": "^1.0.4",
|
||||
"@xen-orchestra/defined": "^0.0.0",
|
||||
"@xen-orchestra/emit-async": "^0.0.0",
|
||||
"@xen-orchestra/fs": "^0.10.1",
|
||||
@@ -46,6 +46,7 @@
|
||||
"archiver": "^3.0.0",
|
||||
"async-iterator-to-stream": "^1.0.1",
|
||||
"base64url": "^3.0.0",
|
||||
"bind-property-descriptor": "^1.0.0",
|
||||
"blocked": "^1.2.1",
|
||||
"bluebird": "^3.5.1",
|
||||
"body-parser": "^1.18.2",
|
||||
@@ -123,7 +124,7 @@
|
||||
"value-matcher": "^0.2.0",
|
||||
"vhd-lib": "^0.7.0",
|
||||
"ws": "^6.0.0",
|
||||
"xen-api": "^0.27.1",
|
||||
"xen-api": "^0.27.2",
|
||||
"xml2js": "^0.4.19",
|
||||
"xo-acl-resolver": "^0.4.1",
|
||||
"xo-collection": "^0.4.1",
|
||||
@@ -149,7 +150,7 @@
|
||||
"babel-plugin-transform-dev": "^2.0.1",
|
||||
"cross-env": "^5.1.3",
|
||||
"index-modules": "^0.3.0",
|
||||
"rimraf": "^2.6.2"
|
||||
"rimraf": "^3.0.0"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
|
||||
34
packages/xo-server/src/_MultiKeyMap.spec.js
Normal file
@@ -0,0 +1,34 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
import MultiKeyMap from './_MultiKeyMap'
|
||||
|
||||
describe('MultiKeyMap', () => {
|
||||
it('works', () => {
|
||||
const map = new MultiKeyMap()
|
||||
|
||||
const keys = [
|
||||
// null key
|
||||
[],
|
||||
// simple key
|
||||
['foo'],
|
||||
// composite key
|
||||
['foo', 'bar'],
|
||||
// reverse composite key
|
||||
['bar', 'foo'],
|
||||
]
|
||||
const values = keys.map(() => ({}))
|
||||
|
||||
// set all values first to make sure they are all stored and not only the
|
||||
// last one
|
||||
keys.forEach((key, i) => {
|
||||
map.set(key, values[i])
|
||||
})
|
||||
|
||||
keys.forEach((key, i) => {
|
||||
// copy the key to make sure the array itself is not the key
|
||||
expect(map.get(key.slice())).toBe(values[i])
|
||||
map.delete(key.slice())
|
||||
expect(map.get(key.slice())).toBe(undefined)
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -16,13 +16,28 @@ function scheduleRemoveCacheEntry(keys, expires) {
|
||||
|
||||
const defaultKeyFn = () => []
|
||||
|
||||
const { slice } = Array.prototype
|
||||
|
||||
export const REMOVE_CACHE_ENTRY = {}
|
||||
|
||||
// debounce an async function so that all subsequent calls in a delay receive
|
||||
// the same result
|
||||
//
|
||||
// similar to `p-debounce` with `leading` set to `true` but with key support
|
||||
export default (fn, delay, keyFn = defaultKeyFn) => {
|
||||
//
|
||||
// - `delay`: number of milliseconds to cache the response, a function can be
|
||||
// passed to use a custom delay for a call based on its parameters
|
||||
export const debounceWithKey = (fn, delay, keyFn = defaultKeyFn) => {
|
||||
const cache = new MultiKeyMap()
|
||||
return function() {
|
||||
const delayFn = typeof delay === 'number' ? () => delay : delay
|
||||
return function(arg) {
|
||||
if (arg === REMOVE_CACHE_ENTRY) {
|
||||
return removeCacheEntry(
|
||||
cache,
|
||||
ensureArray(keyFn.apply(this, slice.call(arguments, 1)))
|
||||
)
|
||||
}
|
||||
|
||||
const keys = ensureArray(keyFn.apply(this, arguments))
|
||||
let promise = cache.get(keys)
|
||||
if (promise === undefined) {
|
||||
@@ -30,10 +45,15 @@ export default (fn, delay, keyFn = defaultKeyFn) => {
|
||||
const remove = scheduleRemoveCacheEntry.bind(
|
||||
cache,
|
||||
keys,
|
||||
Date.now() + delay
|
||||
Date.now() + delayFn.apply(this, arguments)
|
||||
)
|
||||
promise.then(remove, remove)
|
||||
}
|
||||
return promise
|
||||
}
|
||||
}
|
||||
|
||||
debounceWithKey.decorate = (...params) => (target, name, descriptor) => ({
|
||||
...descriptor,
|
||||
value: debounceWithKey(descriptor.value, ...params),
|
||||
})
|
||||
|
||||
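The comment above documents the new behaviour: delay may now be a function of the call's parameters instead of a fixed number of milliseconds. A small usage sketch (the fetching function and namespaces are made up for illustration; only the debounceWithKey signature comes from the module above):

import { debounceWithKey } from './_pDebounceWithKey'

// Hypothetical async function; results are cached per namespace.
const fetchList = namespace => Promise.resolve([namespace])

const listDebounced = debounceWithKey(
  fetchList,
  // per-call delay: cache the "slow" namespace for a minute, the rest for 5 seconds
  namespace => (namespace === 'slow' ? 60e3 : 5e3),
  // cache key
  namespace => namespace
)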
29
packages/xo-server/src/_pDebounceWithKey.spec.js
Normal file
@@ -0,0 +1,29 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
import { debounceWithKey, REMOVE_CACHE_ENTRY } from './_pDebounceWithKey'
|
||||
|
||||
describe('REMOVE_CACHE_ENTRY', () => {
|
||||
it('clears the cache', async () => {
|
||||
let i = 0
|
||||
const debouncedFn = debounceWithKey(
|
||||
function() {
|
||||
return Promise.resolve(++i)
|
||||
},
|
||||
Infinity,
|
||||
id => id
|
||||
)
|
||||
|
||||
// not cached across keys
|
||||
expect(await debouncedFn(1)).toBe(1)
|
||||
expect(await debouncedFn(2)).toBe(2)
|
||||
|
||||
// retrieve the already cached values
|
||||
expect(await debouncedFn(1)).toBe(1)
|
||||
expect(await debouncedFn(2)).toBe(2)
|
||||
|
||||
// an entry for a specific key can be removed
|
||||
debouncedFn(REMOVE_CACHE_ENTRY, 1)
|
||||
expect(await debouncedFn(1)).toBe(3)
|
||||
expect(await debouncedFn(2)).toBe(2)
|
||||
})
|
||||
})
|
||||
@@ -3,6 +3,7 @@ import { fromCallback } from 'promise-toolbox'
|
||||
import { pipeline } from 'readable-stream'
|
||||
|
||||
import createNdJsonStream from '../_createNdJsonStream'
|
||||
import { REMOVE_CACHE_ENTRY } from '../_pDebounceWithKey'
|
||||
import { safeDateFormat } from '../utils'
|
||||
|
||||
export function createJob({ schedules, ...job }) {
|
||||
@@ -184,7 +185,20 @@ getAllLogs.params = {
|
||||
ndjson: { type: 'boolean', optional: true },
|
||||
}
|
||||
|
||||
export function getLogs({ after, before, limit, ...filter }) {
|
||||
export function getLogs({
|
||||
after,
|
||||
before,
|
||||
limit,
|
||||
|
||||
// TODO: it's a temporary work-around which should be removed
|
||||
// once the consolidated logs are stored in the DB
|
||||
_forceRefresh = false,
|
||||
|
||||
...filter
|
||||
}) {
|
||||
if (_forceRefresh) {
|
||||
this.getBackupNgLogs(REMOVE_CACHE_ENTRY)
|
||||
}
|
||||
return this.getBackupNgLogsSorted({ after, before, limit, filter })
|
||||
}
|
||||
|
||||
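With this change a caller can bypass the 10-second log cache (see the debounced getBackupNgLogs further down) by passing _forceRefresh. A sketch of the call as it would look from the test helpers used earlier (the ids are placeholders):

// Inside an async test, assuming the same `xo` connection helper as above.
const logs = await xo.call('backupNg.getLogs', {
  _forceRefresh: true,
  jobId,                    // placeholder ids
  scheduleId: schedule.id,
})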
@@ -302,7 +316,7 @@ export async function fetchFiles(params) {
|
||||
filename += '.zip'
|
||||
|
||||
return this.registerHttpRequest(handleFetchFiles, params, {
|
||||
suffix: encodeURI(`/${filename}`),
|
||||
suffix: '/' + encodeURIComponent(filename),
|
||||
}).then(url => ({ $getFrom: url }))
|
||||
}
|
||||
|
||||
|
||||
@@ -93,7 +93,7 @@ export async function fetchFiles({ format = 'zip', ...params }) {
|
||||
handleFetchFiles,
|
||||
{ ...params, format },
|
||||
{
|
||||
suffix: encodeURI(`/${fileName}`),
|
||||
suffix: '/' + encodeURIComponent(fileName),
|
||||
}
|
||||
).then(url => ({ $getFrom: url }))
|
||||
}
|
||||
|
||||
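The two fetchFiles hunks above (and the VM export one later) switch from encodeURI on the whole suffix to encodeURIComponent on the file name. The difference matters for names containing URL-reserved characters, which encodeURI leaves untouched. Quick illustration with a made-up file name:

const fileName = 'VM #1 50%.xva'

encodeURI(`/${fileName}`)
// → '/VM%20#1%2050%25.xva'    ('#' is kept, so the rest is treated as a fragment)

'/' + encodeURIComponent(fileName)
// → '/VM%20%231%2050%25.xva'  (every reserved character is escaped)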
@@ -221,12 +221,7 @@ emergencyShutdownHost.resolve = {
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
export async function isHostServerTimeConsistent({ host }) {
|
||||
try {
|
||||
await this.getXapi(host).assertConsistentHostServerTime(host._xapiRef)
|
||||
return true
|
||||
} catch (e) {
|
||||
return false
|
||||
}
|
||||
return this.getXapi(host).isHostServerTimeConsistent(host._xapiRef)
|
||||
}
|
||||
|
||||
isHostServerTimeConsistent.params = {
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
import xapiObjectToXo from '../xapi-object-to-xo'
|
||||
import { mapToArray } from '../utils'
|
||||
|
||||
export function getBondModes() {
|
||||
@@ -12,13 +13,15 @@ export async function create({
|
||||
mtu = 1500,
|
||||
vlan = 0,
|
||||
}) {
|
||||
return this.getXapi(pool).createNetwork({
|
||||
name,
|
||||
description,
|
||||
pifId: pif && this.getObject(pif, 'PIF')._xapiId,
|
||||
mtu: +mtu,
|
||||
vlan: +vlan,
|
||||
})
|
||||
return xapiObjectToXo(
|
||||
await this.getXapi(pool).createNetwork({
|
||||
name,
|
||||
description,
|
||||
pifId: pif && this.getObject(pif, 'PIF')._xapiId,
|
||||
mtu: +mtu,
|
||||
vlan: +vlan,
|
||||
})
|
||||
).id
|
||||
}
|
||||
|
||||
create.params = {
|
||||
@@ -116,6 +119,9 @@ set.params = {
|
||||
type: 'boolean',
|
||||
optional: true,
|
||||
},
|
||||
id: {
|
||||
type: 'string',
|
||||
},
|
||||
name_description: {
|
||||
type: 'string',
|
||||
optional: true,
|
||||
|
||||
@@ -9,7 +9,7 @@ import {
|
||||
unauthorized,
|
||||
} from 'xo-common/api-errors'
|
||||
|
||||
import { forEach, map, mapFilter, parseSize } from '../utils'
|
||||
import { forEach, map, mapFilter, parseSize, safeDateFormat } from '../utils'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
@@ -1137,10 +1137,15 @@ resume.resolve = {
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
export async function revert({ snapshot, snapshotBefore }) {
|
||||
await this.checkPermissions(this.user.id, [
|
||||
[snapshot.$snapshot_of, 'operate'],
|
||||
])
|
||||
return this.getXapi(snapshot).revertVm(snapshot._xapiId, snapshotBefore)
|
||||
const { id: userId, permission } = this.user
|
||||
await this.checkPermissions(userId, [[snapshot.$snapshot_of, 'operate']])
|
||||
const newSnapshot = await this.getXapi(snapshot).revertVm(
|
||||
snapshot._xapiId,
|
||||
snapshotBefore
|
||||
)
|
||||
if (snapshotBefore && permission !== 'admin') {
|
||||
await this.addAcl(userId, newSnapshot.$id, 'admin')
|
||||
}
|
||||
}
|
||||
|
||||
revert.params = {
|
||||
@@ -1184,7 +1189,11 @@ async function export_({ vm, compress }) {
|
||||
|
||||
return {
|
||||
$getFrom: await this.registerHttpRequest(handleExport, data, {
|
||||
suffix: encodeURI(`/${vm.name_label}.xva`),
|
||||
suffix:
|
||||
'/' +
|
||||
encodeURIComponent(
|
||||
`${safeDateFormat(new Date())} - ${vm.name_label}.xva`
|
||||
),
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -821,12 +821,14 @@ export const createSR = defer(async function(
|
||||
createSR.description = 'create gluster VM'
|
||||
createSR.permission = 'admin'
|
||||
createSR.params = {
|
||||
brickSize: { type: 'number', optional: true },
|
||||
srs: {
|
||||
type: 'array',
|
||||
items: {
|
||||
type: 'string',
|
||||
},
|
||||
},
|
||||
template: { type: 'object' },
|
||||
pif: {
|
||||
type: 'string',
|
||||
},
|
||||
@@ -1162,11 +1164,11 @@ async function _prepareGlusterVm(
|
||||
}
|
||||
|
||||
async function _importGlusterVM(xapi, template, lvmsrId) {
|
||||
const templateStream = await this.requestResource(
|
||||
'xosan',
|
||||
template.id,
|
||||
template.version
|
||||
)
|
||||
const templateStream = await this.requestResource({
|
||||
id: template.id,
|
||||
namespace: 'xosan',
|
||||
version: template.version,
|
||||
})
|
||||
const newVM = await xapi.importVm(templateStream, {
|
||||
srId: lvmsrId,
|
||||
type: 'xva',
|
||||
@@ -1533,8 +1535,11 @@ export async function downloadAndInstallXosanPack({ id, version, pool }) {
|
||||
}
|
||||
|
||||
const xapi = this.getXapi(pool.id)
|
||||
const res = await this.requestResource('xosan', id, version)
|
||||
|
||||
const res = await this.requestResource({
|
||||
id,
|
||||
namespace: 'xosan',
|
||||
version,
|
||||
})
|
||||
await xapi.installSupplementalPackOnAllHosts(res)
|
||||
await xapi.pool.update_other_config(
|
||||
'xosan_pack_installation_time',
|
||||
|
||||
@@ -26,7 +26,12 @@ export const merge = (newValue, oldValue) => {
|
||||
|
||||
export const obfuscate = value => replace(value, OBFUSCATED_VALUE)
|
||||
|
||||
const SENSITIVE_PARAMS = { __proto__: null, password: true, token: true }
|
||||
const SENSITIVE_PARAMS = {
|
||||
__proto__: null,
|
||||
cifspassword: true,
|
||||
password: true,
|
||||
token: true,
|
||||
}
|
||||
|
||||
export function replace(value, replacement) {
|
||||
function helper(value, name) {
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
import * as sensitiveValues from './sensitive-values'
|
||||
import ensureArray from './_ensureArray'
|
||||
import {
|
||||
extractProperty,
|
||||
@@ -485,7 +486,10 @@ const TRANSFORMS = {
|
||||
attached: Boolean(obj.currently_attached),
|
||||
host: link(obj, 'host'),
|
||||
SR: link(obj, 'SR'),
|
||||
device_config: obj.device_config,
|
||||
device_config: sensitiveValues.replace(
|
||||
obj.device_config,
|
||||
'* obfuscated *'
|
||||
),
|
||||
otherConfig: obj.other_config,
|
||||
}
|
||||
},
|
||||
|
||||
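With cifspassword added to SENSITIVE_PARAMS and device_config now passed through sensitiveValues.replace, credential-like keys of a PBD's device_config no longer leak into the XO objects. A small sketch of the helper as used in the transform above (the device_config value itself is made up):

import * as sensitiveValues from './sensitive-values'

const deviceConfig = { location: '//nas/share', cifspassword: 'hunter2' } // made-up example

sensitiveValues.replace(deviceConfig, '* obfuscated *')
// → { location: '//nas/share', cifspassword: '* obfuscated *' }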
@@ -734,9 +734,19 @@ export default class Xapi extends XapiBase {
|
||||
const { SR } = vdi
|
||||
let childrenMap = cache[SR]
|
||||
if (childrenMap === undefined) {
|
||||
const xapi = vdi.$xapi
|
||||
childrenMap = cache[SR] = groupBy(
|
||||
vdi.$SR.$VDIs,
|
||||
_ => _.sm_config['vhd-parent']
|
||||
vdi.$SR.VDIs,
|
||||
|
||||
// if, for any reason, the VDI is undefined, simply ignore it instead
|
||||
// of failing
|
||||
ref => {
|
||||
try {
|
||||
return xapi.getObjectByRef(ref).sm_config['vhd-parent']
|
||||
} catch (error) {
|
||||
log.warn('missing VDI in _assertHealthyVdiChain', { error })
|
||||
}
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
@@ -1682,12 +1692,15 @@ export default class Xapi extends XapiBase {
|
||||
}
|
||||
|
||||
async createVdi({
|
||||
// blindly copying `sm_config` from another VDI can create problems,
|
||||
// therefore it is ignored by this method
|
||||
//
|
||||
// see https://github.com/vatesfr/xen-orchestra/issues/4482
|
||||
name_description,
|
||||
name_label,
|
||||
other_config = {},
|
||||
read_only = false,
|
||||
sharable = false,
|
||||
sm_config,
|
||||
SR,
|
||||
tags,
|
||||
type = 'user',
|
||||
@@ -1707,7 +1720,6 @@ export default class Xapi extends XapiBase {
|
||||
other_config,
|
||||
read_only: Boolean(read_only),
|
||||
sharable: Boolean(sharable),
|
||||
sm_config,
|
||||
SR: sr.$ref,
|
||||
tags,
|
||||
type,
|
||||
@@ -2029,6 +2041,7 @@ export default class Xapi extends XapiBase {
|
||||
)
|
||||
)
|
||||
}
|
||||
|
||||
@deferrable
|
||||
async createNetwork(
|
||||
$defer,
|
||||
@@ -2346,14 +2359,22 @@ export default class Xapi extends XapiBase {
|
||||
)
|
||||
}
|
||||
|
||||
async assertConsistentHostServerTime(hostRef) {
|
||||
const delta =
|
||||
async _getHostServerTimeShift(hostRef) {
|
||||
return Math.abs(
|
||||
parseDateTime(await this.call('host.get_servertime', hostRef)).getTime() -
|
||||
Date.now()
|
||||
if (Math.abs(delta) > 30e3) {
|
||||
Date.now()
|
||||
)
|
||||
}
|
||||
|
||||
async isHostServerTimeConsistent(hostRef) {
|
||||
return (await this._getHostServerTimeShift(hostRef)) < 30e3
|
||||
}
|
||||
|
||||
async assertConsistentHostServerTime(hostRef) {
|
||||
if (!(await this.isHostServerTimeConsistent(hostRef))) {
|
||||
throw new Error(
|
||||
`host server time and XOA date are not consistent with each other (${ms(
|
||||
delta
|
||||
await this._getHostServerTimeShift(hostRef)
|
||||
)})`
|
||||
)
|
||||
}
|
||||
|
||||
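The refactor above splits the check in two: _getHostServerTimeShift returns the absolute difference between host.get_servertime and the local clock, isHostServerTimeConsistent compares it to a 30-second threshold, and the assert variant keeps throwing with a human-readable duration. The rule itself, in isolation:

// The consistency rule used above: the shift must stay under 30 seconds.
const isConsistent = shiftMs => Math.abs(shiftMs) < 30e3

isConsistent(12e3) // → true
isConsistent(45e3) // → false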
@@ -6,6 +6,7 @@ import { filter, find, pickBy, some } from 'lodash'
|
||||
|
||||
import ensureArray from '../../_ensureArray'
|
||||
import { debounce } from '../../decorators'
|
||||
import { debounceWithKey } from '../../_pDebounceWithKey'
|
||||
import { forEach, mapFilter, mapToArray, parseXml } from '../../utils'
|
||||
|
||||
import { extractOpaqueRef, useUpdateSystem } from '../utils'
|
||||
@@ -35,6 +36,28 @@ const log = createLogger('xo:xapi')
|
||||
|
||||
const _isXcp = host => host.software_version.product_brand === 'XCP-ng'
|
||||
|
||||
const XCP_NG_DEBOUNCE_TIME_MS = 60000
|
||||
|
||||
// list all yum updates available for a XCP-ng host
|
||||
// (hostObject) → { uuid: patchObject }
|
||||
async function _listXcpUpdates(host) {
|
||||
return JSON.parse(
|
||||
await this.call(
|
||||
'host.call_plugin',
|
||||
host.$ref,
|
||||
'updater.py',
|
||||
'check_update',
|
||||
{}
|
||||
)
|
||||
)
|
||||
}
|
||||
|
||||
const _listXcpUpdateDebounced = debounceWithKey(
|
||||
_listXcpUpdates,
|
||||
XCP_NG_DEBOUNCE_TIME_MS,
|
||||
host => host.$ref
|
||||
)
|
||||
|
||||
// =============================================================================
|
||||
|
||||
export default {
|
||||
@@ -141,19 +164,8 @@ export default {
|
||||
|
||||
// LIST ----------------------------------------------------------------------
|
||||
|
||||
// list all yum updates available for a XCP-ng host
|
||||
// (hostObject) → { uuid: patchObject }
|
||||
async _listXcpUpdates(host) {
|
||||
return JSON.parse(
|
||||
await this.call(
|
||||
'host.call_plugin',
|
||||
host.$ref,
|
||||
'updater.py',
|
||||
'check_update',
|
||||
{}
|
||||
)
|
||||
)
|
||||
},
|
||||
_listXcpUpdates,
|
||||
_listXcpUpdateDebounced,
|
||||
|
||||
// list all patches provided by Citrix for this host version regardless
|
||||
// of if they're installed or not
|
||||
@@ -255,7 +267,7 @@ export default {
|
||||
)) !== undefined
|
||||
) {
|
||||
if (getAll) {
|
||||
log(
|
||||
log.debug(
|
||||
`patch ${patch.name} (${id}) conflicts with installed patch ${conflictId}`
|
||||
)
|
||||
return
|
||||
@@ -271,7 +283,7 @@ export default {
|
||||
)) !== undefined
|
||||
) {
|
||||
if (getAll) {
|
||||
log(`patches ${id} and ${conflictId} conflict with eachother`)
|
||||
log.debug(`patches ${id} and ${conflictId} conflict with eachother`)
|
||||
return
|
||||
}
|
||||
throw new Error(
|
||||
@@ -306,7 +318,7 @@ export default {
|
||||
listMissingPatches(hostId) {
|
||||
const host = this.getObject(hostId)
|
||||
return _isXcp(host)
|
||||
? this._listXcpUpdates(host)
|
||||
? this._listXcpUpdateDebounced(host)
|
||||
: // TODO: list paid patches of free hosts as well so the UI can show them
|
||||
this._listInstallablePatches(host)
|
||||
},
|
||||
|
||||
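listMissingPatches now routes XCP-ng hosts through _listXcpUpdateDebounced, so updater.py check_update is called at most once per host per minute while repeated UI polls share the cached result. The same pattern, reduced to its essentials (the update-listing function here is hypothetical):

import { debounceWithKey } from '../../_pDebounceWithKey'

// Hypothetical expensive per-host call.
const listUpdates = async host => ({ host: host.$ref, updates: [] })

const listUpdatesDebounced = debounceWithKey(
  listUpdates,
  60e3,              // same one-minute window as XCP_NG_DEBOUNCE_TIME_MS
  host => host.$ref  // one cache entry per host
)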
@@ -276,19 +276,20 @@ export default {
|
||||
if (virtualizationMode !== 'pv' && virtualizationMode !== 'hvm') {
|
||||
throw new Error(`The virtualization mode must be 'pv' or 'hvm'`)
|
||||
}
|
||||
return vm
|
||||
.set_domain_type(virtualizationMode)
|
||||
::pCatch({ code: 'MESSAGE_METHOD_UNKNOWN' }, () =>
|
||||
vm.set_HVM_boot_policy(
|
||||
return vm.set_domain_type !== undefined
|
||||
? vm.set_domain_type(virtualizationMode)
|
||||
: vm.set_HVM_boot_policy(
|
||||
virtualizationMode === 'hvm' ? 'Boot order' : ''
|
||||
)
|
||||
)
|
||||
},
|
||||
},
|
||||
|
||||
coresPerSocket: {
|
||||
set: (coresPerSocket, vm) =>
|
||||
vm.update_platform('cores-per-socket', String(coresPerSocket)),
|
||||
vm.update_platform(
|
||||
'cores-per-socket',
|
||||
coresPerSocket !== null ? String(coresPerSocket) : null
|
||||
),
|
||||
},
|
||||
|
||||
CPUs: 'cpus',
|
||||
@@ -314,7 +315,8 @@ export default {
|
||||
|
||||
cpuCap: {
|
||||
get: vm => vm.VCPUs_params.cap && +vm.VCPUs_params.cap,
|
||||
set: (cap, vm) => vm.update_VCPUs_params('cap', String(cap)),
|
||||
set: (cap, vm) =>
|
||||
vm.update_VCPUs_params('cap', cap !== null ? String(cap) : null),
|
||||
},
|
||||
|
||||
cpuMask: {
|
||||
@@ -463,8 +465,9 @@ export default {
|
||||
|
||||
async revertVm(snapshotId, snapshotBefore = true) {
|
||||
const snapshot = this.getObject(snapshotId)
|
||||
let newSnapshot
|
||||
if (snapshotBefore) {
|
||||
await this._snapshotVm(snapshot.$snapshot_of)
|
||||
newSnapshot = await this._snapshotVm(snapshot.$snapshot_of)
|
||||
}
|
||||
await this.callAsync('VM.revert', snapshot.$ref)
|
||||
if (snapshot.snapshot_info['power-state-at-snapshot'] === 'Running') {
|
||||
@@ -475,6 +478,7 @@ export default {
|
||||
this.resumeVm(vm.$id)::ignoreErrors()
|
||||
}
|
||||
}
|
||||
return newSnapshot
|
||||
},
|
||||
|
||||
async resumeVm(vmId) {
|
||||
|
||||
@@ -332,7 +332,7 @@ export const makeEditObject = specs => {
|
||||
|
||||
export const useUpdateSystem = host => {
|
||||
// Match Xen Center's condition: https://github.com/xenserver/xenadmin/blob/f3a64fc54bbff239ca6f285406d9034f57537d64/XenModel/Utils/Helpers.cs#L420
|
||||
return versionSatisfies(host.software_version.platform_version, '^2.1.1')
|
||||
return versionSatisfies(host.software_version.platform_version, '>=2.1.1')
|
||||
}
|
||||
|
||||
export const canSrHaveNewVdiOfSize = (sr, minSize) =>
|
||||
|
||||
@@ -3,6 +3,7 @@ import kindOf from 'kindof'
|
||||
import ms from 'ms'
|
||||
import schemaInspector from 'schema-inspector'
|
||||
import { forEach, isFunction } from 'lodash'
|
||||
import { getBoundPropertyDescriptor } from 'bind-property-descriptor'
|
||||
import { MethodNotFound } from 'json-rpc-peer'
|
||||
|
||||
import * as methods from '../api'
|
||||
@@ -219,17 +220,29 @@ export default class Api {
|
||||
throw new MethodNotFound(name)
|
||||
}
|
||||
|
||||
// FIXME: it can cause issues if there are any property assignments in
|
||||
// XO methods called from the API.
|
||||
const context = Object.create(xo, {
|
||||
api: {
|
||||
// Used by system.*().
|
||||
value: this,
|
||||
},
|
||||
session: {
|
||||
value: session,
|
||||
},
|
||||
})
|
||||
// create the context which is an augmented XO
|
||||
const context = (() => {
|
||||
const descriptors = {
|
||||
api: {
|
||||
// Used by system.*().
|
||||
value: this,
|
||||
},
|
||||
session: {
|
||||
value: session,
|
||||
},
|
||||
}
|
||||
|
||||
let obj = xo
|
||||
do {
|
||||
Object.getOwnPropertyNames(obj).forEach(name => {
|
||||
if (!(name in descriptors)) {
|
||||
descriptors[name] = getBoundPropertyDescriptor(obj, name, xo)
|
||||
}
|
||||
})
|
||||
} while ((obj = Reflect.getPrototypeOf(obj)) !== null)
|
||||
|
||||
return Object.create(null, descriptors)
|
||||
})()
|
||||
|
||||
// Fetch and inject the current user.
|
||||
const userId = session.get('user_id', undefined)
|
||||
|
||||
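The new context is a prototype-less object whose properties are XO's own methods, plus the api and session descriptors. Compared to Object.create(xo, …), property assignments made by API handlers no longer land on the shared xo instance, and methods can be extracted without losing their receiver. A self-contained illustration of that effect, assuming (as the loop above relies on) that getBoundPropertyDescriptor yields a descriptor whose function value is bound to xo:

class Counter {
  constructor() {
    this.n = 0
  }
  increment() {
    return ++this.n
  }
}

const counter = new Counter()
const { increment } = counter         // plain extraction would lose `this`
const bound = increment.bind(counter) // the binding the descriptors provide, per property

bound() // → 1, and counter.n is updated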
@@ -1,6 +1,8 @@
|
||||
import ms from 'ms'
|
||||
import { forEach, isEmpty, iteratee, sortedIndexBy } from 'lodash'
|
||||
|
||||
import { debounceWithKey } from '../_pDebounceWithKey'
|
||||
|
||||
const isSkippedError = error =>
|
||||
error.message === 'no disks found' ||
|
||||
error.message === 'no VMs match this pattern' ||
|
||||
@@ -64,131 +66,138 @@ const taskTimeComparator = ({ start: s1, end: e1 }, { start: s2, end: e2 }) => {
|
||||
// tasks?: Task[],
|
||||
// }
|
||||
export default {
|
||||
async getBackupNgLogs(runId?: string) {
|
||||
const [jobLogs, restoreLogs, restoreMetadataLogs] = await Promise.all([
|
||||
this.getLogs('jobs'),
|
||||
this.getLogs('restore'),
|
||||
this.getLogs('metadataRestore'),
|
||||
])
|
||||
getBackupNgLogs: debounceWithKey(
|
||||
async function getBackupNgLogs(runId?: string) {
|
||||
const [jobLogs, restoreLogs, restoreMetadataLogs] = await Promise.all([
|
||||
this.getLogs('jobs'),
|
||||
this.getLogs('restore'),
|
||||
this.getLogs('metadataRestore'),
|
||||
])
|
||||
|
||||
const { runningJobs, runningRestores, runningMetadataRestores } = this
|
||||
const consolidated = {}
|
||||
const started = {}
|
||||
const { runningJobs, runningRestores, runningMetadataRestores } = this
|
||||
const consolidated = {}
|
||||
const started = {}
|
||||
|
||||
const handleLog = ({ data, time, message }, id) => {
|
||||
const { event } = data
|
||||
if (event === 'job.start') {
|
||||
if (
|
||||
(data.type === 'backup' || data.key === undefined) &&
|
||||
(runId === undefined || runId === id)
|
||||
) {
|
||||
const { scheduleId, jobId } = data
|
||||
consolidated[id] = started[id] = {
|
||||
const handleLog = ({ data, time, message }, id) => {
|
||||
const { event } = data
|
||||
if (event === 'job.start') {
|
||||
if (
|
||||
(data.type === 'backup' || data.key === undefined) &&
|
||||
(runId === undefined || runId === id)
|
||||
) {
|
||||
const { scheduleId, jobId } = data
|
||||
consolidated[id] = started[id] = {
|
||||
data: data.data,
|
||||
id,
|
||||
jobId,
|
||||
jobName: data.jobName,
|
||||
message: 'backup',
|
||||
scheduleId,
|
||||
start: time,
|
||||
status: runningJobs[jobId] === id ? 'pending' : 'interrupted',
|
||||
}
|
||||
}
|
||||
} else if (event === 'job.end') {
|
||||
const { runJobId } = data
|
||||
const log = started[runJobId]
|
||||
if (log !== undefined) {
|
||||
delete started[runJobId]
|
||||
log.end = time
|
||||
log.status = computeStatusAndSortTasks(
|
||||
getStatus((log.result = data.error)),
|
||||
log.tasks
|
||||
)
|
||||
}
|
||||
} else if (event === 'task.start') {
|
||||
const task = {
|
||||
data: data.data,
|
||||
id,
|
||||
jobId,
|
||||
jobName: data.jobName,
|
||||
message: 'backup',
|
||||
scheduleId,
|
||||
message,
|
||||
start: time,
|
||||
status: runningJobs[jobId] === id ? 'pending' : 'interrupted',
|
||||
}
|
||||
const { parentId } = data
|
||||
let parent
|
||||
if (parentId === undefined && (runId === undefined || runId === id)) {
|
||||
// top level task
|
||||
task.status =
|
||||
(message === 'restore' && !runningRestores.has(id)) ||
|
||||
(message === 'metadataRestore' &&
|
||||
!runningMetadataRestores.has(id))
|
||||
? 'interrupted'
|
||||
: 'pending'
|
||||
consolidated[id] = started[id] = task
|
||||
} else if ((parent = started[parentId]) !== undefined) {
|
||||
// sub-task for which the parent exists
|
||||
task.status = parent.status
|
||||
started[id] = task
|
||||
;(parent.tasks || (parent.tasks = [])).push(task)
|
||||
}
|
||||
} else if (event === 'task.end') {
|
||||
const { taskId } = data
|
||||
const log = started[taskId]
|
||||
if (log !== undefined) {
|
||||
// TODO: merge/transfer work-around
|
||||
delete started[taskId]
|
||||
log.end = time
|
||||
log.status = computeStatusAndSortTasks(
|
||||
getStatus((log.result = data.result), data.status),
|
||||
log.tasks
|
||||
)
|
||||
}
|
||||
} else if (event === 'task.warning') {
|
||||
const parent = started[data.taskId]
|
||||
parent !== undefined &&
|
||||
(parent.warnings || (parent.warnings = [])).push({
|
||||
data: data.data,
|
||||
message,
|
||||
})
|
||||
} else if (event === 'task.info') {
|
||||
const parent = started[data.taskId]
|
||||
parent !== undefined &&
|
||||
(parent.infos || (parent.infos = [])).push({
|
||||
data: data.data,
|
||||
message,
|
||||
})
|
||||
} else if (event === 'jobCall.start') {
|
||||
const parent = started[data.runJobId]
|
||||
if (parent !== undefined) {
|
||||
;(parent.tasks || (parent.tasks = [])).push(
|
||||
(started[id] = {
|
||||
data: {
|
||||
type: 'VM',
|
||||
id: data.params.id,
|
||||
},
|
||||
id,
|
||||
start: time,
|
||||
status: parent.status,
|
||||
})
|
||||
)
|
||||
}
|
||||
} else if (event === 'jobCall.end') {
|
||||
const { runCallId } = data
|
||||
const log = started[runCallId]
|
||||
if (log !== undefined) {
|
||||
delete started[runCallId]
|
||||
log.end = time
|
||||
log.status = computeStatusAndSortTasks(
|
||||
getStatus((log.result = data.error)),
|
||||
log.tasks
|
||||
)
|
||||
}
|
||||
}
|
||||
} else if (event === 'job.end') {
|
||||
const { runJobId } = data
|
||||
const log = started[runJobId]
|
||||
if (log !== undefined) {
|
||||
delete started[runJobId]
|
||||
log.end = time
|
||||
log.status = computeStatusAndSortTasks(
|
||||
getStatus((log.result = data.error)),
|
||||
log.tasks
|
||||
)
|
||||
}
|
||||
} else if (event === 'task.start') {
|
||||
const task = {
|
||||
data: data.data,
|
||||
id,
|
||||
message,
|
||||
start: time,
|
||||
}
|
||||
const { parentId } = data
|
||||
let parent
|
||||
if (parentId === undefined && (runId === undefined || runId === id)) {
|
||||
// top level task
|
||||
task.status =
|
||||
(message === 'restore' && !runningRestores.has(id)) ||
|
||||
(message === 'metadataRestore' && !runningMetadataRestores.has(id))
|
||||
? 'interrupted'
|
||||
: 'pending'
|
||||
consolidated[id] = started[id] = task
|
||||
} else if ((parent = started[parentId]) !== undefined) {
|
||||
// sub-task for which the parent exists
|
||||
task.status = parent.status
|
||||
started[id] = task
|
||||
;(parent.tasks || (parent.tasks = [])).push(task)
|
||||
}
|
||||
} else if (event === 'task.end') {
|
||||
const { taskId } = data
|
||||
const log = started[taskId]
|
||||
if (log !== undefined) {
|
||||
// TODO: merge/transfer work-around
|
||||
delete started[taskId]
|
||||
log.end = time
|
||||
log.status = computeStatusAndSortTasks(
|
||||
getStatus((log.result = data.result), data.status),
|
||||
log.tasks
|
||||
)
|
||||
}
|
||||
} else if (event === 'task.warning') {
|
||||
const parent = started[data.taskId]
|
||||
parent !== undefined &&
|
||||
(parent.warnings || (parent.warnings = [])).push({
|
||||
data: data.data,
|
||||
message,
|
||||
})
|
||||
} else if (event === 'task.info') {
|
||||
const parent = started[data.taskId]
|
||||
parent !== undefined &&
|
||||
(parent.infos || (parent.infos = [])).push({
|
||||
data: data.data,
|
||||
message,
|
||||
})
|
||||
} else if (event === 'jobCall.start') {
|
||||
const parent = started[data.runJobId]
|
||||
if (parent !== undefined) {
|
||||
;(parent.tasks || (parent.tasks = [])).push(
|
||||
(started[id] = {
|
||||
data: {
|
||||
type: 'VM',
|
||||
id: data.params.id,
|
||||
},
|
||||
id,
|
||||
start: time,
|
||||
status: parent.status,
|
||||
})
|
||||
)
|
||||
}
|
||||
} else if (event === 'jobCall.end') {
|
||||
const { runCallId } = data
|
||||
const log = started[runCallId]
|
||||
if (log !== undefined) {
|
||||
delete started[runCallId]
|
||||
log.end = time
|
||||
log.status = computeStatusAndSortTasks(
|
||||
getStatus((log.result = data.error)),
|
||||
log.tasks
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
forEach(jobLogs, handleLog)
|
||||
forEach(restoreLogs, handleLog)
|
||||
forEach(restoreMetadataLogs, handleLog)
|
||||
|
||||
return runId === undefined ? consolidated : consolidated[runId]
|
||||
},
|
||||
10e3,
|
||||
function keyFn(runId) {
|
||||
return [this, runId]
|
||||
}
|
||||
|
||||
forEach(jobLogs, handleLog)
|
||||
forEach(restoreLogs, handleLog)
|
||||
forEach(restoreMetadataLogs, handleLog)
|
||||
|
||||
return runId === undefined ? consolidated : consolidated[runId]
|
||||
},
|
||||
),
|
||||
|
||||
async getBackupNgLogsSorted({ after, before, filter, limit }) {
|
||||
let logs = await this.getBackupNgLogs()
|
||||
|
||||
@@ -44,6 +44,7 @@ import { type Schedule } from '../scheduling'
|
||||
|
||||
import createSizeStream from '../../size-stream'
|
||||
import parseDuration from '../../_parseDuration'
|
||||
import { debounceWithKey } from '../../_pDebounceWithKey'
|
||||
import {
|
||||
type DeltaVmExport,
|
||||
type DeltaVmImport,
|
||||
@@ -821,56 +822,66 @@ export default class BackupNg {
|
||||
)()
|
||||
}
|
||||
|
||||
@debounceWithKey.decorate(10e3, function keyFn(remoteId) {
|
||||
return [this, remoteId]
|
||||
})
|
||||
async _listVmBackupsOnRemote(remoteId: string) {
|
||||
const app = this._app
|
||||
const backupsByVm = {}
|
||||
try {
|
||||
const handler = await app.getRemoteHandler(remoteId)
|
||||
|
||||
const entries = (await handler.list(BACKUP_DIR).catch(error => {
|
||||
if (error == null || error.code !== 'ENOENT') {
|
||||
throw error
|
||||
}
|
||||
return []
|
||||
})).filter(name => name !== 'index.json')
|
||||
|
||||
await Promise.all(
|
||||
entries.map(async vmUuid => {
|
||||
// $FlowFixMe don't know what is the problem (JFT)
|
||||
const backups = await this._listVmBackups(handler, vmUuid)
|
||||
|
||||
if (backups.length === 0) {
|
||||
return
|
||||
}
|
||||
|
||||
// inject an id usable by importVmBackupNg()
|
||||
backups.forEach(backup => {
|
||||
backup.id = `${remoteId}/${backup._filename}`
|
||||
|
||||
const { vdis, vhds } = backup
|
||||
backup.disks =
|
||||
vhds === undefined
|
||||
? []
|
||||
: Object.keys(vhds).map(vdiId => {
|
||||
const vdi = vdis[vdiId]
|
||||
return {
|
||||
id: `${dirname(backup._filename)}/${vhds[vdiId]}`,
|
||||
name: vdi.name_label,
|
||||
uuid: vdi.uuid,
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
backupsByVm[vmUuid] = backups
|
||||
})
|
||||
)
|
||||
} catch (error) {
|
||||
log.warn(`listVmBackups for remote ${remoteId}:`, { error })
|
||||
}
|
||||
return backupsByVm
|
||||
}
|
||||
|
||||
async listVmBackupsNg(remotes: string[]) {
|
||||
const backupsByVmByRemote: $Dict<$Dict<Metadata[]>> = {}
|
||||
|
||||
const app = this._app
|
||||
await Promise.all(
|
||||
remotes.map(async remoteId => {
|
||||
try {
|
||||
const handler = await app.getRemoteHandler(remoteId)
|
||||
|
||||
const entries = (await handler.list(BACKUP_DIR).catch(error => {
|
||||
if (error == null || error.code !== 'ENOENT') {
|
||||
throw error
|
||||
}
|
||||
return []
|
||||
})).filter(name => name !== 'index.json')
|
||||
|
||||
const backupsByVm = (backupsByVmByRemote[remoteId] = {})
|
||||
await Promise.all(
|
||||
entries.map(async vmUuid => {
|
||||
// $FlowFixMe don't know what is the problem (JFT)
|
||||
const backups = await this._listVmBackups(handler, vmUuid)
|
||||
|
||||
if (backups.length === 0) {
|
||||
return
|
||||
}
|
||||
|
||||
// inject an id usable by importVmBackupNg()
|
||||
backups.forEach(backup => {
|
||||
backup.id = `${remoteId}/${backup._filename}`
|
||||
|
||||
const { vdis, vhds } = backup
|
||||
backup.disks =
|
||||
vhds === undefined
|
||||
? []
|
||||
: Object.keys(vhds).map(vdiId => {
|
||||
const vdi = vdis[vdiId]
|
||||
return {
|
||||
id: `${dirname(backup._filename)}/${vhds[vdiId]}`,
|
||||
name: vdi.name_label,
|
||||
uuid: vdi.uuid,
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
backupsByVm[vmUuid] = backups
|
||||
})
|
||||
)
|
||||
} catch (error) {
|
||||
log.warn(`listVmBackups for remote ${remoteId}:`, { error })
|
||||
}
|
||||
backupsByVmByRemote[remoteId] = await this._listVmBackupsOnRemote(
|
||||
remoteId
|
||||
)
|
||||
})
|
||||
)
|
||||
|
||||
@@ -1146,6 +1157,21 @@ export default class BackupNg {
|
||||
$defer.call(xapi, 'deleteVm', snapshot)
|
||||
}
|
||||
|
||||
let compress = getJobCompression(job)
|
||||
const pool = snapshot.$pool
|
||||
if (
|
||||
compress === 'zstd' &&
|
||||
pool.restrictions.restrict_zstd_export !== 'false'
|
||||
) {
|
||||
compress = false
|
||||
logger.warning(
|
||||
`Zstd is not supported on the pool ${pool.name_label}, the VM will be exported without compression`,
|
||||
{
|
||||
event: 'task.warning',
|
||||
taskId,
|
||||
}
|
||||
)
|
||||
}
|
||||
let xva: any = await wrapTask(
|
||||
{
|
||||
logger,
|
||||
@@ -1153,7 +1179,7 @@ export default class BackupNg {
|
||||
parentId: taskId,
|
||||
},
|
||||
xapi.exportVm($cancelToken, snapshot, {
|
||||
compress: getJobCompression(job),
|
||||
compress,
|
||||
})
|
||||
)
|
||||
const exportTask = xva.task
|
||||
|
||||
@@ -243,38 +243,17 @@ export default class Jobs {
|
||||
}
|
||||
|
||||
async _runJob(job: Job, schedule?: Schedule, data_?: any) {
|
||||
const { id } = job
|
||||
|
||||
const runningJobs = this._runningJobs
|
||||
if (id in runningJobs) {
|
||||
throw new Error(`job ${id} is already running`)
|
||||
}
|
||||
|
||||
const { type } = job
|
||||
const executor = this._executors[type]
|
||||
if (executor === undefined) {
|
||||
throw new Error(`cannot run job ${id}: no executor for type ${type}`)
|
||||
}
|
||||
|
||||
let data
|
||||
if (type === 'backup') {
|
||||
// $FlowFixMe only defined for BackupJob
|
||||
const settings = job.settings['']
|
||||
data = {
|
||||
// $FlowFixMe only defined for BackupJob
|
||||
mode: job.mode,
|
||||
reportWhen: (settings && settings.reportWhen) || 'failure',
|
||||
}
|
||||
}
|
||||
if (type === 'metadataBackup') {
|
||||
data = {
|
||||
reportWhen: job.settings['']?.reportWhen ?? 'failure',
|
||||
}
|
||||
}
|
||||
|
||||
const logger = this._logger
|
||||
const { id, type } = job
|
||||
const runJobId = logger.notice(`Starting execution of ${id}.`, {
|
||||
data,
|
||||
data:
|
||||
type === 'backup' || type === 'metadataBackup'
|
||||
? {
|
||||
// $FlowFixMe only defined for BackupJob
|
||||
mode: job.mode,
|
||||
reportWhen: job.settings['']?.reportWhen ?? 'failure',
|
||||
}
|
||||
: undefined,
|
||||
event: 'job.start',
|
||||
userId: job.userId,
|
||||
jobId: id,
|
||||
@@ -285,44 +264,64 @@ export default class Jobs {
|
||||
type,
|
||||
})
|
||||
|
||||
// runId is a temporary property used to check if the report is sent after the server interruption
|
||||
this.updateJob({ id, runId: runJobId })::ignoreErrors()
|
||||
runningJobs[id] = runJobId
|
||||
|
||||
const runs = this._runs
|
||||
|
||||
const { cancel, token } = CancelToken.source()
|
||||
runs[runJobId] = { cancel }
|
||||
|
||||
let session
|
||||
const app = this._app
|
||||
try {
|
||||
session = app.createUserConnection()
|
||||
session.set('user_id', job.userId)
|
||||
const runningJobs = this._runningJobs
|
||||
|
||||
const status = await executor({
|
||||
app,
|
||||
cancelToken: token,
|
||||
data: data_,
|
||||
job,
|
||||
logger,
|
||||
runJobId,
|
||||
schedule,
|
||||
session,
|
||||
})
|
||||
await logger.notice(
|
||||
`Execution terminated for ${job.id}.`,
|
||||
{
|
||||
event: 'job.end',
|
||||
if (id in runningJobs) {
|
||||
throw new Error(`the job (${id}) is already running`)
|
||||
}
|
||||
|
||||
const executor = this._executors[type]
|
||||
if (executor === undefined) {
|
||||
throw new Error(`cannot run job (${id}): no executor for type ${type}`)
|
||||
}
|
||||
|
||||
// runId is a temporary property used to check if the report is sent after the server interruption
|
||||
this.updateJob({ id, runId: runJobId })::ignoreErrors()
|
||||
runningJobs[id] = runJobId
|
||||
|
||||
const runs = this._runs
|
||||
let session
|
||||
try {
|
||||
const { cancel, token } = CancelToken.source()
|
||||
runs[runJobId] = { cancel }
|
||||
|
||||
session = app.createUserConnection()
|
||||
session.set('user_id', job.userId)
|
||||
|
||||
const status = await executor({
|
||||
app,
|
||||
cancelToken: token,
|
||||
data: data_,
|
||||
job,
|
||||
logger,
|
||||
runJobId,
|
||||
},
|
||||
true
|
||||
)
|
||||
schedule,
|
||||
session,
|
||||
})
|
||||
|
||||
app.emit('job:terminated', runJobId, {
|
||||
type: job.type,
|
||||
status,
|
||||
})
|
||||
await logger.notice(
|
||||
`Execution terminated for ${job.id}.`,
|
||||
{
|
||||
event: 'job.end',
|
||||
runJobId,
|
||||
},
|
||||
true
|
||||
)
|
||||
|
||||
app.emit('job:terminated', runJobId, {
|
||||
type: job.type,
|
||||
status,
|
||||
})
|
||||
} finally {
|
||||
this.updateJob({ id, runId: null })::ignoreErrors()
|
||||
delete runningJobs[id]
|
||||
delete runs[runJobId]
|
||||
if (session !== undefined) {
|
||||
session.close()
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
await logger.error(
|
||||
`The execution of ${id} has failed.`,
|
||||
@@ -337,13 +336,6 @@ export default class Jobs {
|
||||
type: job.type,
|
||||
})
|
||||
throw error
|
||||
} finally {
|
||||
this.updateJob({ id, runId: null })::ignoreErrors()
|
||||
delete runningJobs[id]
|
||||
delete runs[runJobId]
|
||||
if (session !== undefined) {
|
||||
session.close()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -3,7 +3,7 @@ import asyncMap from '@xen-orchestra/async-map'
|
||||
import createLogger from '@xen-orchestra/log'
|
||||
import { fromEvent, ignoreErrors } from 'promise-toolbox'
|
||||
|
||||
import debounceWithKey from '../_pDebounceWithKey'
|
||||
import { debounceWithKey } from '../_pDebounceWithKey'
|
||||
import parseDuration from '../_parseDuration'
|
||||
import { type Xapi } from '../xapi'
|
||||
import {
|
||||
|
||||
@@ -2,6 +2,7 @@
|
||||
|
||||
import asyncMap from '@xen-orchestra/async-map'
|
||||
import { createSchedule } from '@xen-orchestra/cron'
|
||||
import { ignoreErrors } from 'promise-toolbox'
|
||||
import { keyBy } from 'lodash'
|
||||
import { noSuchObject } from 'xo-common/api-errors'
|
||||
|
||||
@@ -155,7 +156,9 @@ export default class Scheduling {
|
||||
this._runs[id] = createSchedule(
|
||||
schedule.cron,
|
||||
schedule.timezone
|
||||
).startJob(() => this._app.runJobSequence([schedule.jobId], schedule))
|
||||
).startJob(() => {
|
||||
ignoreErrors.call(this._app.runJobSequence([schedule.jobId], schedule))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
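Wrapping the job sequence in ignoreErrors.call keeps a failing scheduled run from surfacing as an unhandled promise rejection; the failure is still logged by the job runner itself. The promise-toolbox idiom in isolation (the rejecting promise is just an example):

import { ignoreErrors } from 'promise-toolbox'

const doSomethingAsync = () => Promise.reject(new Error('example failure'))

// Fire and forget: the rejection is swallowed instead of becoming unhandled.
ignoreErrors.call(doSomethingAsync())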
@@ -293,6 +293,10 @@ export default class {
|
||||
async connectXenServer(id) {
|
||||
const server = (await this._getXenServer(id)).properties
|
||||
|
||||
if (this._getXenServerStatus(id) !== 'disconnected') {
|
||||
throw new Error('the server is already connected')
|
||||
}
|
||||
|
||||
const xapi = (this._xapis[server.id] = new Xapi({
|
||||
allowUnauthorized: server.allowUnauthorized,
|
||||
readOnly: server.readOnly,
|
||||
|
||||
@@ -166,20 +166,16 @@ export default class Xo extends EventEmitter {
|
||||
|
||||
async registerHttpRequest(fn, data, { suffix = '' } = {}) {
|
||||
const { _httpRequestWatchers: watchers } = this
|
||||
let url
|
||||
|
||||
const url = await (function generateUniqueUrl() {
|
||||
return generateToken().then(token => {
|
||||
const url = `/api/${token}${suffix}`
|
||||
|
||||
return url in watchers ? generateUniqueUrl() : url
|
||||
})
|
||||
})()
|
||||
do {
|
||||
url = `/api/${await generateToken()}${suffix}`
|
||||
} while (url in watchers)
|
||||
|
||||
watchers[url] = {
|
||||
data,
|
||||
fn,
|
||||
}
|
||||
|
||||
return url
|
||||
}
|
||||
|
||||
|
||||
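The recursive generateUniqueUrl helper is replaced by a plain do/while loop that keeps drawing tokens until the resulting /api/<token><suffix> URL is not already registered. The same collision-retry pattern, written as a standalone helper (names are illustrative):

// Keep generating candidates until one is not already taken.
async function generateUnique(existing, generate) {
  let candidate
  do {
    candidate = await generate()
  } while (candidate in existing)
  return candidate
}

// e.g. generateUnique(watchers, async () => `/api/${await generateToken()}${suffix}`)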
@@ -42,7 +42,7 @@
|
||||
"fs-extra": "^8.0.1",
|
||||
"get-stream": "^5.1.0",
|
||||
"index-modules": "^0.3.0",
|
||||
"rimraf": "^2.6.2"
|
||||
"rimraf": "^3.0.0"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
|
||||
@@ -261,7 +261,11 @@ gulp.task(function buildScripts() {
|
||||
],
|
||||
}),
|
||||
require('gulp-sourcemaps').init({ loadMaps: true }),
|
||||
PRODUCTION && require('gulp-uglify/composer')(require('uglify-es'))(),
|
||||
PRODUCTION &&
|
||||
require('gulp-uglify/composer')(require('uglify-es'))({
|
||||
// 2019-09-04 Disabling inline optimization until https://github.com/mishoo/UglifyJS2/issues/2842 is fixed
|
||||
compress: { inline: false },
|
||||
}),
|
||||
dest()
|
||||
)
|
||||
})
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
{
|
||||
"private": true,
|
||||
"name": "xo-web",
|
||||
"version": "5.46.0",
|
||||
"version": "5.50.0",
|
||||
"license": "AGPL-3.0",
|
||||
"description": "Web interface client for Xen-Orchestra",
|
||||
"keywords": [
|
||||
@@ -32,8 +32,9 @@
|
||||
},
|
||||
"devDependencies": {
|
||||
"@nraynaud/novnc": "0.6.1",
|
||||
"@xen-orchestra/cron": "^1.0.3",
|
||||
"@xen-orchestra/cron": "^1.0.4",
|
||||
"@xen-orchestra/defined": "^0.0.0",
|
||||
"@xen-orchestra/template": "^0.1.0",
|
||||
"ansi_up": "^4.0.3",
|
||||
"asap": "^2.0.6",
|
||||
"babel-core": "^6.26.0",
|
||||
@@ -71,7 +72,7 @@
|
||||
"font-mfizz": "^2.4.1",
|
||||
"get-stream": "^4.0.0",
|
||||
"gulp": "^4.0.0",
|
||||
"gulp-autoprefixer": "^6.0.0",
|
||||
"gulp-autoprefixer": "^7.0.0",
|
||||
"gulp-csso": "^3.0.0",
|
||||
"gulp-embedlr": "^0.5.2",
|
||||
"gulp-plumber": "^1.1.0",
|
||||
@@ -90,7 +91,7 @@
|
||||
"lodash": "^4.6.1",
|
||||
"loose-envify": "^1.1.0",
|
||||
"make-error": "^1.3.2",
|
||||
"marked": "^0.6.0",
|
||||
"marked": "^0.7.0",
|
||||
"modular-cssify": "^12",
|
||||
"moment": "^2.20.1",
|
||||
"moment-timezone": "^0.5.14",
|
||||
@@ -127,7 +128,7 @@
|
||||
"redux": "^4.0.0",
|
||||
"redux-thunk": "^2.0.1",
|
||||
"reselect": "^2.5.4",
|
||||
"rimraf": "^2.6.2",
|
||||
"rimraf": "^3.0.0",
|
||||
"semver": "^6.0.0",
|
||||
"styled-components": "^3.1.5",
|
||||
"uglify-es": "^3.3.4",
|
||||
|
||||