Compare commits
454 commits: `xen-api-v0...` → `xo-server-...`

(The compare view listed 454 commits, from d867524c6b through 8186d34f4e; only the abbreviated SHA1 column was captured, with no author, date, or message data, so the individual rows are omitted here.)
@@ -1,5 +1,7 @@
module.exports = {
  extends: [
    'plugin:eslint-comments/recommended',

    'standard',
    'standard-jsx',
    'prettier',
@@ -19,7 +21,7 @@ module.exports = {

  overrides: [
    {
      files: ['packages/*cli*/**/*.js', '*-cli.js'],
      files: ['cli.js', '*-cli.js', 'packages/*cli*/**/*.js'],
      rules: {
        'no-console': 'off',
      },
@@ -33,6 +35,9 @@ module.exports = {
    },
  },
  rules: {
    // disabled because XAPI objects are using camel case
    camelcase: ['off'],

    'no-console': ['error', { allow: ['warn', 'error'] }],
    'no-var': 'error',
    'node/no-extraneous-import': 'error',

@@ -46,6 +46,7 @@
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepare": "yarn run build",
|
||||
"prepublishOnly": "yarn run build"
|
||||
"prepublishOnly": "yarn run build",
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -16,6 +16,9 @@
|
||||
},
|
||||
"dependencies": {
|
||||
"golike-defer": "^0.4.1",
|
||||
"xen-api": "^0.24.6"
|
||||
"xen-api": "^0.27.1"
|
||||
},
|
||||
"scripts": {
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -55,6 +55,7 @@
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run clean",
|
||||
"prepublishOnly": "yarn run build"
|
||||
"prepublishOnly": "yarn run build",
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -43,6 +43,7 @@
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
"prepublishOnly": "yarn run build",
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -42,6 +42,7 @@
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
"prepublishOnly": "yarn run build",
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@xen-orchestra/fs",
|
||||
"version": "0.7.1",
|
||||
"version": "0.10.1",
|
||||
"license": "AGPL-3.0",
|
||||
"description": "The File System for Xen Orchestra backups.",
|
||||
"keywords": [],
|
||||
@@ -21,18 +21,19 @@
|
||||
"node": ">=6"
|
||||
},
|
||||
"dependencies": {
|
||||
"@marsaud/smb2": "^0.13.0",
|
||||
"@marsaud/smb2": "^0.14.0",
|
||||
"@sindresorhus/df": "^2.1.0",
|
||||
"@xen-orchestra/async-map": "^0.0.0",
|
||||
"decorator-synchronized": "^0.5.0",
|
||||
"execa": "^1.0.0",
|
||||
"fs-extra": "^7.0.0",
|
||||
"fs-extra": "^8.0.1",
|
||||
"get-stream": "^4.0.0",
|
||||
"limit-concurrency-decorator": "^0.4.0",
|
||||
"lodash": "^4.17.4",
|
||||
"promise-toolbox": "^0.12.1",
|
||||
"promise-toolbox": "^0.13.0",
|
||||
"readable-stream": "^3.0.6",
|
||||
"through2": "^3.0.0",
|
||||
"tmp": "^0.0.33",
|
||||
"tmp": "^0.1.0",
|
||||
"xo-remote-parser": "^0.5.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
@@ -40,12 +41,13 @@
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/plugin-proposal-decorators": "^7.1.6",
|
||||
"@babel/plugin-proposal-function-bind": "^7.0.0",
|
||||
"@babel/plugin-proposal-nullish-coalescing-operator": "^7.4.4",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"@babel/preset-flow": "^7.0.0",
|
||||
"async-iterator-to-stream": "^1.1.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^5.1.3",
|
||||
"dotenv": "^7.0.0",
|
||||
"dotenv": "^8.0.0",
|
||||
"index-modules": "^0.3.0",
|
||||
"rimraf": "^2.6.2"
|
||||
},
|
||||
@@ -55,6 +57,7 @@
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run clean",
|
||||
"prepare": "yarn run build"
|
||||
"prepare": "yarn run build",
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@
import getStream from 'get-stream'

import asyncMap from '@xen-orchestra/async-map'
import limit from 'limit-concurrency-decorator'
import path from 'path'
import synchronized from 'decorator-synchronized'
import { fromCallback, fromEvent, ignoreErrors, timeout } from 'promise-toolbox'
@@ -31,6 +32,7 @@ const computeRate = (hrtime: number[], size: number) => {
}

const DEFAULT_TIMEOUT = 6e5 // 10 min
const DEFAULT_MAX_PARALLEL_OPERATIONS = 10

const ignoreEnoent = error => {
  if (error == null || error.code !== 'ENOENT') {
@@ -83,6 +85,25 @@ export default class RemoteHandlerAbstract {
      }
    }
    ;({ timeout: this._timeout = DEFAULT_TIMEOUT } = options)

    const sharedLimit = limit(
      options.maxParallelOperations ?? DEFAULT_MAX_PARALLEL_OPERATIONS
    )
    this.closeFile = sharedLimit(this.closeFile)
    this.getInfo = sharedLimit(this.getInfo)
    this.getSize = sharedLimit(this.getSize)
    this.list = sharedLimit(this.list)
    this.mkdir = sharedLimit(this.mkdir)
    this.openFile = sharedLimit(this.openFile)
    this.outputFile = sharedLimit(this.outputFile)
    this.read = sharedLimit(this.read)
    this.readFile = sharedLimit(this.readFile)
    this.rename = sharedLimit(this.rename)
    this.rmdir = sharedLimit(this.rmdir)
    this.truncate = sharedLimit(this.truncate)
    this.unlink = sharedLimit(this.unlink)
    this.write = sharedLimit(this.write)
    this.writeFile = sharedLimit(this.writeFile)
  }

  // Public members
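The constructor change above routes every public method of the handler through one shared limiter built with `limit-concurrency-decorator`, so a single remote never runs more than `maxParallelOperations` operations at once (10 by default). A minimal, self-contained sketch of the same pattern, using only the calls visible in the hunk (the wrapped functions here are placeholders):

```js
import limit from 'limit-concurrency-decorator'

// one budget of 2 concurrent calls, shared by every wrapped function
const sharedLimit = limit(2)

const read = sharedLimit(async name => {
  await new Promise(resolve => setTimeout(resolve, 100)) // pretend remote I/O
  return `content of ${name}`
})
const write = sharedLimit(async (name, data) => {
  await new Promise(resolve => setTimeout(resolve, 100)) // pretend remote I/O
})

const main = async () => {
  // five calls are started at once, but at most two run at any given time;
  // the others simply wait for a free slot
  await Promise.all([read('a'), read('b'), read('c'), write('d', 'x'), write('e', 'y')])
}
main().catch(console.error)
```

This cap appears to be what the changelog entry "[Remotes] Fix `EIO` errors due to massive parallel fs operations" relies on.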
@@ -400,6 +421,10 @@ export default class RemoteHandlerAbstract {
    }
  }

  async truncate(file: string, len: number): Promise<void> {
    await this._truncate(file, len)
  }

  async unlink(file: string, { checksum = true }: Object = {}): Promise<void> {
    file = normalizePath(file)

@@ -410,6 +435,18 @@
    await this._unlink(file).catch(ignoreEnoent)
  }

  async write(
    file: File,
    buffer: Buffer,
    position: number
  ): Promise<{| bytesWritten: number, buffer: Buffer |}> {
    await this._write(
      typeof file === 'string' ? normalizePath(file) : file,
      buffer,
      position
    )
  }

  async writeFile(
    file: string,
    data: Data,
@@ -546,6 +583,28 @@
    throw new Error('Not implemented')
  }

  async _write(file: File, buffer: Buffer, position: number): Promise<void> {
    const isPath = typeof file === 'string'
    if (isPath) {
      file = await this.openFile(file, 'r+')
    }
    try {
      return await this._writeFd(file, buffer, position)
    } finally {
      if (isPath) {
        await this.closeFile(file)
      }
    }
  }

  async _writeFd(
    fd: FileDescriptor,
    buffer: Buffer,
    position: number
  ): Promise<void> {
    throw new Error('Not implemented')
  }

  async _writeFile(
    file: string,
    data: Data,
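Together with the abstract `_write`/`_writeFd` plumbing above, the new public `write()` and `truncate()` methods let callers patch a remote file in place instead of rewriting it. A small usage sketch (hypothetical file name and data; `handler` is assumed to be an already-initialised concrete handler, which is outside the scope of this diff, and only methods that appear in this diff are used):

```js
// hypothetical helper: `handler` is any concrete RemoteHandler (local, SMB, ...)
async function patchDemo(handler) {
  await handler.outputFile('notes.txt', Buffer.from('hello world'))

  // overwrite 5 bytes starting at offset 6, without rewriting the whole file
  await handler.write('notes.txt', Buffer.from('THERE'), 6)
  console.log((await handler.readFile('notes.txt')).toString()) // 'hello THERE'

  // shrink the file back to its first 5 bytes
  await handler.truncate('notes.txt', 5)
  console.log((await handler.readFile('notes.txt')).toString()) // 'hello'
}
```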
@@ -3,9 +3,9 @@
import 'dotenv/config'
import asyncIteratorToStream from 'async-iterator-to-stream'
import getStream from 'get-stream'
import { forOwn, random } from 'lodash'
import { fromCallback } from 'promise-toolbox'
import { pipeline } from 'readable-stream'
import { random } from 'lodash'
import { tmpdir } from 'os'

import { getHandler } from '.'
@@ -310,5 +310,70 @@ handlers.forEach(url => {
      await handler.unlink('file')
    })
  })

  describe('#write()', () => {
    beforeEach(() => handler.outputFile('file', TEST_DATA))

    const PATCH_DATA_LEN = Math.ceil(TEST_DATA_LEN / 2)
    const PATCH_DATA = unsecureRandomBytes(PATCH_DATA_LEN)

    forOwn(
      {
        'dont increase file size': (() => {
          const offset = random(0, TEST_DATA_LEN - PATCH_DATA_LEN)

          const expected = Buffer.from(TEST_DATA)
          PATCH_DATA.copy(expected, offset)

          return { offset, expected }
        })(),
        'increase file size': (() => {
          const offset = random(
            TEST_DATA_LEN - PATCH_DATA_LEN + 1,
            TEST_DATA_LEN
          )

          const expected = Buffer.alloc(offset + PATCH_DATA_LEN)
          TEST_DATA.copy(expected)
          PATCH_DATA.copy(expected, offset)

          return { offset, expected }
        })(),
      },
      ({ offset, expected }, title) => {
        describe(title, () => {
          testWithFileDescriptor('file', 'r+', async ({ file }) => {
            await handler.write(file, PATCH_DATA, offset)
            await expect(await handler.readFile('file')).toEqual(expected)
          })
        })
      }
    )
  })

  describe('#truncate()', () => {
    forOwn(
      {
        'shrinks file': (() => {
          const length = random(0, TEST_DATA_LEN)
          const expected = TEST_DATA.slice(0, length)
          return { length, expected }
        })(),
        'grows file': (() => {
          const length = random(TEST_DATA_LEN, TEST_DATA_LEN * 2)
          const expected = Buffer.alloc(length)
          TEST_DATA.copy(expected)
          return { length, expected }
        })(),
      },
      ({ length, expected }, title) => {
        it(title, async () => {
          await handler.outputFile('file', TEST_DATA)
          await handler.truncate('file', length)
          await expect(await handler.readFile('file')).toEqual(expected)
        })
      }
    )
  })
})
})
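The `expected` buffers in these tests are built with Node's core `Buffer` API. A tiny standalone example of the same arithmetic, with made-up 6-byte data standing in for `TEST_DATA`:

```js
const original = Buffer.from('abcdef') // stand-in for TEST_DATA (6 bytes)
const patch = Buffer.from('XY')        // stand-in for PATCH_DATA (2 bytes)

// "dont increase file size": the patch lands entirely inside the original
const same = Buffer.from(original)
patch.copy(same, 2)
console.log(same.toString()) // 'abXYef', still 6 bytes

// "increase file size": the patch starts too late to fit, so the file grows
const offset = 5
const grown = Buffer.alloc(offset + patch.length) // zero-filled, 7 bytes
original.copy(grown)
patch.copy(grown, offset)
console.log(grown.toString()) // 'abcdeXY', one byte longer

// "grows file" (#truncate): the added tail is zero-filled
const truncated = Buffer.alloc(8)
original.copy(truncated)
console.log(truncated) // <Buffer 61 62 63 64 65 66 00 00>
```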
@@ -106,10 +106,18 @@ export default class LocalHandler extends RemoteHandlerAbstract {
    await fs.access(path, fs.R_OK | fs.W_OK)
  }

  _truncate(file, len) {
    return fs.truncate(this._getFilePath(file), len)
  }

  async _unlink(file) {
    return fs.unlink(this._getFilePath(file))
  }

  _writeFd(file, buffer, position) {
    return fs.write(file.fd, buffer, 0, buffer.length, position)
  }

  _writeFile(file, data, { flags }) {
    return fs.writeFile(this._getFilePath(file), data, { flag: flags })
  }

@@ -155,10 +155,20 @@ export default class SmbHandler extends RemoteHandlerAbstract {
    return this.list('.')
  }

  _truncate(file, len) {
    return this._client
      .truncate(this._getFilePath(file), len)
      .catch(normalizeError)
  }

  _unlink(file) {
    return this._client.unlink(this._getFilePath(file)).catch(normalizeError)
  }

  _writeFd(file, buffer, position) {
    return this._client.write(file.fd, buffer, 0, buffer.length, position)
  }

  _writeFile(file, data, options) {
    return this._client
      .writeFile(this._getFilePath(file), data, options)

@@ -24,6 +24,19 @@ log.info('this information is relevant to the user')
log.warn('something went wrong but did not prevent current action')
log.error('something went wrong')
log.fatal('service/app is going down')

// you can add contextual info
log.debug('new API request', {
  method: 'foo',
  params: [ 'bar', 'baz' ],
  user: 'qux'
})

// by convention, errors go into the `error` field
log.error('could not join server', {
  error,
  server: 'example.org',
})
```

Then, at the application level, configure how the logs are handled:

@@ -27,11 +27,11 @@
|
||||
">2%"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">=4"
|
||||
"node": ">=6"
|
||||
},
|
||||
"dependencies": {
|
||||
"lodash": "^4.17.4",
|
||||
"promise-toolbox": "^0.12.1"
|
||||
"promise-toolbox": "^0.13.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/cli": "^7.0.0",
|
||||
@@ -48,6 +48,7 @@
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
"prepublishOnly": "yarn run build",
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,10 +1,12 @@
import LEVELS, { NAMES } from '../levels'

// Bind console methods (necessary for browsers)
/* eslint-disable no-console */
const debugConsole = console.log.bind(console)
const infoConsole = console.info.bind(console)
const warnConsole = console.warn.bind(console)
const errorConsole = console.error.bind(console)
/* eslint-enable no-console */

const { ERROR, INFO, WARN } = LEVELS

@@ -1,7 +1,6 @@
import fromCallback from 'promise-toolbox/fromCallback'
import splitHost from 'split-host' // eslint-disable-line node/no-extraneous-import node/no-missing-import
import startsWith from 'lodash/startsWith'
import { createClient, Facility, Severity, Transport } from 'syslog-client' // eslint-disable-line node/no-extraneous-import node/no-missing-import
import splitHost from 'split-host'
import { createClient, Facility, Severity, Transport } from 'syslog-client'

import LEVELS from '../levels'

@@ -19,10 +18,10 @@ const facility = Facility.User
export default target => {
  const opts = {}
  if (target !== undefined) {
    if (startsWith(target, 'tcp://')) {
    if (target.startsWith('tcp://')) {
      target = target.slice(6)
      opts.transport = Transport.Tcp
    } else if (startsWith(target, 'udp://')) {
    } else if (target.startsWith('udp://')) {
      target = target.slice(6)
      opts.transport = Transport.Udp
    }

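Functionally this hunk only swaps lodash's `startsWith` helper for the native `String.prototype.startsWith`, available since Node 4; with an illustrative target string the two are interchangeable:

```js
import startsWith from 'lodash/startsWith'

const target = 'tcp://syslog.example.org:514' // made-up example target

startsWith(target, 'tcp://') // true
target.startsWith('tcp://')  // true, same result without the lodash import
target.slice(6)              // 'syslog.example.org:514'
```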
@@ -45,6 +45,7 @@
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
"prepublishOnly": "yarn run build",
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
212
CHANGELOG.md
@@ -1,6 +1,183 @@
|
||||
# ChangeLog
|
||||
|
||||
## Next (2019-03-19)
|
||||
## **next**
|
||||
|
||||
### Enhancements
|
||||
|
||||
### Bug fixes
|
||||
|
||||
### Released packages
|
||||
|
||||
- xo-server v5.47.0
|
||||
- xo-web v5.47.0
|
||||
|
||||
## **5.37.0** (2019-07-25)
|
||||
|
||||

|
||||
|
||||
### Highlights
|
||||
|
||||
- [Pool] Ability to add multiple hosts on the pool [#2402](https://github.com/vatesfr/xen-orchestra/issues/2402) (PR [#3716](https://github.com/vatesfr/xen-orchestra/pull/3716))
|
||||
- [SR/General] Improve SR usage graph [#3608](https://github.com/vatesfr/xen-orchestra/issues/3608) (PR [#3830](https://github.com/vatesfr/xen-orchestra/pull/3830))
|
||||
- [VM] Permission to revert to any snapshot for VM operators [#3928](https://github.com/vatesfr/xen-orchestra/issues/3928) (PR [#4247](https://github.com/vatesfr/xen-orchestra/pull/4247))
|
||||
- [Backup NG] Ability to bypass unhealthy VDI chains check [#4324](https://github.com/vatesfr/xen-orchestra/issues/4324) (PR [#4340](https://github.com/vatesfr/xen-orchestra/pull/4340))
|
||||
- [VM/console] Multiline copy/pasting [#4261](https://github.com/vatesfr/xen-orchestra/issues/4261) (PR [#4341](https://github.com/vatesfr/xen-orchestra/pull/4341))
|
||||
|
||||
### Enhancements
|
||||
|
||||
- [Stats] Ability to display last day stats [#4160](https://github.com/vatesfr/xen-orchestra/issues/4160) (PR [#4168](https://github.com/vatesfr/xen-orchestra/pull/4168))
|
||||
- [Settings/servers] Display servers connection issues [#4300](https://github.com/vatesfr/xen-orchestra/issues/4300) (PR [#4310](https://github.com/vatesfr/xen-orchestra/pull/4310))
|
||||
- [VM] Show current operations and progress [#3811](https://github.com/vatesfr/xen-orchestra/issues/3811) (PR [#3982](https://github.com/vatesfr/xen-orchestra/pull/3982))
|
||||
- [Backup NG/New] Generate default schedule if no schedule is specified [#4036](https://github.com/vatesfr/xen-orchestra/issues/4036) (PR [#4183](https://github.com/vatesfr/xen-orchestra/pull/4183))
|
||||
- [Host/Advanced] Ability to edit iSCSI IQN [#4048](https://github.com/vatesfr/xen-orchestra/issues/4048) (PR [#4208](https://github.com/vatesfr/xen-orchestra/pull/4208))
|
||||
- [VM,host] Improved state icons/pills (colors and tooltips) (PR [#4363](https://github.com/vatesfr/xen-orchestra/pull/4363))
|
||||
|
||||
### Bug fixes
|
||||
|
||||
- [Settings/Servers] Fix read-only setting toggling
|
||||
- [SDN Controller] Do not choose physical PIF without IP configuration for tunnels. (PR [#4319](https://github.com/vatesfr/xen-orchestra/pull/4319))
|
||||
- [Xen servers] Fix `no connection found for object` error if pool master is reinstalled [#4299](https://github.com/vatesfr/xen-orchestra/issues/4299) (PR [#4302](https://github.com/vatesfr/xen-orchestra/pull/4302))
|
||||
- [Backup-ng/restore] Display correct size for full VM backup [#4316](https://github.com/vatesfr/xen-orchestra/issues/4316) (PR [#4332](https://github.com/vatesfr/xen-orchestra/pull/4332))
|
||||
- [VM/tab-advanced] Fix CPU limits edition (PR [#4337](https://github.com/vatesfr/xen-orchestra/pull/4337))
|
||||
- [Remotes] Fix `EIO` errors due to massive parallel fs operations [#4323](https://github.com/vatesfr/xen-orchestra/issues/4323) (PR [#4330](https://github.com/vatesfr/xen-orchestra/pull/4330))
|
||||
- [VM/Advanced] Fix virtualization mode switch (PV/HVM) (PR [#4349](https://github.com/vatesfr/xen-orchestra/pull/4349))
|
||||
- [Task] fix hidden notification by search field [#3874](https://github.com/vatesfr/xen-orchestra/issues/3874) (PR [#4305](https://github.com/vatesfr/xen-orchestra/pull/4305)
|
||||
- [VM] Fail to change affinity (PR [#4361](https://github.com/vatesfr/xen-orchestra/pull/4361)
|
||||
- [VM] Number of CPUs not correctly changed on running VMs (PR [#4360](https://github.com/vatesfr/xen-orchestra/pull/4360)
|
||||
|
||||
### Released packages
|
||||
|
||||
- @xen-orchestra/fs v0.10.1
|
||||
- xo-server-sdn-controller v0.1.1
|
||||
- xen-api v0.27.1
|
||||
- xo-server v5.46.0
|
||||
- xo-web v5.46.0
|
||||
|
||||
## **5.36.0** (2019-06-27)
|
||||
|
||||

|
||||
|
||||
### Highlights
|
||||
|
||||
- [SR/new] Create ZFS storage [#4260](https://github.com/vatesfr/xen-orchestra/issues/4260) (PR [#4266](https://github.com/vatesfr/xen-orchestra/pull/4266))
|
||||
- [Host/advanced] Fix host CPU hyperthreading detection [#4262](https://github.com/vatesfr/xen-orchestra/issues/4262) (PR [#4285](https://github.com/vatesfr/xen-orchestra/pull/4285))
|
||||
- [VM/Advanced] Ability to use UEFI instead of BIOS [#4264](https://github.com/vatesfr/xen-orchestra/issues/4264) (PR [#4268](https://github.com/vatesfr/xen-orchestra/pull/4268))
|
||||
- [Backup-ng/restore] Display size for full VM backup [#4009](https://github.com/vatesfr/xen-orchestra/issues/4009) (PR [#4245](https://github.com/vatesfr/xen-orchestra/pull/4245))
|
||||
- [Sr/new] Ability to select NFS version when creating NFS storage [#3951](https://github.com/vatesfr/xen-orchestra/issues/3951) (PR [#4277](https://github.com/vatesfr/xen-orchestra/pull/4277))
|
||||
- [Host/storages, SR/hosts] Display PBD details [#4264](https://github.com/vatesfr/xen-orchestra/issues/4161) (PR [#4268](https://github.com/vatesfr/xen-orchestra/pull/4284))
|
||||
- [auth-saml] Improve compatibility with Microsoft Azure Active Directory (PR [#4294](https://github.com/vatesfr/xen-orchestra/pull/4294))
|
||||
|
||||
### Enhancements
|
||||
|
||||
- [Host] Display warning when "Citrix Hypervisor" license has restrictions [#4251](https://github.com/vatesfr/xen-orchestra/issues/4164) (PR [#4235](https://github.com/vatesfr/xen-orchestra/pull/4279))
|
||||
- [VM/Backup] Create backup bulk action [#2573](https://github.com/vatesfr/xen-orchestra/issues/2573) (PR [#4257](https://github.com/vatesfr/xen-orchestra/pull/4257))
|
||||
- [Host] Display warning when host's time differs too much from XOA's time [#4113](https://github.com/vatesfr/xen-orchestra/issues/4113) (PR [#4173](https://github.com/vatesfr/xen-orchestra/pull/4173))
|
||||
- [VM/network] Display and set bandwidth rate-limit of a VIF [#4215](https://github.com/vatesfr/xen-orchestra/issues/4215) (PR [#4293](https://github.com/vatesfr/xen-orchestra/pull/4293))
|
||||
- [SDN Controller] New plugin which enables creating pool-wide private networks [xcp-ng/xcp#175](https://github.com/xcp-ng/xcp/issues/175) (PR [#4269](https://github.com/vatesfr/xen-orchestra/pull/4269))
|
||||
|
||||
### Bug fixes
|
||||
|
||||
- [XOA] Don't require editing the _email_ field in case of re-registration (PR [#4259](https://github.com/vatesfr/xen-orchestra/pull/4259))
|
||||
- [Metadata backup] Missing XAPIs should trigger a failure job [#4281](https://github.com/vatesfr/xen-orchestra/issues/4281) (PR [#4283](https://github.com/vatesfr/xen-orchestra/pull/4283))
|
||||
- [iSCSI] Fix fibre channel paths display [#4291](https://github.com/vatesfr/xen-orchestra/issues/4291) (PR [#4303](https://github.com/vatesfr/xen-orchestra/pull/4303))
|
||||
- [New VM] Fix tooltips not displayed on disabled elements in some browsers (e.g. Google Chrome) [#4304](https://github.com/vatesfr/xen-orchestra/issues/4304) (PR [#4309](https://github.com/vatesfr/xen-orchestra/pull/4309))
|
||||
|
||||
### Released packages
|
||||
|
||||
- xo-server-auth-ldap v0.6.5
|
||||
- xen-api v0.26.0
|
||||
- xo-server-sdn-controller v0.1
|
||||
- xo-server-auth-saml v0.6.0
|
||||
- xo-server-backup-reports v0.16.2
|
||||
- xo-server v5.44.0
|
||||
- xo-web v5.44.0
|
||||
|
||||
## **5.35.0** (2019-05-29)
|
||||
|
||||
### Enhancements
|
||||
|
||||
- [VM/general] Display 'Started... ago' instead of 'Halted... ago' for paused state [#3750](https://github.com/vatesfr/xen-orchestra/issues/3750) (PR [#4170](https://github.com/vatesfr/xen-orchestra/pull/4170))
|
||||
- [Metadata backup] Ability to define when the backup report will be sent (PR [#4149](https://github.com/vatesfr/xen-orchestra/pull/4149))
|
||||
- [XOA/Update] Ability to select release channel [#4200](https://github.com/vatesfr/xen-orchestra/issues/4200) (PR [#4202](https://github.com/vatesfr/xen-orchestra/pull/4202))
|
||||
- [User] Forget connection tokens on password change or on demand [#4214](https://github.com/vatesfr/xen-orchestra/issues/4214) (PR [#4224](https://github.com/vatesfr/xen-orchestra/pull/4224))
|
||||
- [Settings/Logs] LICENCE_RESTRICTION errors: suggest XCP-ng as an Open Source alternative [#3876](https://github.com/vatesfr/xen-orchestra/issues/3876) (PR [#4238](https://github.com/vatesfr/xen-orchestra/pull/4238))
|
||||
- [VM/Migrate] Display VDI size on migrate modal [#2534](https://github.com/vatesfr/xen-orchestra/issues/2534) (PR [#4250](https://github.com/vatesfr/xen-orchestra/pull/4250))
|
||||
- [Host] Display hyperthreading status on advanced tab [#4262](https://github.com/vatesfr/xen-orchestra/issues/4262) (PR [#4263](https://github.com/vatesfr/xen-orchestra/pull/4263))
|
||||
|
||||
### Bug fixes
|
||||
|
||||
- [Pool/Patches] Fix "an error has occurred" in "Applied patches" [#4192](https://github.com/vatesfr/xen-orchestra/issues/4192) (PR [#4193](https://github.com/vatesfr/xen-orchestra/pull/4193))
|
||||
- [Backup NG] Fix report sent even though "Never" is selected [#4092](https://github.com/vatesfr/xen-orchestra/issues/4092) (PR [#4178](https://github.com/vatesfr/xen-orchestra/pull/4178))
|
||||
- [Remotes] Fix issues after a config import (PR [#4197](https://github.com/vatesfr/xen-orchestra/pull/4197))
|
||||
- [Charts] Fixed the chart lines sometimes changing order/color (PR [#4221](https://github.com/vatesfr/xen-orchestra/pull/4221))
|
||||
- Prevent non-admin users to access admin pages with URL (PR [#4220](https://github.com/vatesfr/xen-orchestra/pull/4220))
|
||||
- [Upgrade] Fix alert before upgrade while running backup jobs [#4164](https://github.com/vatesfr/xen-orchestra/issues/4164) (PR [#4235](https://github.com/vatesfr/xen-orchestra/pull/4235))
|
||||
- [Import] Fix import OVA files (PR [#4232](https://github.com/vatesfr/xen-orchestra/pull/4232))
|
||||
- [VM/network] Fix duplicate IPv4 (PR [#4239](https://github.com/vatesfr/xen-orchestra/pull/4239))
|
||||
- [Remotes] Fix disconnected remotes which may appear to work
|
||||
- [Host] Fix incorrect hypervisor name [#4246](https://github.com/vatesfr/xen-orchestra/issues/4246) (PR [#4248](https://github.com/vatesfr/xen-orchestra/pull/4248))
|
||||
|
||||
### Released packages
|
||||
|
||||
- xo-server-backup-reports v0.16.1
|
||||
- @xen-orchestra/fs v0.9.0
|
||||
- vhd-lib v0.7.0
|
||||
- xo-server v5.42.1
|
||||
- xo-web v5.42.1
|
||||
|
||||
## **5.34.0** (2019-04-30)
|
||||
|
||||
### Highlights
|
||||
|
||||
- [Self/New VM] Add network config box to custom cloud-init [#3872](https://github.com/vatesfr/xen-orchestra/issues/3872) (PR [#4150](https://github.com/vatesfr/xen-orchestra/pull/4150))
|
||||
- [Metadata backup] Detailed logs [#4005](https://github.com/vatesfr/xen-orchestra/issues/4005) (PR [#4014](https://github.com/vatesfr/xen-orchestra/pull/4014))
|
||||
- [Backup reports] Support metadata backups (PR [#4084](https://github.com/vatesfr/xen-orchestra/pull/4084))
|
||||
- [VM migration] Auto select default SR and collapse optional actions [#3326](https://github.com/vatesfr/xen-orchestra/issues/3326) (PR [#4121](https://github.com/vatesfr/xen-orchestra/pull/4121))
|
||||
- Unlock basic stats on all editions [#4166](https://github.com/vatesfr/xen-orchestra/issues/4166) (PR [#4172](https://github.com/vatesfr/xen-orchestra/pull/4172))
|
||||
|
||||
### Enhancements
|
||||
|
||||
- [Settings/remotes] Expose mount options field for SMB [#4063](https://github.com/vatesfr/xen-orchestra/issues/4063) (PR [#4067](https://github.com/vatesfr/xen-orchestra/pull/4067))
|
||||
- [Backup/Schedule] Add warning regarding DST when you add a schedule [#4042](https://github.com/vatesfr/xen-orchestra/issues/4042) (PR [#4056](https://github.com/vatesfr/xen-orchestra/pull/4056))
|
||||
- [Import] Avoid blocking the UI when dropping a big OVA file on the UI (PR [#4018](https://github.com/vatesfr/xen-orchestra/pull/4018))
|
||||
- [Backup NG/Overview] Make backup list title clearer [#4111](https://github.com/vatesfr/xen-orchestra/issues/4111) (PR [#4129](https://github.com/vatesfr/xen-orchestra/pull/4129))
|
||||
- [Dashboard] Hide "Report" section for non-admins [#4123](https://github.com/vatesfr/xen-orchestra/issues/4123) (PR [#4126](https://github.com/vatesfr/xen-orchestra/pull/4126))
|
||||
- [Self/New VM] Display confirmation modal when user will use a large amount of resources [#4044](https://github.com/vatesfr/xen-orchestra/issues/4044) (PR [#4127](https://github.com/vatesfr/xen-orchestra/pull/4127))
|
||||
- [VDI migration, New disk] Warning when SR host is different from the other disks [#3911](https://github.com/vatesfr/xen-orchestra/issues/3911) (PR [#4035](https://github.com/vatesfr/xen-orchestra/pull/4035))
|
||||
- [Attach disk] Display warning message when VDI SR is on different host from the other disks [#3911](https://github.com/vatesfr/xen-orchestra/issues/3911) (PR [#4117](https://github.com/vatesfr/xen-orchestra/pull/4117))
|
||||
- [Editable] Notify user when editable undo fails [#3799](https://github.com/vatesfr/xen-orchestra/issues/3799) (PR [#4150](https://github.com/vatesfr/xen-orchestra/pull/4157))
|
||||
- [XO] Add banner for sources users to clarify support conditions [#4165](https://github.com/vatesfr/xen-orchestra/issues/4165) (PR [#4167](https://github.com/vatesfr/xen-orchestra/pull/4167))
|
||||
|
||||
### Bug fixes
|
||||
|
||||
- [Continuous Replication] Fix VHD size guess for empty files [#4105](https://github.com/vatesfr/xen-orchestra/issues/4105) (PR [#4107](https://github.com/vatesfr/xen-orchestra/pull/4107))
|
||||
- [Backup NG] Only display full backup interval in case of a delta backup (PR [#4125](https://github.com/vatesfr/xen-orchestra/pull/4107))
|
||||
- [Dashboard/Health] fix 'an error has occurred' on the storage state table [#4128](https://github.com/vatesfr/xen-orchestra/issues/4128) (PR [#4132](https://github.com/vatesfr/xen-orchestra/pull/4132))
|
||||
- [Menu] XOA: Fixed empty slot when menu is collapsed [#4012](https://github.com/vatesfr/xen-orchestra/issues/4012) (PR [#4068](https://github.com/vatesfr/xen-orchestra/pull/4068)
|
||||
- [Self/New VM] Fix missing templates when refreshing page [#3265](https://github.com/vatesfr/xen-orchestra/issues/3265) (PR [#3565](https://github.com/vatesfr/xen-orchestra/pull/3565))
|
||||
- [Home] No more false positives when select Tag on Home page [#4087](https://github.com/vatesfr/xen-orchestra/issues/4087) (PR [#4112](https://github.com/vatesfr/xen-orchestra/pull/4112))
|
||||
|
||||
### Released packages
|
||||
|
||||
- xo-server-backup-reports v0.16.0
|
||||
- complex-matcher v0.6.0
|
||||
- xo-vmdk-to-vhd v0.1.7
|
||||
- vhd-lib v0.6.1
|
||||
- xo-server v5.40.0
|
||||
- xo-web v5.40.1
|
||||
|
||||
## **5.33.1** (2019-04-04)
|
||||
|
||||
### Bug fix
|
||||
|
||||
- Fix major memory leak [2563be4](https://github.com/vatesfr/xen-orchestra/commit/2563be472bfd84c6ed867efd21c4aeeb824d387f)
|
||||
|
||||
### Released packages
|
||||
|
||||
- xen-api v0.25.1
|
||||
- xo-server v5.38.2
|
||||
|
||||
## **5.33.0** (2019-03-29)
|
||||
|
||||
### Enhancements
|
||||
|
||||
@@ -13,6 +190,20 @@
|
||||
- [Home] Save the current page in url [#3993](https://github.com/vatesfr/xen-orchestra/issues/3993) (PR [#3999](https://github.com/vatesfr/xen-orchestra/pull/3999))
|
||||
- [VDI] Ensure suspend VDI is destroyed when destroying a VM [#4027](https://github.com/vatesfr/xen-orchestra/issues/4027) (PR [#4038](https://github.com/vatesfr/xen-orchestra/pull/4038))
|
||||
- [VM/disk]: Warning when 2 VDIs are on 2 different hosts' local SRs [#3911](https://github.com/vatesfr/xen-orchestra/issues/3911) (PR [#3969](https://github.com/vatesfr/xen-orchestra/pull/3969))
|
||||
- [Remotes] Benchmarks (read and write rate speed) added when remote is tested [#3991](https://github.com/vatesfr/xen-orchestra/issues/3991) (PR [#4015](https://github.com/vatesfr/xen-orchestra/pull/4015))
|
||||
- [Cloud Config] Support both NoCloud and Config Drive 2 datasources for maximum compatibility (PR [#4053](https://github.com/vatesfr/xen-orchestra/pull/4053))
|
||||
- [Advanced] Configurable cookie validity (PR [#4059](https://github.com/vatesfr/xen-orchestra/pull/4059))
|
||||
- [Plugins] Display number of installed plugins [#4008](https://github.com/vatesfr/xen-orchestra/issues/4008) (PR [#4050](https://github.com/vatesfr/xen-orchestra/pull/4050))
|
||||
- [Continuous Replication] Opt-in mode to guess VHD size, should help with XenServer 7.1 CU2 and various `VDI_IO_ERROR` errors (PR [#3726](https://github.com/vatesfr/xen-orchestra/pull/3726))
|
||||
- [VM/Snapshots] Always delete broken quiesced snapshots [#4074](https://github.com/vatesfr/xen-orchestra/issues/4074) (PR [#4075](https://github.com/vatesfr/xen-orchestra/pull/4075))
|
||||
- [Settings/Servers] Display link to pool [#4041](https://github.com/vatesfr/xen-orchestra/issues/4041) (PR [#4045](https://github.com/vatesfr/xen-orchestra/pull/4045))
|
||||
- [Import] Change wording of drop zone (PR [#4020](https://github.com/vatesfr/xen-orchestra/pull/4020))
|
||||
- [Backup NG] Ability to set the interval of the full backups [#1783](https://github.com/vatesfr/xen-orchestra/issues/1783) (PR [#4083](https://github.com/vatesfr/xen-orchestra/pull/4083))
|
||||
- [Hosts] Display a warning icon if you have XenServer license restrictions [#4091](https://github.com/vatesfr/xen-orchestra/issues/4091) (PR [#4094](https://github.com/vatesfr/xen-orchestra/pull/4094))
|
||||
- [Restore] Ability to restore a metadata backup [#4004](https://github.com/vatesfr/xen-orchestra/issues/4004) (PR [#4023](https://github.com/vatesfr/xen-orchestra/pull/4023))
|
||||
- Improve connection to XCP-ng/XenServer hosts:
|
||||
- never disconnect by itself even in case of errors
|
||||
- never stop watching events
|
||||
|
||||
### Bug fixes
|
||||
|
||||
@@ -22,6 +213,25 @@
|
||||
- [Home/VM] Bulk migration: fixed VM VDIs not migrated to the selected SR [#3986](https://github.com/vatesfr/xen-orchestra/issues/3986) (PR [#3987](https://github.com/vatesfr/xen-orchestra/pull/3987))
|
||||
- [Stats] Fix cache usage with simultaneous requests [#4017](https://github.com/vatesfr/xen-orchestra/issues/4017) (PR [#4028](https://github.com/vatesfr/xen-orchestra/pull/4028))
|
||||
- [Backup NG] Fix compression displayed for the wrong backup mode (PR [#4021](https://github.com/vatesfr/xen-orchestra/pull/4021))
|
||||
- [Home] Always sort the items by their names as a secondary sort criteria [#3983](https://github.com/vatesfr/xen-orchestra/issues/3983) (PR [#4047](https://github.com/vatesfr/xen-orchestra/pull/4047))
|
||||
- [Remotes] Fixes `spawn mount EMFILE` error during backup
|
||||
- Properly redirect to sign in page instead of being stuck in a refresh loop
|
||||
- [Backup-ng] No more false positives when list matching VMs on Home page [#4078](https://github.com/vatesfr/xen-orchestra/issues/4078) (PR [#4085](https://github.com/vatesfr/xen-orchestra/pull/4085))
|
||||
- [Plugins] Properly remove optional settings when unchecking _Fill information_ (PR [#4076](https://github.com/vatesfr/xen-orchestra/pull/4076))
|
||||
- [Patches] (PR [#4077](https://github.com/vatesfr/xen-orchestra/pull/4077))
|
||||
- Add a host to a pool: fixes the auto-patching of the host on XenServer < 7.2 [#3783](https://github.com/vatesfr/xen-orchestra/issues/3783)
|
||||
- Add a host to a pool: homogenizes both the host and **pool**'s patches [#2188](https://github.com/vatesfr/xen-orchestra/issues/2188)
|
||||
- Safely install a subset of patches on a pool [#3777](https://github.com/vatesfr/xen-orchestra/issues/3777)
|
||||
- XCP-ng: no longer requires to run `yum install xcp-ng-updater` when it's already installed [#3934](https://github.com/vatesfr/xen-orchestra/issues/3934)
|
||||
|
||||
### Released packages
|
||||
|
||||
- xen-api v0.25.0
|
||||
- vhd-lib v0.6.0
|
||||
- @xen-orchestra/fs v0.8.0
|
||||
- xo-server-usage-report v0.7.2
|
||||
- xo-server v5.38.1
|
||||
- xo-web v5.38.0
|
||||
|
||||
## **5.32.2** (2019-02-28)
|
||||
|
||||
|
||||
@@ -1,37 +1,35 @@
|
||||
> This file contains all changes that have not been released yet.
|
||||
>
|
||||
> Keep in mind the changelog is addressed to **users** and should be
|
||||
> understandable by them.
|
||||
|
||||
### Enhancements
|
||||
|
||||
- [Remotes] Benchmarks (read and write rate speed) added when remote is tested [#3991](https://github.com/vatesfr/xen-orchestra/issues/3991) (PR [#4015](https://github.com/vatesfr/xen-orchestra/pull/4015))
|
||||
- [Cloud Config] Support both NoCloud and Config Drive 2 datasources for maximum compatibility (PR [#4053](https://github.com/vatesfr/xen-orchestra/pull/4053))
|
||||
- [Advanced] Configurable cookie validity (PR [#4059](https://github.com/vatesfr/xen-orchestra/pull/4059))
|
||||
- [Plugins] Display number of installed plugins [#4008](https://github.com/vatesfr/xen-orchestra/issues/4008) (PR [#4050](https://github.com/vatesfr/xen-orchestra/pull/4050))
|
||||
- [Continuous Replication] Opt-in mode to guess VHD size, should help with XenServer 7.1 CU2 and various `VDI_IO_ERROR` errors (PR [#3726](https://github.com/vatesfr/xen-orchestra/pull/3726))
|
||||
- [VM/Snapshots] Always delete broken quiesced snapshots [#4074](https://github.com/vatesfr/xen-orchestra/issues/4074) (PR [#4075](https://github.com/vatesfr/xen-orchestra/pull/4075))
|
||||
- [Settings/Servers] Display link to pool [#4041](https://github.com/vatesfr/xen-orchestra/issues/4041) (PR [#4045](https://github.com/vatesfr/xen-orchestra/pull/4045))
|
||||
- [Import] Change wording of drop zone (PR [#4020](https://github.com/vatesfr/xen-orchestra/pull/4020))
|
||||
- [Backup NG] Ability to set the interval of the full backups [#1783](https://github.com/vatesfr/xen-orchestra/issues/1783) (PR [#4083](https://github.com/vatesfr/xen-orchestra/pull/4083))
|
||||
- [Hosts] Display a warning icon if you have XenServer license restrictions [#4091](https://github.com/vatesfr/xen-orchestra/issues/4091) (PR [#4094](https://github.com/vatesfr/xen-orchestra/pull/4094))
|
||||
- [Restore] Ability to restore a metadata backup [#4004](https://github.com/vatesfr/xen-orchestra/issues/4004) (PR [#4023](https://github.com/vatesfr/xen-orchestra/pull/4023))
|
||||
> Users must be able to say: “Nice enhancement, I'm eager to test it”
|
||||
|
||||
- [SDN Controller] Let the user choose on which PIF to create a private network (PR [#4379](https://github.com/vatesfr/xen-orchestra/pull/4379))
|
||||
- [VM/Attach disk] Display confirmation modal when VDI is already attached [#3381](https://github.com/vatesfr/xen-orchestra/issues/3381) (PR [#4366](https://github.com/vatesfr/xen-orchestra/pull/4366))
|
||||
- [Zstd]
|
||||
- [VM/copy, VM/export] Only show zstd option when it's supported [#3892](https://github.com/vatesfr/xen-orchestra/issues/3892) (PRs [#4326](https://github.com/vatesfr/xen-orchestra/pull/4326) [#4368](https://github.com/vatesfr/xen-orchestra/pull/4368))
|
||||
- [VM/Bulk copy] Show warning if zstd compression is not supported on a VM [#3892](https://github.com/vatesfr/xen-orchestra/issues/3892) (PR [#4346](https://github.com/vatesfr/xen-orchestra/pull/4346))
|
||||
|
||||
### Bug fixes
|
||||
|
||||
- [Home] Always sort the items by their names as a secondary sort criteria [#3983](https://github.com/vatesfr/xen-orchestra/issues/3983) (PR [#4047](https://github.com/vatesfr/xen-orchestra/pull/4047))
|
||||
- [Remotes] Fixes `spawn mount EMFILE` error during backup
|
||||
- Properly redirect to sign in page instead of being stuck in a refresh loop
|
||||
- [Backup-ng] No more false positives when list matching VMs on Home page [#4078](https://github.com/vatesfr/xen-orchestra/issues/4078) (PR [#4085](https://github.com/vatesfr/xen-orchestra/pull/4085))
|
||||
- [Plugins] Properly remove optional settings when unchecking _Fill information_ (PR [#4076](https://github.com/vatesfr/xen-orchestra/pull/4076))
|
||||
- [Patches] (PR [#4077](https://github.com/vatesfr/xen-orchestra/pull/4077))
|
||||
- Add a host to a pool: fixes the auto-patching of the host on XenServer < 7.2 [#3783](https://github.com/vatesfr/xen-orchestra/issues/3783)
|
||||
- Add a host to a pool: homogenizes both the host and **pool**'s patches [#2188](https://github.com/vatesfr/xen-orchestra/issues/2188)
|
||||
- Safely install a subset of patches on a pool [#3777](https://github.com/vatesfr/xen-orchestra/issues/3777)
|
||||
- XCP-ng: no longer requires to run `yum install xcp-ng-updater` when it's already installed [#3934](https://github.com/vatesfr/xen-orchestra/issues/3934)
|
||||
> Users must be able to say: “I had this issue, happy to know it's fixed”
|
||||
|
||||
- [SDN Controller] Better detect host shutting down to adapt network topology (PR [#4314](https://github.com/vatesfr/xen-orchestra/pull/4314))
|
||||
- [SR/General] Display VDI VM name in SR usage graph (PR [#4370](https://github.com/vatesfr/xen-orchestra/pull/4370))
|
||||
- [SDN Controller] Add new hosts to pool's private networks (PR [#4382](https://github.com/vatesfr/xen-orchestra/pull/4382))
|
||||
- [VM/Attach disk] Fix checking VDI mode (PR [#4373](https://github.com/vatesfr/xen-orchestra/pull/4373))
|
||||
|
||||
### Released packages
|
||||
|
||||
- xen-api v0.24.6
|
||||
- vhd-lib v0.6.0
|
||||
- @xen-orchestra/fs v0.8.0
|
||||
- xo-server-usage-report v0.7.2
|
||||
- xo-server v5.38.0
|
||||
- xo-web v5.38.0
|
||||
> Packages will be released in the order they are here, therefore, they should
|
||||
> be listed by inverse order of dependency.
|
||||
>
|
||||
> Rule of thumb: add packages on top.
|
||||
|
||||
- xo-server-usage-report v0.7.3
|
||||
- xo-server-sdn-controller v0.1.2
|
||||
- xo-server v5.47.0
|
||||
- xo-web v5.47.0
|
||||
|
||||
@@ -14,5 +14,5 @@

1. create a PR as soon as possible
1. mark it as `WiP:` (Work in Progress) if not ready to be merged
1. when you want a review, add a reviewer
1. when you want a review, add a reviewer (and only one)
1. if necessary, update your PR, and re-add a reviewer

@@ -1,4 +1,4 @@
# Xen Orchestra [](https://go.crisp.im/chat/embed/?website_id=-JzqzzwddSV7bKGtEyAQ) [](https://travis-ci.org/vatesfr/xen-orchestra)
# Xen Orchestra [](https://travis-ci.org/vatesfr/xen-orchestra)

BIN  docs/assets/metadata-1.png (new file, 9.4 KiB)
BIN  docs/assets/metadata-2.png (new file, 71 KiB)
BIN  docs/assets/metadata-3.png (new file, 22 KiB)
BIN  docs/assets/metadata-4.png (new file, 48 KiB)
BIN  docs/assets/metadata-5.png (new file, 55 KiB)
BIN  docs/assets/metadata-6.png (new file, 57 KiB)
BIN  docs/assets/metadata-7.png (new file, 11 KiB)
@@ -1,13 +1,13 @@

# Installation

SSH to your XenServer and execute the following:
SSH to your XenServer/XCP-ng host and execute the following:

```
bash -c "$(curl -s http://xoa.io/deploy)"
```

This will automatically download/import/start the XOA appliance. Nothing is changed on your XenServer host itself, it's 100% safe.
This will automatically download/import/start the XOA appliance. Nothing is changed on your host itself, it's 100% safe.

## [More on XOA](xoa.md)

@@ -1,6 +1,6 @@
# Metadata backup

> WARNING: Metadata backup is an experimental feature. Restore is not yet available and some unexpected issues may occur.
> WARNING: Metadata backup is an experimental feature. Unexpected issues are possible, but unlikely.

## Introduction

@@ -11,21 +11,38 @@ In Xen Orchestra, Metadata backup is divided into two different options:
* Pool metadata backup
* XO configuration backup

### How to use metadata backup
### Performing a backup

In the backup job section, when creating a new backup job, you will now have a choice between backing up VMs and backing up Metadata.

In the backup job section, when creating a new backup job, you will now have a choice between backing up VMs and backing up Metadata:

When you select Metadata backup, you will have a new backup job screen, letting you choose between a pool metadata backup and an XO configuration backup (or both at the same time):

Define the name and retention for the job.

Once created, the job is displayed with the other classic jobs.

> Restore for metadata backup jobs should be available in XO 5.33

### Performing a restore

> WARNING: restoring pool metadata completely overwrites the XAPI database of a host. Only perform a metadata restore if it is a new server with nothing running on it (e.g. replacing a host with new hardware).

If you browse to the Backup NG Restore panel, you will now notice a Metadata filter button:

If you click this button, it will show you Metadata backups available for restore:

You can see both our Xen Orchestra config backup, and our pool metadata backup. To restore one, simply click the blue restore arrow, choose a backup date to restore, and click OK:

That's it!
@@ -1,24 +1,33 @@
# Support

You can access our pro support if you subscribe to any of these plans:
Xen Orchestra will run in a controlled/tested environment thanks to XOA ([Xen Orchestra virtual Appliance](https://xen-orchestra.com/#!/xoa)). **This is the way to get pro support**. Any account with a registered XOA can access a [dedicated support panel](https://xen-orchestra.com/#!/member/support).

XOA is available in multiple plans:

* Free
* Starter
* Enterprise
* Premium

The better the plan, the faster the support will be with higher priority.
Higher-tier support plans include faster ticket response times (and cover more features). Paid support plans and their response times are based on the plan you have; plans can be [reviewed here](https://xen-orchestra.com/#!/xo-pricing).

## XOA Free support

With the free version of the Xen Orchestra Appliance (XOA free), you can open support tickets and we will do our best to assist you; however, this support is limited and is not guaranteed in regards to response times or resolutions offered.

## Community support

If you are using Xen Orchestra via the sources, you can ask questions and try to recieve help two different ways:
If you are using Xen Orchestra via the source and not XOA, you can ask questions and try to receive help through a number of different ways:

* In our [forum](https://xen-orchestra.com/forum/)
* In our [forum](https://xcp-ng.org/forum/category/12/xen-orchestra)
* In our IRC - `#xen-orchestra` on `Freenode`

However, there's no guarantee you will receive an answer and no guaranteed response time. If you are using XO from sources, we encourage you to give back to the community by assisting other users via these two avenues as well.
We encourage you to give back to the community by assisting other users via these two avenues as well.

If you are using Xen Orchestra in production, please subscribe to a plan.
Lastly, while Xen Orchestra is free and Open Source software, supporting and developing it takes a lot of effort. If you are considering using Xen Orchestra in production, please subscribe to one of our [professional support plans](https://xen-orchestra.com/#!/xo-pricing).

> Note: support from the sources is harder, because Xen Orchestra can potentially run on any Linux distro (or even FreeBSD and Windows!). Always try to double-check that you followed our guide on how to [install it from the sources](https://xen-orchestra.com/docs/from_the_sources.html) before going further.

## Open a ticket

If you have a subscription, you can open a ticket describing your issue directly from your personal account page [here](https://xen-orchestra.com/#!/member/support)
If you have a subscription (or at least a registered free XOA), you can open a ticket describing your issue directly from your personal account page [here](https://xen-orchestra.com/#!/member/support)
16
package.json
@@ -6,23 +6,24 @@
|
||||
"babel-eslint": "^10.0.1",
|
||||
"babel-jest": "^24.1.0",
|
||||
"benchmark": "^2.1.4",
|
||||
"eslint": "^5.1.0",
|
||||
"eslint-config-prettier": "^4.1.0",
|
||||
"eslint": "^6.0.1",
|
||||
"eslint-config-prettier": "^6.0.0",
|
||||
"eslint-config-standard": "12.0.0",
|
||||
"eslint-config-standard-jsx": "^6.0.2",
|
||||
"eslint-plugin-eslint-comments": "^3.1.1",
|
||||
"eslint-plugin-import": "^2.8.0",
|
||||
"eslint-plugin-node": "^8.0.0",
|
||||
"eslint-plugin-node": "^9.0.1",
|
||||
"eslint-plugin-promise": "^4.0.0",
|
||||
"eslint-plugin-react": "^7.6.1",
|
||||
"eslint-plugin-standard": "^4.0.0",
|
||||
"exec-promise": "^0.7.0",
|
||||
"flow-bin": "^0.95.1",
|
||||
"globby": "^9.0.0",
|
||||
"husky": "^1.2.1",
|
||||
"flow-bin": "^0.102.0",
|
||||
"globby": "^10.0.0",
|
||||
"husky": "^3.0.0",
|
||||
"jest": "^24.1.0",
|
||||
"lodash": "^4.17.4",
|
||||
"prettier": "^1.10.2",
|
||||
"promise-toolbox": "^0.12.1",
|
||||
"promise-toolbox": "^0.13.0",
|
||||
"sorted-object": "^2.0.1"
|
||||
},
|
||||
"engines": {
|
||||
@@ -41,6 +42,7 @@
|
||||
"testEnvironment": "node",
|
||||
"testPathIgnorePatterns": [
|
||||
"/dist/",
|
||||
"/xo-server-test/",
|
||||
"/xo-web/"
|
||||
],
|
||||
"testRegex": "\\.spec\\.js$",
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "complex-matcher",
|
||||
"version": "0.5.0",
|
||||
"version": "0.6.0",
|
||||
"license": "ISC",
|
||||
"description": "",
|
||||
"keywords": [],
|
||||
@@ -25,7 +25,7 @@
|
||||
">2%"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">=4"
|
||||
"node": ">=6"
|
||||
},
|
||||
"dependencies": {
|
||||
"lodash": "^4.17.4"
|
||||
@@ -44,6 +44,7 @@
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
"prepublishOnly": "yarn run build",
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -599,6 +599,13 @@ export const parse = parser.parse.bind(parser)
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
const _extractStringFromRegexp = child => {
|
||||
const unescapedRegexp = child.re.source.replace(/^(\^)|\\|\$$/g, '')
|
||||
if (child.re.source === `^${escapeRegExp(unescapedRegexp)}$`) {
|
||||
return unescapedRegexp
|
||||
}
|
||||
}
|
||||
|
||||
const _getPropertyClauseStrings = ({ child }) => {
|
||||
if (child instanceof Or) {
|
||||
const strings = []
|
||||
@@ -606,6 +613,12 @@ const _getPropertyClauseStrings = ({ child }) => {
|
||||
if (child instanceof StringNode) {
|
||||
strings.push(child.value)
|
||||
}
|
||||
if (child instanceof RegExpNode) {
|
||||
const unescapedRegexp = _extractStringFromRegexp(child)
|
||||
if (unescapedRegexp !== undefined) {
|
||||
strings.push(unescapedRegexp)
|
||||
}
|
||||
}
|
||||
})
|
||||
return strings
|
||||
}
|
||||
@@ -613,6 +626,12 @@ const _getPropertyClauseStrings = ({ child }) => {
|
||||
if (child instanceof StringNode) {
|
||||
return [child.value]
|
||||
}
|
||||
if (child instanceof RegExpNode) {
|
||||
const unescapedRegexp = _extractStringFromRegexp(child)
|
||||
if (unescapedRegexp !== undefined) {
|
||||
return [unescapedRegexp]
|
||||
}
|
||||
}
|
||||
|
||||
return []
|
||||
}
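
To make the intent of `_extractStringFromRegexp` concrete, here is a standalone sketch of the same check (a sketch only, assuming lodash's `escapeRegExp`, which the code above appears to rely on; it is not part of complex-matcher's API):

```js
import escapeRegExp from 'lodash/escapeRegExp'

// Returns the literal string when the regexp is nothing more than an anchored,
// escaped literal (e.g. /^boo$/ -> 'boo'); returns undefined otherwise.
const extractStringFromRegExp = re => {
  const unescaped = re.source.replace(/^(\^)|\\|\$$/g, '')
  if (re.source === `^${escapeRegExp(unescaped)}$`) {
    return unescaped
  }
}

console.log(extractStringFromRegExp(/^boo$/)) // 'boo'
console.log(extractStringFromRegExp(/^b.o$/)) // undefined, '.' is a real regexp
```

This is what allows regexp-based property clauses such as `foo:/^bar$/` to be reported as the plain string `bar` by `getPropertyClausesStrings`, as the updated test below shows.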
|
||||
|
||||
@@ -12,10 +12,13 @@ import {
|
||||
} from './'
|
||||
|
||||
it('getPropertyClausesStrings', () => {
|
||||
const tmp = getPropertyClausesStrings(parse('foo bar:baz baz:|(foo bar)'))
|
||||
const tmp = getPropertyClausesStrings(
|
||||
parse('foo bar:baz baz:|(foo bar /^boo$/ /^far$/) foo:/^bar$/')
|
||||
)
|
||||
expect(tmp).toEqual({
|
||||
bar: ['baz'],
|
||||
baz: ['foo', 'bar'],
|
||||
baz: ['foo', 'bar', 'boo', 'far'],
|
||||
foo: ['bar'],
|
||||
})
|
||||
})
|
||||
|
||||
|
||||
@@ -25,7 +25,7 @@
|
||||
">2%"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">=4"
|
||||
"node": ">=6"
|
||||
},
|
||||
"dependencies": {},
|
||||
"devDependencies": {
|
||||
@@ -43,6 +43,7 @@
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepare": "yarn run build",
|
||||
"prepublishOnly": "yarn run build"
|
||||
"prepublishOnly": "yarn run build",
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -27,12 +27,12 @@
|
||||
"node": ">=6"
|
||||
},
|
||||
"dependencies": {
|
||||
"@xen-orchestra/fs": "^0.7.1",
|
||||
"@xen-orchestra/fs": "^0.10.1",
|
||||
"cli-progress": "^2.0.0",
|
||||
"exec-promise": "^0.7.0",
|
||||
"getopts": "^2.2.3",
|
||||
"struct-fu": "^1.2.0",
|
||||
"vhd-lib": "^0.5.1"
|
||||
"vhd-lib": "^0.7.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/cli": "^7.0.0",
|
||||
@@ -40,17 +40,18 @@
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^5.1.3",
|
||||
"execa": "^1.0.0",
|
||||
"execa": "^2.0.2",
|
||||
"index-modules": "^0.3.0",
|
||||
"promise-toolbox": "^0.12.1",
|
||||
"promise-toolbox": "^0.13.0",
|
||||
"rimraf": "^2.6.1",
|
||||
"tmp": "^0.0.33"
|
||||
"tmp": "^0.1.0"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prebuild": "rimraf dist/ && index-modules --cjs-lazy src/commands",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepare": "yarn run build"
|
||||
"prepare": "yarn run build",
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "vhd-lib",
|
||||
"version": "0.5.1",
|
||||
"version": "0.7.0",
|
||||
"license": "AGPL-3.0",
|
||||
"description": "Primitives for VHD file handling",
|
||||
"keywords": [],
|
||||
@@ -22,11 +22,11 @@
|
||||
},
|
||||
"dependencies": {
|
||||
"async-iterator-to-stream": "^1.0.2",
|
||||
"core-js": "3.0.0",
|
||||
"core-js": "^3.0.0",
|
||||
"from2": "^2.3.0",
|
||||
"fs-extra": "^7.0.0",
|
||||
"fs-extra": "^8.0.1",
|
||||
"limit-concurrency-decorator": "^0.4.0",
|
||||
"promise-toolbox": "^0.12.1",
|
||||
"promise-toolbox": "^0.13.0",
|
||||
"struct-fu": "^1.2.0",
|
||||
"uuid": "^3.0.1"
|
||||
},
|
||||
@@ -35,16 +35,16 @@
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"@babel/preset-flow": "^7.0.0",
|
||||
"@xen-orchestra/fs": "^0.7.1",
|
||||
"@xen-orchestra/fs": "^0.10.1",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^5.1.3",
|
||||
"execa": "^1.0.0",
|
||||
"execa": "^2.0.2",
|
||||
"fs-promise": "^2.0.0",
|
||||
"get-stream": "^4.0.0",
|
||||
"get-stream": "^5.1.0",
|
||||
"index-modules": "^0.3.0",
|
||||
"readable-stream": "^3.0.6",
|
||||
"rimraf": "^2.6.2",
|
||||
"tmp": "^0.0.33"
|
||||
"tmp": "^0.1.0"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
@@ -52,6 +52,7 @@
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run clean",
|
||||
"prepare": "yarn run build"
|
||||
"prepare": "yarn run build",
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -19,9 +19,7 @@ export default bat => {
|
||||
j += 4
|
||||
|
||||
if (j === n) {
|
||||
const error = new Error('no allocated block found')
|
||||
error.noBlock = true
|
||||
throw error
|
||||
return
|
||||
}
|
||||
}
|
||||
lastSector = firstSector
|
||||
|
||||
@@ -23,71 +23,110 @@ afterEach(async () => {
|
||||
await pFromCallback(cb => rimraf(tempDir, cb))
|
||||
})
|
||||
|
||||
async function convertFromRawToVhd(rawName, vhdName) {
|
||||
await execa('qemu-img', ['convert', '-f', 'raw', '-Ovpc', rawName, vhdName])
|
||||
}
|
||||
const RAW = 'raw'
|
||||
const VHD = 'vpc'
|
||||
const convert = (inputFormat, inputFile, outputFormat, outputFile) =>
|
||||
execa('qemu-img', [
|
||||
'convert',
|
||||
'-f',
|
||||
inputFormat,
|
||||
'-O',
|
||||
outputFormat,
|
||||
inputFile,
|
||||
outputFile,
|
||||
])
|
||||
|
||||
const createRandomStream = asyncIteratorToStream(function*(size) {
|
||||
let requested = Math.min(size, yield)
|
||||
while (size > 0) {
|
||||
const buf = Buffer.allocUnsafe(requested)
|
||||
for (let i = 0; i < requested; ++i) {
|
||||
buf[i] = Math.floor(Math.random() * 256)
|
||||
}
|
||||
requested = Math.min((size -= requested), yield buf)
|
||||
}
|
||||
})
|
||||
|
||||
async function createRandomFile(name, size) {
|
||||
const createRandomStream = asyncIteratorToStream(function*(size) {
|
||||
while (size-- > 0) {
|
||||
yield Buffer.from([Math.floor(Math.random() * 256)])
|
||||
}
|
||||
})
|
||||
const input = await createRandomStream(size)
|
||||
await pFromCallback(cb => pipeline(input, fs.createWriteStream(name), cb))
|
||||
}
|
||||
|
||||
test('createVhdStreamWithLength can extract length', async () => {
|
||||
const initialSize = 4 * 1024
|
||||
const rawFileName = `${tempDir}/randomfile`
|
||||
const vhdName = `${tempDir}/randomfile.vhd`
|
||||
const outputVhdName = `${tempDir}/output.vhd`
|
||||
await createRandomFile(rawFileName, initialSize)
|
||||
await convertFromRawToVhd(rawFileName, vhdName)
|
||||
const vhdSize = fs.statSync(vhdName).size
|
||||
const result = await createVhdStreamWithLength(
|
||||
await createReadStream(vhdName)
|
||||
)
|
||||
expect(result.length).toEqual(vhdSize)
|
||||
const outputFileStream = await createWriteStream(outputVhdName)
|
||||
await pFromCallback(cb => pipeline(result, outputFileStream, cb))
|
||||
const outputSize = fs.statSync(outputVhdName).size
|
||||
expect(outputSize).toEqual(vhdSize)
|
||||
})
|
||||
const forOwn = (object, cb) =>
|
||||
Object.keys(object).forEach(key => cb(object[key], key, object))
|
||||
|
||||
test('createVhdStreamWithLength can skip blank after last block and before footer', async () => {
|
||||
const initialSize = 4 * 1024
|
||||
const rawFileName = `${tempDir}/randomfile`
|
||||
const vhdName = `${tempDir}/randomfile.vhd`
|
||||
const outputVhdName = `${tempDir}/output.vhd`
|
||||
await createRandomFile(rawFileName, initialSize)
|
||||
await convertFromRawToVhd(rawFileName, vhdName)
|
||||
const vhdSize = fs.statSync(vhdName).size
|
||||
// read file footer
|
||||
const footer = await getStream.buffer(
|
||||
createReadStream(vhdName, { start: vhdSize - FOOTER_SIZE })
|
||||
describe('createVhdStreamWithLength', () => {
|
||||
forOwn(
|
||||
{
|
||||
// qemu-img requires this length or it fills with null bytes, which breaks
|
||||
// the test
|
||||
'can extract length': 34816,
|
||||
|
||||
'can handle empty file': 0,
|
||||
},
|
||||
(size, title) =>
|
||||
it(title, async () => {
|
||||
const inputRaw = `${tempDir}/input.raw`
|
||||
await createRandomFile(inputRaw, size)
|
||||
|
||||
const inputVhd = `${tempDir}/input.vhd`
|
||||
await convert(RAW, inputRaw, VHD, inputVhd)
|
||||
|
||||
const result = await createVhdStreamWithLength(
|
||||
await createReadStream(inputVhd)
|
||||
)
|
||||
const { length } = result
|
||||
|
||||
const outputVhd = `${tempDir}/output.vhd`
|
||||
await pFromCallback(
|
||||
pipeline.bind(undefined, result, await createWriteStream(outputVhd))
|
||||
)
|
||||
|
||||
// ensure the guessed length correspond to the stream length
|
||||
const { size: outputSize } = await fs.stat(outputVhd)
|
||||
expect(length).toEqual(outputSize)
|
||||
|
||||
// ensure the generated VHD is correct and contains the same data
|
||||
const outputRaw = `${tempDir}/output.raw`
|
||||
await convert(VHD, outputVhd, RAW, outputRaw)
|
||||
await execa('cmp', [inputRaw, outputRaw])
|
||||
})
|
||||
)
|
||||
|
||||
// we'll override the footer
|
||||
const endOfFile = await createWriteStream(vhdName, {
|
||||
flags: 'r+',
|
||||
start: vhdSize - FOOTER_SIZE,
|
||||
it('can skip blank after the last block and before the footer', async () => {
|
||||
const initialSize = 4 * 1024
|
||||
const rawFileName = `${tempDir}/randomfile`
|
||||
const vhdName = `${tempDir}/randomfile.vhd`
|
||||
const outputVhdName = `${tempDir}/output.vhd`
|
||||
await createRandomFile(rawFileName, initialSize)
|
||||
await convert(RAW, rawFileName, VHD, vhdName)
|
||||
const { size: vhdSize } = await fs.stat(vhdName)
|
||||
// read file footer
|
||||
const footer = await getStream.buffer(
|
||||
createReadStream(vhdName, { start: vhdSize - FOOTER_SIZE })
|
||||
)
|
||||
|
||||
// we'll override the footer
|
||||
const endOfFile = await createWriteStream(vhdName, {
|
||||
flags: 'r+',
|
||||
start: vhdSize - FOOTER_SIZE,
|
||||
})
|
||||
// write a blank over the previous footer
|
||||
await pFromCallback(cb => endOfFile.write(Buffer.alloc(FOOTER_SIZE), cb))
|
||||
// write the footer after the new blank
|
||||
await pFromCallback(cb => endOfFile.end(footer, cb))
|
||||
const { size: longerSize } = await fs.stat(vhdName)
|
||||
// check input file has been lengthened
|
||||
expect(longerSize).toEqual(vhdSize + FOOTER_SIZE)
|
||||
const result = await createVhdStreamWithLength(
|
||||
await createReadStream(vhdName)
|
||||
)
|
||||
expect(result.length).toEqual(vhdSize)
|
||||
const outputFileStream = await createWriteStream(outputVhdName)
|
||||
await pFromCallback(cb => pipeline(result, outputFileStream, cb))
|
||||
const { size: outputSize } = await fs.stat(outputVhdName)
|
||||
// check out file has been shortened again
|
||||
expect(outputSize).toEqual(vhdSize)
|
||||
await execa('qemu-img', ['compare', outputVhdName, vhdName])
|
||||
})
|
||||
// write a blank over the previous footer
|
||||
await pFromCallback(cb => endOfFile.write(Buffer.alloc(FOOTER_SIZE), cb))
|
||||
// write the footer after the new blank
|
||||
await pFromCallback(cb => endOfFile.end(footer, cb))
|
||||
const longerSize = fs.statSync(vhdName).size
|
||||
// check input file has been lengthened
|
||||
expect(longerSize).toEqual(vhdSize + FOOTER_SIZE)
|
||||
const result = await createVhdStreamWithLength(
|
||||
await createReadStream(vhdName)
|
||||
)
|
||||
expect(result.length).toEqual(vhdSize)
|
||||
const outputFileStream = await createWriteStream(outputVhdName)
|
||||
await pFromCallback(cb => pipeline(result, outputFileStream, cb))
|
||||
const outputSize = fs.statSync(outputVhdName).size
|
||||
// check out file has been shortened again
|
||||
expect(outputSize).toEqual(vhdSize)
|
||||
await execa('qemu-img', ['compare', outputVhdName, vhdName])
|
||||
})
|
||||
|
||||
@@ -63,10 +63,14 @@ export default async function createVhdStreamWithLength(stream) {
|
||||
stream.unshift(buf)
|
||||
}
|
||||
|
||||
const firstAndLastBlocks = getFirstAndLastBlocks(table)
|
||||
const footerOffset =
|
||||
getFirstAndLastBlocks(table).lastSector * SECTOR_SIZE +
|
||||
Math.ceil(header.blockSize / SECTOR_SIZE / 8 / SECTOR_SIZE) * SECTOR_SIZE +
|
||||
header.blockSize
|
||||
firstAndLastBlocks !== undefined
|
||||
? firstAndLastBlocks.lastSector * SECTOR_SIZE +
|
||||
Math.ceil(header.blockSize / SECTOR_SIZE / 8 / SECTOR_SIZE) *
|
||||
SECTOR_SIZE +
|
||||
header.blockSize
|
||||
: Math.ceil(streamPosition / SECTOR_SIZE) * SECTOR_SIZE
|
||||
|
||||
// ignore any data after footerOffset and push footerBuffer
|
||||
//
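
To see what the two branches of this `footerOffset` computation produce, here is a small worked sketch. The 512-byte sector size and 2 MiB block size are assumptions for illustration (typical VHD values), not taken from this diff:

```js
// Sketch only, not part of vhd-lib. SECTOR_SIZE and blockSize are assumed
// values (512 B sectors, 2 MiB blocks); getFirstAndLastBlocks() is stubbed.
const SECTOR_SIZE = 512
const blockSize = 2 * 1024 * 1024

// block bitmap size, rounded up to a whole sector (in bytes)
const bitmapSize =
  Math.ceil(blockSize / SECTOR_SIZE / 8 / SECTOR_SIZE) * SECTOR_SIZE // 512

// case 1: at least one allocated block, the footer sits right after the last
// block (bitmap + data)
const lastSector = 10 // hypothetical result of getFirstAndLastBlocks(table)
console.log(lastSector * SECTOR_SIZE + bitmapSize + blockSize) // 2102784

// case 2: no allocated block, fall back to the current stream position,
// rounded up to a sector boundary
const streamPosition = 1537 // hypothetical
console.log(Math.ceil(streamPosition / SECTOR_SIZE) * SECTOR_SIZE) // 2048
```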
|
||||
|
||||
@@ -1,9 +1,7 @@
|
||||
import assert from 'assert'
|
||||
import { fromEvent } from 'promise-toolbox'
|
||||
|
||||
import checkFooter from './_checkFooter'
|
||||
import checkHeader from './_checkHeader'
|
||||
import constantStream from './_constant-stream'
|
||||
import getFirstAndLastBlocks from './_getFirstAndLastBlocks'
|
||||
import { fuFooter, fuHeader, checksumStruct, unpackField } from './_structs'
|
||||
import { set as mapSetBit, test as mapTestBit } from './_bitmap'
|
||||
@@ -232,64 +230,45 @@ export default class Vhd {
|
||||
// Write functions.
|
||||
// =================================================================
|
||||
|
||||
// Write a buffer/stream at a given position in a vhd file.
|
||||
// Write a buffer at a given position in a vhd file.
|
||||
async _write(data, offset) {
|
||||
debug(
|
||||
`_write offset=${offset} size=${
|
||||
Buffer.isBuffer(data) ? data.length : '???'
|
||||
}`
|
||||
)
|
||||
// TODO: could probably be merged in remote handlers.
|
||||
const stream = await this._handler.createOutputStream(this._path, {
|
||||
flags: 'r+',
|
||||
start: offset,
|
||||
})
|
||||
return Buffer.isBuffer(data)
|
||||
? new Promise((resolve, reject) => {
|
||||
stream.on('error', reject)
|
||||
stream.end(data, resolve)
|
||||
})
|
||||
: fromEvent(data.pipe(stream), 'finish')
|
||||
assert(Buffer.isBuffer(data))
|
||||
debug(`_write offset=${offset} size=${data.length}`)
|
||||
return this._handler.write(this._path, data, offset)
|
||||
}
|
||||
|
||||
async _freeFirstBlockSpace(spaceNeededBytes) {
|
||||
try {
|
||||
const { first, firstSector, lastSector } = getFirstAndLastBlocks(
|
||||
this.blockTable
|
||||
const firstAndLastBlocks = getFirstAndLastBlocks(this.blockTable)
|
||||
if (firstAndLastBlocks === undefined) {
|
||||
return
|
||||
}
|
||||
|
||||
const { first, firstSector, lastSector } = firstAndLastBlocks
|
||||
const tableOffset = this.header.tableOffset
|
||||
const { batSize } = this
|
||||
const newMinSector = Math.ceil(
|
||||
(tableOffset + batSize + spaceNeededBytes) / SECTOR_SIZE
|
||||
)
|
||||
if (
|
||||
tableOffset + batSize + spaceNeededBytes >=
|
||||
sectorsToBytes(firstSector)
|
||||
) {
|
||||
const { fullBlockSize } = this
|
||||
const newFirstSector = Math.max(
|
||||
lastSector + fullBlockSize / SECTOR_SIZE,
|
||||
newMinSector
|
||||
)
|
||||
const tableOffset = this.header.tableOffset
|
||||
const { batSize } = this
|
||||
const newMinSector = Math.ceil(
|
||||
(tableOffset + batSize + spaceNeededBytes) / SECTOR_SIZE
|
||||
debug(
|
||||
`freeFirstBlockSpace: move first block ${firstSector} -> ${newFirstSector}`
|
||||
)
|
||||
if (
|
||||
tableOffset + batSize + spaceNeededBytes >=
|
||||
sectorsToBytes(firstSector)
|
||||
) {
|
||||
const { fullBlockSize } = this
|
||||
const newFirstSector = Math.max(
|
||||
lastSector + fullBlockSize / SECTOR_SIZE,
|
||||
newMinSector
|
||||
)
|
||||
debug(
|
||||
`freeFirstBlockSpace: move first block ${firstSector} -> ${newFirstSector}`
|
||||
)
|
||||
// copy the first block at the end
|
||||
const block = await this._read(
|
||||
sectorsToBytes(firstSector),
|
||||
fullBlockSize
|
||||
)
|
||||
await this._write(block, sectorsToBytes(newFirstSector))
|
||||
await this._setBatEntry(first, newFirstSector)
|
||||
await this.writeFooter(true)
|
||||
spaceNeededBytes -= this.fullBlockSize
|
||||
if (spaceNeededBytes > 0) {
|
||||
return this._freeFirstBlockSpace(spaceNeededBytes)
|
||||
}
|
||||
}
|
||||
} catch (e) {
|
||||
if (!e.noBlock) {
|
||||
throw e
|
||||
// copy the first block at the end
|
||||
const block = await this._read(sectorsToBytes(firstSector), fullBlockSize)
|
||||
await this._write(block, sectorsToBytes(newFirstSector))
|
||||
await this._setBatEntry(first, newFirstSector)
|
||||
await this.writeFooter(true)
|
||||
spaceNeededBytes -= this.fullBlockSize
|
||||
if (spaceNeededBytes > 0) {
|
||||
return this._freeFirstBlockSpace(spaceNeededBytes)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -312,7 +291,7 @@ export default class Vhd {
|
||||
`ensureBatSize: extend BAT ${prevMaxTableEntries} -> ${maxTableEntries}`
|
||||
)
|
||||
await this._write(
|
||||
constantStream(BUF_BLOCK_UNUSED, maxTableEntries - prevMaxTableEntries),
|
||||
Buffer.alloc(maxTableEntries - prevMaxTableEntries, BUF_BLOCK_UNUSED),
|
||||
header.tableOffset + prevBat.length
|
||||
)
|
||||
await this.writeHeader()
|
||||
@@ -337,10 +316,7 @@ export default class Vhd {
|
||||
|
||||
await Promise.all([
|
||||
// Write an empty block and addr in vhd file.
|
||||
this._write(
|
||||
constantStream([0], this.fullBlockSize),
|
||||
sectorsToBytes(blockAddr)
|
||||
),
|
||||
this._write(Buffer.alloc(this.fullBlockSize), sectorsToBytes(blockAddr)),
|
||||
|
||||
this._setBatEntry(blockId, blockAddr),
|
||||
])
|
||||
@@ -388,9 +364,7 @@ export default class Vhd {
|
||||
const offset = blockAddr + this.sectorsOfBitmap + beginSectorId
|
||||
|
||||
debug(
|
||||
`writeBlockSectors at ${offset} block=${
|
||||
block.id
|
||||
}, sectors=${beginSectorId}...${endSectorId}`
|
||||
`writeBlockSectors at ${offset} block=${block.id}, sectors=${beginSectorId}...${endSectorId}`
|
||||
)
|
||||
|
||||
for (let i = beginSectorId; i < endSectorId; ++i) {
|
||||
|
||||
@@ -41,7 +41,7 @@
|
||||
"human-format": "^0.10.0",
|
||||
"lodash": "^4.17.4",
|
||||
"pw": "^0.0.4",
|
||||
"xen-api": "^0.24.6"
|
||||
"xen-api": "^0.27.1"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/cli": "^7.1.5",
|
||||
@@ -56,6 +56,7 @@
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prebuild": "rimraf dist/",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
"prepublishOnly": "yarn run build",
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -82,7 +82,7 @@ console.log(xapi.pool.$master.$resident_VMs[0].name_label)

A CLI is provided to help exploration and discovery of the XAPI.

```
> xen-api https://xen1.company.net root
> xen-api xen1.company.net root
Password: ******
root@xen1.company.net> xapi.status
'connected'
@@ -92,6 +92,14 @@ root@xen1.company.net> xapi.pool.$master.name_label
'xen1'
```

You can optionally prefix the address with a protocol: `https://` (default) or `http://`.

In case of errors due to invalid or self-signed certificates, you can use the `--allow-unauthorized` flag (or `--au`):

```
> xen-api --au xen1.company.net root
```
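
The same behaviour is available when using the library from Node rather than the CLI. A minimal sketch, with option names mirroring the scripts updated elsewhere in this diff; the URL and credentials are placeholders:

```js
// Sketch only: connect to a host with a self-signed certificate from Node.
// `allowUnauthorized` mirrors the CLI's --allow-unauthorized flag.
const { createClient } = require('xen-api')

async function main() {
  const xapi = createClient({
    url: 'https://xen1.company.net', // placeholder host
    auth: { user: 'root', password: 'secret' }, // placeholder credentials
    allowUnauthorized: true,
  })

  await xapi.connect()
  console.log(xapi.status) // 'connected'
  await xapi.disconnect()
}

main().catch(console.error)
```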

To ease searches, `find()` and `findAll()` functions are available:

```
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "xen-api",
|
||||
"version": "0.24.6",
|
||||
"version": "0.27.1",
|
||||
"license": "ISC",
|
||||
"description": "Connector to the Xen API",
|
||||
"keywords": [
|
||||
@@ -33,12 +33,12 @@
|
||||
"node": ">=6"
|
||||
},
|
||||
"dependencies": {
|
||||
"bind-property-descriptor": "^1.0.0",
|
||||
"blocked": "^1.2.1",
|
||||
"debug": "^4.0.1",
|
||||
"event-to-promise": "^0.8.0",
|
||||
"exec-promise": "^0.7.0",
|
||||
"http-request-plus": "^0.8.0",
|
||||
"iterable-backoff": "^0.1.0",
|
||||
"jest-diff": "^24.0.0",
|
||||
"json-rpc-protocol": "^0.13.1",
|
||||
"kindof": "^2.0.0",
|
||||
@@ -46,7 +46,7 @@
|
||||
"make-error": "^1.3.0",
|
||||
"minimist": "^1.2.0",
|
||||
"ms": "^2.1.1",
|
||||
"promise-toolbox": "^0.12.1",
|
||||
"promise-toolbox": "^0.13.0",
|
||||
"pw": "0.0.4",
|
||||
"xmlrpc": "^1.3.2",
|
||||
"xo-collection": "^0.4.1"
|
||||
@@ -69,6 +69,7 @@
|
||||
"plot": "gnuplot -p memory-test.gnu",
|
||||
"prebuild": "rimraf dist/",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
"prepublishOnly": "yarn run build",
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,6 +9,7 @@ import minimist from 'minimist'
|
||||
import pw from 'pw'
|
||||
import { asCallback, fromCallback } from 'promise-toolbox'
|
||||
import { filter, find, isArray } from 'lodash'
|
||||
import { getBoundPropertyDescriptor } from 'bind-property-descriptor'
|
||||
import { start as createRepl } from 'repl'
|
||||
|
||||
import { createClient } from './'
|
||||
@@ -25,6 +26,20 @@ function askPassword(prompt = 'Password: ') {
|
||||
})
|
||||
}
|
||||
|
||||
const { getPrototypeOf, ownKeys } = Reflect
|
||||
function getAllBoundDescriptors(object) {
|
||||
const descriptors = { __proto__: null }
|
||||
let current = object
|
||||
do {
|
||||
ownKeys(current).forEach(key => {
|
||||
if (!(key in descriptors)) {
|
||||
descriptors[key] = getBoundPropertyDescriptor(current, key, object)
|
||||
}
|
||||
})
|
||||
} while ((current = getPrototypeOf(current)) !== null)
|
||||
return descriptors
|
||||
}
|
||||
|
||||
// ===================================================================
|
||||
|
||||
const usage = 'Usage: xen-api <url> [<user> [<password>]]'
|
||||
@@ -78,11 +93,17 @@ const main = async args => {
|
||||
const repl = createRepl({
|
||||
prompt: `${xapi._humanId}> `,
|
||||
})
|
||||
repl.context.xapi = xapi
|
||||
|
||||
repl.context.diff = (a, b) => console.log('%s', diff(a, b))
|
||||
repl.context.find = predicate => find(xapi.objects.all, predicate)
|
||||
repl.context.findAll = predicate => filter(xapi.objects.all, predicate)
|
||||
{
|
||||
const ctx = repl.context
|
||||
ctx.xapi = xapi
|
||||
|
||||
ctx.diff = (a, b) => console.log('%s', diff(a, b))
|
||||
ctx.find = predicate => find(xapi.objects.all, predicate)
|
||||
ctx.findAll = predicate => filter(xapi.objects.all, predicate)
|
||||
|
||||
Object.defineProperties(ctx, getAllBoundDescriptors(xapi))
|
||||
}
|
||||
|
||||
// Make the REPL waits for promise completion.
|
||||
repl.eval = (evaluate => (cmd, context, filename, cb) => {
|
||||
|
||||
@@ -4,31 +4,33 @@ import { pDelay } from 'promise-toolbox'
|
||||
|
||||
import { createClient } from './'
|
||||
|
||||
const xapi = (() => {
|
||||
const [, , url, user, password] = process.argv
|
||||
|
||||
return createClient({
|
||||
auth: { user, password },
|
||||
async function main([url]) {
|
||||
const xapi = createClient({
|
||||
allowUnauthorized: true,
|
||||
url,
|
||||
watchEvents: false,
|
||||
})
|
||||
})()
|
||||
await xapi.connect()
|
||||
|
||||
xapi
|
||||
.connect()
|
||||
|
||||
// Get the pool record's ref.
|
||||
.then(() => xapi.call('pool.get_all'))
|
||||
|
||||
// Injects lots of events.
|
||||
.then(([poolRef]) => {
|
||||
const loop = () =>
|
||||
pDelay
|
||||
.call(
|
||||
xapi.call('event.inject', 'pool', poolRef),
|
||||
10 // A small delay is required to avoid overloading the Xen API.
|
||||
)
|
||||
.then(loop)
|
||||
|
||||
return loop()
|
||||
let loop = true
|
||||
process.on('SIGINT', () => {
|
||||
loop = false
|
||||
})
|
||||
|
||||
const { pool } = xapi
|
||||
// eslint-disable-next-line no-unmodified-loop-condition
|
||||
while (loop) {
|
||||
await pool.update_other_config(
|
||||
'xo:injectEvents',
|
||||
Math.random()
|
||||
.toString(36)
|
||||
.slice(2)
|
||||
)
|
||||
await pDelay(1e2)
|
||||
}
|
||||
|
||||
await pool.update_other_config('xo:injectEvents', null)
|
||||
await xapi.disconnect()
|
||||
}
|
||||
|
||||
main(process.argv.slice(2)).catch(console.error)
|
||||
|
||||
@@ -25,5 +25,8 @@
|
||||
},
|
||||
"dependencies": {
|
||||
"xo-common": "^0.2.0"
|
||||
},
|
||||
"scripts": {
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -43,7 +43,7 @@
|
||||
"nice-pipe": "0.0.0",
|
||||
"pretty-ms": "^4.0.0",
|
||||
"progress-stream": "^2.0.0",
|
||||
"promise-toolbox": "^0.12.1",
|
||||
"promise-toolbox": "^0.13.0",
|
||||
"pump": "^3.0.0",
|
||||
"pw": "^0.0.4",
|
||||
"strip-indent": "^2.0.0",
|
||||
@@ -64,6 +64,7 @@
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prebuild": "rimraf dist/",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
"prepublishOnly": "yarn run build",
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -24,7 +24,6 @@ const nicePipe = require('nice-pipe')
|
||||
const pairs = require('lodash/toPairs')
|
||||
const pick = require('lodash/pick')
|
||||
const pump = require('pump')
|
||||
const startsWith = require('lodash/startsWith')
|
||||
const prettyMs = require('pretty-ms')
|
||||
const progressStream = require('progress-stream')
|
||||
const pw = require('pw')
|
||||
@@ -81,7 +80,7 @@ function parseParameters(args) {
|
||||
const name = matches[1]
|
||||
let value = matches[2]
|
||||
|
||||
if (startsWith(value, 'json:')) {
|
||||
if (value.startsWith('json:')) {
|
||||
value = JSON.parse(value.slice(5))
|
||||
}
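
To illustrate the `json:` prefix handling above, here is a standalone sketch of the parsing rule (the helper name is made up; only the `startsWith`/`JSON.parse` logic comes from the code):

```js
// Values prefixed with `json:` are parsed as JSON; everything else stays a
// plain string. This is what lets CLI parameters carry numbers, booleans or
// objects instead of strings.
const parseValue = value =>
  value.startsWith('json:') ? JSON.parse(value.slice(5)) : value

console.log(parseValue('true')) // 'true' (a string)
console.log(parseValue('json:true')) // true (a boolean)
console.log(parseValue('json:{"disks": 2}')) // { disks: 2 }
```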
|
||||
|
||||
|
||||
@@ -43,6 +43,7 @@
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prebuild": "rimraf dist/",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
"prepublishOnly": "yarn run build",
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -45,6 +45,7 @@
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
"prepublishOnly": "yarn run build",
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -32,7 +32,7 @@
|
||||
"dist/"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">=4"
|
||||
"node": ">=6"
|
||||
},
|
||||
"dependencies": {
|
||||
"csv-parser": "^2.1.0",
|
||||
@@ -43,7 +43,7 @@
|
||||
"xo-lib": "^0.9.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/node": "^11.11.4",
|
||||
"@types/node": "^12.0.2",
|
||||
"@types/through2": "^2.0.31",
|
||||
"tslint": "^5.9.1",
|
||||
"tslint-config-standard": "^8.0.1",
|
||||
@@ -55,6 +55,7 @@
|
||||
"lint": "tslint 'src/*.ts'",
|
||||
"posttest": "yarn run lint",
|
||||
"prepublishOnly": "yarn run build",
|
||||
"start": "node dist/index.js"
|
||||
"start": "node dist/index.js",
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -32,7 +32,7 @@
|
||||
"node": ">=6"
|
||||
},
|
||||
"dependencies": {
|
||||
"jsonrpc-websocket-client": "^0.4.1",
|
||||
"jsonrpc-websocket-client": "^0.5.0",
|
||||
"lodash": "^4.17.2",
|
||||
"make-error": "^1.0.4"
|
||||
},
|
||||
@@ -49,6 +49,7 @@
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prebuild": "rimraf dist/",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
"prepublishOnly": "yarn run build",
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
import JsonRpcWebSocketClient, { OPEN, CLOSED } from 'jsonrpc-websocket-client'
|
||||
import { BaseError } from 'make-error'
|
||||
import { startsWith } from 'lodash'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
@@ -35,7 +34,7 @@ export default class Xo extends JsonRpcWebSocketClient {
|
||||
}
|
||||
|
||||
call(method, args, i) {
|
||||
if (startsWith(method, 'session.')) {
|
||||
if (method.startsWith('session.')) {
|
||||
return Promise.reject(
|
||||
new XoError('session.*() methods are disabled from this interface')
|
||||
)
|
||||
|
||||
@@ -41,6 +41,7 @@
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prebuild": "rimraf dist/",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepare": "yarn run build"
|
||||
"prepare": "yarn run build",
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -41,5 +41,6 @@
|
||||
"build": "NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
"dev": "NODE_DEV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prepublishOnly": "yarn run build"
|
||||
}
|
||||
},
|
||||
"private": true
|
||||
}
|
||||
|
||||
@@ -49,5 +49,6 @@
|
||||
"prebuild": "rimraf dist/",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
}
|
||||
},
|
||||
"private": true
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "xo-server-auth-ldap",
|
||||
"version": "0.6.4",
|
||||
"version": "0.6.5",
|
||||
"license": "AGPL-3.0",
|
||||
"description": "LDAP authentication plugin for XO-Server",
|
||||
"keywords": [
|
||||
@@ -39,7 +39,7 @@
|
||||
"inquirer": "^6.0.0",
|
||||
"ldapjs": "^1.0.1",
|
||||
"lodash": "^4.17.4",
|
||||
"promise-toolbox": "^0.12.1"
|
||||
"promise-toolbox": "^0.13.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/cli": "^7.0.0",
|
||||
@@ -55,5 +55,6 @@
|
||||
"prebuild": "rimraf dist/",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
}
|
||||
},
|
||||
"private": true
|
||||
}
|
||||
|
||||
@@ -230,10 +230,9 @@ class AuthLdap {
|
||||
logger(`attempting to bind as ${entry.objectName}`)
|
||||
await bind(entry.objectName, password)
|
||||
logger(
|
||||
`successfully bound as ${
|
||||
entry.objectName
|
||||
} => ${username} authenticated`
|
||||
`successfully bound as ${entry.objectName} => ${username} authenticated`
|
||||
)
|
||||
logger(JSON.stringify(entry, null, 2))
|
||||
return { username }
|
||||
} catch (error) {
|
||||
logger(`failed to bind as ${entry.objectName}: ${error.message}`)
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "xo-server-auth-saml",
|
||||
"version": "0.5.3",
|
||||
"version": "0.6.0",
|
||||
"license": "AGPL-3.0",
|
||||
"description": "SAML authentication plugin for XO-Server",
|
||||
"keywords": [
|
||||
@@ -33,7 +33,7 @@
|
||||
"node": ">=6"
|
||||
},
|
||||
"dependencies": {
|
||||
"passport-saml": "^1.0.0"
|
||||
"passport-saml": "^1.1.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/cli": "^7.0.0",
|
||||
@@ -50,5 +50,6 @@
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
}
|
||||
},
|
||||
"private": true
|
||||
}
|
||||
|
||||
@@ -24,7 +24,10 @@ export const configurationSchema = {
|
||||
},
|
||||
usernameField: {
|
||||
title: 'Username field',
|
||||
description: 'Field to use as the XO username',
|
||||
description: `Field to use as the XO username
|
||||
|
||||
You should try \`http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddress\` if you are using Microsoft Azure Active Directory.
|
||||
`,
|
||||
type: 'string',
|
||||
},
|
||||
},
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "xo-server-backup-reports",
|
||||
"version": "0.15.0",
|
||||
"version": "0.16.2",
|
||||
"license": "AGPL-3.0",
|
||||
"description": "Backup reports plugin for XO-Server",
|
||||
"keywords": [
|
||||
@@ -36,6 +36,7 @@
|
||||
"node": ">=6"
|
||||
},
|
||||
"dependencies": {
|
||||
"@xen-orchestra/log": "^0.1.4",
|
||||
"human-format": "^0.10.0",
|
||||
"lodash": "^4.13.1",
|
||||
"moment-timezone": "^0.5.13"
|
||||
@@ -43,6 +44,8 @@
|
||||
"devDependencies": {
|
||||
"@babel/cli": "^7.0.0",
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/plugin-proposal-nullish-coalescing-operator": "^7.4.3",
|
||||
"@babel/plugin-proposal-optional-chaining": "^7.2.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^5.1.3",
|
||||
@@ -55,5 +58,6 @@
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
}
|
||||
},
|
||||
"private": true
|
||||
}
|
||||
|
||||
@@ -1,8 +1,11 @@
|
||||
import createLogger from '@xen-orchestra/log'
|
||||
import humanFormat from 'human-format'
|
||||
import moment from 'moment-timezone'
|
||||
import { forEach, get, startCase } from 'lodash'
|
||||
import { forEach, groupBy, startCase } from 'lodash'
|
||||
import pkg from '../package'
|
||||
|
||||
const logger = createLogger('xo:xo-server-backup-reports')
|
||||
|
||||
export const configurationSchema = {
|
||||
type: 'object',
|
||||
|
||||
@@ -46,6 +49,9 @@ export const testSchema = {
|
||||
|
||||
// ===================================================================
|
||||
|
||||
const INDENT = ' '
|
||||
const UNKNOWN_ITEM = 'Unknown'
|
||||
|
||||
const ICON_FAILURE = '🚨'
|
||||
const ICON_INTERRUPTED = '⚠️'
|
||||
const ICON_SKIPPED = '⏩'
|
||||
@@ -60,7 +66,7 @@ const STATUS_ICON = {
|
||||
}
|
||||
|
||||
const DATE_FORMAT = 'dddd, MMMM Do YYYY, h:mm:ss a'
|
||||
const createDateFormater = timezone =>
|
||||
const createDateFormatter = timezone =>
|
||||
timezone !== undefined
|
||||
? timestamp =>
|
||||
moment(timestamp)
|
||||
@@ -86,10 +92,6 @@ const formatSpeed = (bytes, milliseconds) =>
|
||||
})
|
||||
: 'N/A'
|
||||
|
||||
const logError = e => {
|
||||
console.error('backup report error:', e)
|
||||
}
|
||||
|
||||
const NO_VMS_MATCH_THIS_PATTERN = 'no VMs match this pattern'
|
||||
const NO_SUCH_OBJECT_ERROR = 'no such object'
|
||||
const UNHEALTHY_VDI_CHAIN_ERROR = 'unhealthy VDI chain'
|
||||
@@ -100,40 +102,116 @@ const isSkippedError = error =>
|
||||
error.message === UNHEALTHY_VDI_CHAIN_ERROR ||
|
||||
error.message === NO_SUCH_OBJECT_ERROR
|
||||
|
||||
const INDENT = ' '
|
||||
const createGetTemporalDataMarkdown = formatDate => (
|
||||
start,
|
||||
end,
|
||||
nbIndent = 0
|
||||
) => {
|
||||
const indent = INDENT.repeat(nbIndent)
|
||||
// ===================================================================
|
||||
|
||||
const markdown = [`${indent}- **Start time**: ${formatDate(start)}`]
|
||||
const STATUS = ['failure', 'interrupted', 'skipped', 'success']
|
||||
const TITLE_BY_STATUS = {
|
||||
failure: n => `## ${n} Failure${n === 1 ? '' : 's'}`,
|
||||
interrupted: n => `## ${n} Interrupted`,
|
||||
skipped: n => `## ${n} Skipped`,
|
||||
success: n => `## ${n} Success${n === 1 ? '' : 'es'}`,
|
||||
}
|
||||
|
||||
const getTemporalDataMarkdown = (end, start, formatDate) => {
|
||||
const markdown = [`- **Start time**: ${formatDate(start)}`]
|
||||
if (end !== undefined) {
|
||||
markdown.push(`${indent}- **End time**: ${formatDate(end)}`)
|
||||
markdown.push(`- **End time**: ${formatDate(end)}`)
|
||||
const duration = end - start
|
||||
if (duration >= 1) {
|
||||
markdown.push(`${indent}- **Duration**: ${formatDuration(duration)}`)
|
||||
markdown.push(`- **Duration**: ${formatDuration(duration)}`)
|
||||
}
|
||||
}
|
||||
return markdown
|
||||
}
|
||||
|
||||
const addWarnings = (text, warnings, nbIndent = 0) => {
|
||||
if (warnings === undefined) {
|
||||
const getWarningsMarkdown = (warnings = []) =>
|
||||
warnings.map(({ message }) => `- **${ICON_WARNING} ${message}**`)
|
||||
|
||||
const getErrorMarkdown = task => {
|
||||
let message
|
||||
if (
|
||||
task.status === 'success' ||
|
||||
(message = task.result?.message ?? task.result?.code) === undefined
|
||||
) {
|
||||
return
|
||||
}
|
||||
|
||||
const indent = INDENT.repeat(nbIndent)
|
||||
warnings.forEach(({ message }) => {
|
||||
text.push(`${indent}- **${ICON_WARNING} ${message}**`)
|
||||
})
|
||||
const label = task.status === 'skipped' ? 'Reason' : 'Error'
|
||||
return `- **${label}**: ${message}`
|
||||
}
|
||||
|
||||
const MARKDOWN_BY_TYPE = {
|
||||
pool(task, { formatDate }) {
|
||||
const { id, pool = {}, poolMaster = {} } = task.data
|
||||
const name = pool.name_label || poolMaster.name_label || UNKNOWN_ITEM
|
||||
|
||||
return {
|
||||
body: [
|
||||
pool.uuid !== undefined
|
||||
? `- **UUID**: ${pool.uuid}`
|
||||
: `- **ID**: ${id}`,
|
||||
...getTemporalDataMarkdown(task.end, task.start, formatDate),
|
||||
getErrorMarkdown(task),
|
||||
],
|
||||
title: `[pool] ${name}`,
|
||||
}
|
||||
},
|
||||
xo(task, { formatDate, jobName }) {
|
||||
return {
|
||||
body: [
|
||||
...getTemporalDataMarkdown(task.end, task.start, formatDate),
|
||||
getErrorMarkdown(task),
|
||||
],
|
||||
title: `[XO] ${jobName}`,
|
||||
}
|
||||
},
|
||||
async remote(task, { formatDate, xo }) {
|
||||
const id = task.data.id
|
||||
const name = await xo.getRemote(id).then(
|
||||
({ name }) => name,
|
||||
error => {
|
||||
logger.warn(error)
|
||||
return UNKNOWN_ITEM
|
||||
}
|
||||
)
|
||||
return {
|
||||
body: [
|
||||
`- **ID**: ${id}`,
|
||||
...getTemporalDataMarkdown(task.end, task.start, formatDate),
|
||||
getErrorMarkdown(task),
|
||||
],
|
||||
title: `[remote] ${name}`,
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
const getMarkdown = (task, props) =>
|
||||
MARKDOWN_BY_TYPE[(task.data?.type)]?.(task, props)
|
||||
|
||||
const toMarkdown = parts => {
|
||||
const lines = []
|
||||
let indentLevel = 0
|
||||
|
||||
const helper = part => {
|
||||
if (typeof part === 'string') {
|
||||
lines.push(`${INDENT.repeat(indentLevel)}${part}`)
|
||||
} else if (Array.isArray(part)) {
|
||||
++indentLevel
|
||||
part.forEach(helper)
|
||||
--indentLevel
|
||||
}
|
||||
}
|
||||
helper(parts)
|
||||
|
||||
return lines.join('\n')
|
||||
}
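
For readers following the new report format, a hedged sketch of how `toMarkdown` is meant to be used (the input is hypothetical):

```js
// Hypothetical input: strings become lines, nested arrays add one INDENT of
// leading whitespace per nesting level, and falsy entries (e.g. the
// `size > 0 && …` expressions used below) are silently skipped.
const report = toMarkdown([
  '- **Remotes**',
  ['- **NFS store**', ['- **Start time**: …'], undefined],
])
// report contains three lines: the remote header, then the store line one
// level deeper, then the start time one level deeper still.
```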
|
||||
|
||||
// ===================================================================
|
||||
|
||||
class BackupReportsXoPlugin {
|
||||
constructor(xo) {
|
||||
this._xo = xo
|
||||
this._report = this._wrapper.bind(this)
|
||||
this._report = this._report.bind(this)
|
||||
}
|
||||
|
||||
configure({ toMails, toXmpp }) {
|
||||
@@ -146,76 +224,174 @@ class BackupReportsXoPlugin {
|
||||
}
|
||||
|
||||
test({ runId }) {
|
||||
return this._backupNgListener(undefined, undefined, undefined, runId)
|
||||
return this._report(runId, undefined, true)
|
||||
}
|
||||
|
||||
unload() {
|
||||
this._xo.removeListener('job:terminated', this._report)
|
||||
}
|
||||
|
||||
_wrapper(status, job, schedule, runJobId) {
|
||||
if (job.type === 'metadataBackup') {
|
||||
return
|
||||
}
|
||||
async _report(runJobId, { type, status } = {}, force) {
|
||||
const xo = this._xo
|
||||
try {
|
||||
if (type === 'call') {
|
||||
return this._legacyVmHandler(status)
|
||||
}
|
||||
|
||||
return new Promise(resolve =>
|
||||
resolve(
|
||||
job.type === 'backup'
|
||||
? this._backupNgListener(status, job, schedule, runJobId)
|
||||
: this._listener(status, job, schedule, runJobId)
|
||||
)
|
||||
).catch(logError)
|
||||
const log = await xo.getBackupNgLogs(runJobId)
|
||||
if (log === undefined) {
|
||||
throw new Error(`no log found with runId=${JSON.stringify(runJobId)}`)
|
||||
}
|
||||
|
||||
const reportWhen = log.data.reportWhen
|
||||
if (
|
||||
!force &&
|
||||
(reportWhen === 'never' ||
|
||||
// Handle improper value introduced by:
|
||||
// https://github.com/vatesfr/xen-orchestra/commit/753ee994f2948bbaca9d3161eaab82329a682773#diff-9c044ab8a42ed6576ea927a64c1ec3ebR105
|
||||
reportWhen === 'Never' ||
|
||||
(reportWhen === 'failure' && log.status === 'success'))
|
||||
) {
|
||||
return
|
||||
}
|
||||
|
||||
const [job, schedule] = await Promise.all([
|
||||
await xo.getJob(log.jobId),
|
||||
await xo.getSchedule(log.scheduleId).catch(error => {
|
||||
logger.warn(error)
|
||||
}),
|
||||
])
|
||||
|
||||
if (job.type === 'backup') {
|
||||
return this._ngVmHandler(log, job, schedule, force)
|
||||
} else if (job.type === 'metadataBackup') {
|
||||
return this._metadataHandler(log, job, schedule, force)
|
||||
}
|
||||
|
||||
throw new Error(`Unknown backup job type: ${job.type}`)
|
||||
} catch (error) {
|
||||
logger.warn(error)
|
||||
}
|
||||
}
|
||||
|
||||
async _backupNgListener(_1, _2, schedule, runJobId) {
|
||||
async _metadataHandler(log, { name: jobName }, schedule, force) {
|
||||
const xo = this._xo
|
||||
const log = await xo.getBackupNgLogs(runJobId)
|
||||
if (log === undefined) {
|
||||
throw new Error(`no log found with runId=${JSON.stringify(runJobId)}`)
|
||||
|
||||
const formatDate = createDateFormatter(schedule?.timezone)
|
||||
|
||||
const tasksByStatus = groupBy(log.tasks, 'status')
|
||||
const n = log.tasks?.length ?? 0
|
||||
const nSuccesses = tasksByStatus.success?.length ?? 0
|
||||
|
||||
if (!force && log.data.reportWhen === 'failure') {
|
||||
delete tasksByStatus.success
|
||||
}
|
||||
|
||||
// header
|
||||
const markdown = [
|
||||
`## Global status: ${log.status}`,
|
||||
'',
|
||||
`- **Job ID**: ${log.jobId}`,
|
||||
`- **Job name**: ${jobName}`,
|
||||
`- **Run ID**: ${log.id}`,
|
||||
...getTemporalDataMarkdown(log.end, log.start, formatDate),
|
||||
n !== 0 && `- **Successes**: ${nSuccesses} / ${n}`,
|
||||
...getWarningsMarkdown(log.warnings),
|
||||
getErrorMarkdown(log),
|
||||
]
|
||||
|
||||
const nagiosText = []
|
||||
|
||||
// body
|
||||
for (const status of STATUS) {
|
||||
const tasks = tasksByStatus[status]
|
||||
if (tasks === undefined) {
|
||||
continue
|
||||
}
|
||||
|
||||
// tasks header
|
||||
markdown.push('---', '', TITLE_BY_STATUS[status](tasks.length))
|
||||
|
||||
// tasks body
|
||||
for (const task of tasks) {
|
||||
const taskMarkdown = await getMarkdown(task, {
|
||||
formatDate,
|
||||
jobName: log.jobName,
|
||||
})
|
||||
if (taskMarkdown === undefined) {
|
||||
continue
|
||||
}
|
||||
|
||||
const { title, body } = taskMarkdown
|
||||
const subMarkdown = [...body, ...getWarningsMarkdown(task.warnings)]
|
||||
|
||||
if (task.status !== 'success') {
|
||||
nagiosText.push(`[${task.status}] ${title}`)
|
||||
}
|
||||
|
||||
for (const subTask of task.tasks ?? []) {
|
||||
const taskMarkdown = await getMarkdown(subTask, { formatDate, xo })
|
||||
if (taskMarkdown === undefined) {
|
||||
continue
|
||||
}
|
||||
|
||||
const icon = STATUS_ICON[subTask.status]
|
||||
const { title, body } = taskMarkdown
|
||||
subMarkdown.push([
|
||||
`- **${title}** ${icon}`,
|
||||
[...body, ...getWarningsMarkdown(subTask.warnings)],
|
||||
])
|
||||
}
|
||||
markdown.push('', '', `### ${title}`, ...subMarkdown)
|
||||
}
|
||||
}
|
||||
|
||||
// footer
|
||||
markdown.push('---', '', `*${pkg.name} v${pkg.version}*`)
|
||||
|
||||
return this._sendReport({
|
||||
subject: `[Xen Orchestra] ${log.status} − Metadata backup report for ${
|
||||
log.jobName
|
||||
} ${STATUS_ICON[log.status]}`,
|
||||
markdown: toMarkdown(markdown),
|
||||
nagiosStatus: log.status === 'success' ? 0 : 2,
|
||||
nagiosMarkdown:
|
||||
log.status === 'success'
|
||||
? `[Xen Orchestra] [Success] Metadata backup report for ${log.jobName}`
|
||||
: `[Xen Orchestra] [${log.status}] Metadata backup report for ${
|
||||
log.jobName
|
||||
} - ${nagiosText.join(' ')}`,
|
||||
})
|
||||
}
|
||||
|
||||
async _ngVmHandler(log, { name: jobName }, schedule, force) {
|
||||
const xo = this._xo
|
||||
|
||||
const { reportWhen, mode } = log.data || {}
|
||||
if (
|
||||
reportWhen === 'never' ||
|
||||
(log.status === 'success' && reportWhen === 'failure')
|
||||
) {
|
||||
return
|
||||
}
|
||||
|
||||
if (schedule === undefined) {
|
||||
schedule = await xo.getSchedule(log.scheduleId)
|
||||
}
|
||||
const formatDate = createDateFormatter(schedule?.timezone)
|
||||
|
||||
const jobName = (await xo.getJob(log.jobId, 'backup')).name
|
||||
const formatDate = createDateFormater(schedule.timezone)
|
||||
const getTemporalDataMarkdown = createGetTemporalDataMarkdown(formatDate)
|
||||
|
||||
if (
|
||||
(log.status === 'failure' || log.status === 'skipped') &&
|
||||
log.result !== undefined
|
||||
) {
|
||||
let markdown = [
|
||||
if (log.tasks === undefined) {
|
||||
const markdown = [
|
||||
`## Global status: ${log.status}`,
|
||||
'',
|
||||
`- **Job ID**: ${log.jobId}`,
|
||||
`- **Run ID**: ${runJobId}`,
|
||||
`- **Run ID**: ${log.id}`,
|
||||
`- **mode**: ${mode}`,
|
||||
...getTemporalDataMarkdown(log.start, log.end),
|
||||
`- **Error**: ${log.result.message}`,
|
||||
...getTemporalDataMarkdown(log.end, log.start, formatDate),
|
||||
getErrorMarkdown(log),
|
||||
...getWarningsMarkdown(log.warnings),
|
||||
'---',
|
||||
'',
|
||||
`*${pkg.name} v${pkg.version}*`,
|
||||
]
|
||||
addWarnings(markdown, log.warnings)
|
||||
markdown.push('---', '', `*${pkg.name} v${pkg.version}*`)
|
||||
|
||||
markdown = markdown.join('\n')
|
||||
return this._sendReport({
|
||||
subject: `[Xen Orchestra] ${
|
||||
log.status
|
||||
} − Backup report for ${jobName} ${STATUS_ICON[log.status]}`,
|
||||
markdown,
|
||||
markdown: toMarkdown(markdown),
|
||||
nagiosStatus: 2,
|
||||
nagiosMarkdown: `[Xen Orchestra] [${
|
||||
log.status
|
||||
}] Backup report for ${jobName} - Error : ${log.result.message}`,
|
||||
nagiosMarkdown: `[Xen Orchestra] [${log.status}] Backup report for ${jobName} - Error : ${log.result.message}`,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -231,7 +407,7 @@ class BackupReportsXoPlugin {
|
||||
let nSkipped = 0
|
||||
let nInterrupted = 0
|
||||
for (const taskLog of log.tasks) {
|
||||
if (taskLog.status === 'success' && reportWhen === 'failure') {
|
||||
if (!force && taskLog.status === 'success' && reportWhen === 'failure') {
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -244,16 +420,16 @@ class BackupReportsXoPlugin {
|
||||
`### ${vm !== undefined ? vm.name_label : 'VM not found'}`,
|
||||
'',
|
||||
`- **UUID**: ${vm !== undefined ? vm.uuid : vmId}`,
|
||||
...getTemporalDataMarkdown(taskLog.start, taskLog.end),
|
||||
...getTemporalDataMarkdown(taskLog.end, taskLog.start, formatDate),
|
||||
...getWarningsMarkdown(taskLog.warnings),
|
||||
]
|
||||
addWarnings(text, taskLog.warnings)
|
||||
|
||||
const failedSubTasks = []
|
||||
const snapshotText = []
|
||||
const srsText = []
|
||||
const remotesText = []
|
||||
|
||||
for (const subTaskLog of taskLog.tasks || []) {
|
||||
for (const subTaskLog of taskLog.tasks ?? []) {
|
||||
if (
|
||||
subTaskLog.message !== 'export' &&
|
||||
subTaskLog.message !== 'snapshot'
|
||||
@@ -262,29 +438,36 @@ class BackupReportsXoPlugin {
|
||||
}
|
||||
|
||||
const icon = STATUS_ICON[subTaskLog.status]
|
||||
const errorMessage = ` - **Error**: ${get(
|
||||
subTaskLog.result,
|
||||
'message'
|
||||
)}`
|
||||
const type = subTaskLog.data?.type
|
||||
const errorMarkdown = getErrorMarkdown(subTaskLog)
|
||||
|
||||
if (subTaskLog.message === 'snapshot') {
|
||||
snapshotText.push(
|
||||
`- **Snapshot** ${icon}`,
|
||||
...getTemporalDataMarkdown(subTaskLog.start, subTaskLog.end, 1)
|
||||
)
|
||||
} else if (subTaskLog.data.type === 'remote') {
|
||||
snapshotText.push(`- **Snapshot** ${icon}`, [
|
||||
...getTemporalDataMarkdown(
|
||||
subTaskLog.end,
|
||||
subTaskLog.start,
|
||||
formatDate
|
||||
),
|
||||
])
|
||||
} else if (type === 'remote') {
|
||||
const id = subTaskLog.data.id
|
||||
const remote = await xo.getRemote(id).catch(() => {})
|
||||
remotesText.push(
|
||||
` - **${
|
||||
remote !== undefined ? remote.name : `Remote Not found`
|
||||
}** (${id}) ${icon}`,
|
||||
...getTemporalDataMarkdown(subTaskLog.start, subTaskLog.end, 2)
|
||||
)
|
||||
addWarnings(remotesText, subTaskLog.warnings, 2)
|
||||
const remote = await xo.getRemote(id).catch(error => {
|
||||
logger.warn(error)
|
||||
})
|
||||
const title = remote !== undefined ? remote.name : `Remote Not found`
|
||||
|
||||
remotesText.push(`- **${title}** (${id}) ${icon}`, [
|
||||
...getTemporalDataMarkdown(
|
||||
subTaskLog.end,
|
||||
subTaskLog.start,
|
||||
formatDate
|
||||
),
|
||||
...getWarningsMarkdown(subTaskLog.warnings),
|
||||
errorMarkdown,
|
||||
])
|
||||
|
||||
if (subTaskLog.status === 'failure') {
|
||||
failedSubTasks.push(remote !== undefined ? remote.name : id)
|
||||
remotesText.push('', errorMessage)
|
||||
}
|
||||
} else {
|
||||
const id = subTaskLog.data.id
|
||||
@@ -294,14 +477,17 @@ class BackupReportsXoPlugin {
|
||||
} catch (e) {}
|
||||
const [srName, srUuid] =
|
||||
sr !== undefined ? [sr.name_label, sr.uuid] : [`SR Not found`, id]
|
||||
srsText.push(
|
||||
` - **${srName}** (${srUuid}) ${icon}`,
|
||||
...getTemporalDataMarkdown(subTaskLog.start, subTaskLog.end, 2)
|
||||
)
|
||||
addWarnings(srsText, subTaskLog.warnings, 2)
|
||||
srsText.push(`- **${srName}** (${srUuid}) ${icon}`, [
|
||||
...getTemporalDataMarkdown(
|
||||
subTaskLog.end,
|
||||
subTaskLog.start,
|
||||
formatDate
|
||||
),
|
||||
...getWarningsMarkdown(subTaskLog.warnings),
|
||||
errorMarkdown,
|
||||
])
|
||||
if (subTaskLog.status === 'failure') {
|
||||
failedSubTasks.push(sr !== undefined ? sr.name_label : id)
|
||||
srsText.push('', errorMessage)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -313,53 +499,48 @@ class BackupReportsXoPlugin {
|
||||
return
|
||||
}
|
||||
|
||||
const operationInfoText = []
|
||||
addWarnings(operationInfoText, operationLog.warnings, 3)
|
||||
if (operationLog.status === 'success') {
|
||||
const size = operationLog.result.size
|
||||
const size = operationLog.result?.size
|
||||
if (size > 0) {
|
||||
if (operationLog.message === 'merge') {
|
||||
globalMergeSize += size
|
||||
} else {
|
||||
globalTransferSize += size
|
||||
}
|
||||
}
|
||||
|
||||
operationInfoText.push(
|
||||
` - **Size**: ${formatSize(size)}`,
|
||||
` - **Speed**: ${formatSpeed(
|
||||
size,
|
||||
operationLog.end - operationLog.start
|
||||
)}`
|
||||
)
|
||||
} else if (get(operationLog.result, 'message') !== undefined) {
|
||||
operationInfoText.push(
|
||||
` - **Error**: ${get(operationLog.result, 'message')}`
|
||||
)
|
||||
}
|
||||
const operationText = [
|
||||
` - **${operationLog.message}** ${
|
||||
STATUS_ICON[operationLog.status]
|
||||
}`,
|
||||
...getTemporalDataMarkdown(operationLog.start, operationLog.end, 3),
|
||||
...operationInfoText,
|
||||
].join('\n')
|
||||
if (get(subTaskLog, 'data.type') === 'remote') {
|
||||
`- **${operationLog.message}** ${STATUS_ICON[operationLog.status]}`,
|
||||
[
|
||||
...getTemporalDataMarkdown(
|
||||
operationLog.end,
|
||||
operationLog.start,
|
||||
formatDate
|
||||
),
|
||||
size > 0 && `- **Size**: ${formatSize(size)}`,
|
||||
size > 0 &&
|
||||
`- **Speed**: ${formatSpeed(
|
||||
size,
|
||||
operationLog.end - operationLog.start
|
||||
)}`,
|
||||
...getWarningsMarkdown(operationLog.warnings),
|
||||
getErrorMarkdown(operationLog),
|
||||
],
|
||||
]
|
||||
if (type === 'remote') {
|
||||
remotesText.push(operationText)
|
||||
remotesText.join('\n')
|
||||
}
|
||||
if (get(subTaskLog, 'data.type') === 'SR') {
|
||||
} else if (type === 'SR') {
|
||||
srsText.push(operationText)
|
||||
srsText.join('\n')
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
if (srsText.length !== 0) {
|
||||
srsText.unshift(`- **SRs**`)
|
||||
}
|
||||
if (remotesText.length !== 0) {
|
||||
remotesText.unshift(`- **Remotes**`)
|
||||
}
|
||||
const subText = [...snapshotText, '', ...srsText, '', ...remotesText]
|
||||
const subText = [
|
||||
...snapshotText,
|
||||
srsText.length !== 0 && `- **SRs**`,
|
||||
srsText,
|
||||
remotesText.length !== 0 && `- **Remotes**`,
|
||||
remotesText,
|
||||
]
|
||||
if (taskLog.result !== undefined) {
|
||||
if (taskLog.status === 'skipped') {
|
||||
++nSkipped
|
||||
@@ -369,8 +550,7 @@ class BackupReportsXoPlugin {
|
||||
taskLog.result.message === UNHEALTHY_VDI_CHAIN_ERROR
|
||||
? UNHEALTHY_VDI_CHAIN_MESSAGE
|
||||
: taskLog.result.message
|
||||
}`,
|
||||
''
|
||||
}`
|
||||
)
|
||||
nagiosText.push(
|
||||
`[(Skipped) ${vm !== undefined ? vm.name_label : 'undefined'} : ${
|
||||
@@ -379,11 +559,7 @@ class BackupReportsXoPlugin {
|
||||
)
|
||||
} else {
|
||||
++nFailures
|
||||
failedVmsText.push(
|
||||
...text,
|
||||
`- **Error**: ${taskLog.result.message}`,
|
||||
''
|
||||
)
|
||||
failedVmsText.push(...text, `- **Error**: ${taskLog.result.message}`)
|
||||
|
||||
nagiosText.push(
|
||||
`[(Failed) ${vm !== undefined ? vm.name_label : 'undefined'} : ${
|
||||
@@ -394,7 +570,7 @@ class BackupReportsXoPlugin {
|
||||
} else {
|
||||
if (taskLog.status === 'failure') {
|
||||
++nFailures
|
||||
failedVmsText.push(...text, '', '', ...subText, '')
|
||||
failedVmsText.push(...text, ...subText)
|
||||
nagiosText.push(
|
||||
`[${
|
||||
vm !== undefined ? vm.name_label : 'undefined'
|
||||
@@ -402,37 +578,34 @@ class BackupReportsXoPlugin {
|
||||
)
|
||||
} else if (taskLog.status === 'interrupted') {
|
||||
++nInterrupted
|
||||
interruptedVmsText.push(...text, '', '', ...subText, '')
|
||||
interruptedVmsText.push(...text, ...subText)
|
||||
nagiosText.push(
|
||||
`[(Interrupted) ${vm !== undefined ? vm.name_label : 'undefined'}]`
|
||||
)
|
||||
} else {
|
||||
successfulVmsText.push(...text, '', '', ...subText, '')
|
||||
successfulVmsText.push(...text, ...subText)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const nVms = log.tasks.length
|
||||
const nSuccesses = nVms - nFailures - nSkipped - nInterrupted
|
||||
let markdown = [
|
||||
const markdown = [
|
||||
`## Global status: ${log.status}`,
|
||||
'',
|
||||
`- **Job ID**: ${log.jobId}`,
|
||||
`- **Run ID**: ${runJobId}`,
|
||||
`- **Run ID**: ${log.id}`,
|
||||
`- **mode**: ${mode}`,
|
||||
...getTemporalDataMarkdown(log.start, log.end),
|
||||
...getTemporalDataMarkdown(log.end, log.start, formatDate),
|
||||
`- **Successes**: ${nSuccesses} / ${nVms}`,
|
||||
globalTransferSize !== 0 &&
|
||||
`- **Transfer size**: ${formatSize(globalTransferSize)}`,
|
||||
globalMergeSize !== 0 &&
|
||||
`- **Merge size**: ${formatSize(globalMergeSize)}`,
|
||||
...getWarningsMarkdown(log.warnings),
|
||||
'',
|
||||
]
|
||||
|
||||
if (globalTransferSize !== 0) {
|
||||
markdown.push(`- **Transfer size**: ${formatSize(globalTransferSize)}`)
|
||||
}
|
||||
if (globalMergeSize !== 0) {
|
||||
markdown.push(`- **Merge size**: ${formatSize(globalMergeSize)}`)
|
||||
}
|
||||
addWarnings(markdown, log.warnings)
|
||||
markdown.push('')
|
||||
|
||||
if (nFailures !== 0) {
|
||||
markdown.push(
|
||||
'---',
|
||||
@@ -457,7 +630,7 @@ class BackupReportsXoPlugin {
|
||||
)
|
||||
}
|
||||
|
||||
if (nSuccesses !== 0 && reportWhen !== 'failure') {
|
||||
if (nSuccesses !== 0 && (force || reportWhen !== 'failure')) {
|
||||
markdown.push(
|
||||
'---',
|
||||
'',
|
||||
@@ -468,9 +641,8 @@ class BackupReportsXoPlugin {
|
||||
}
|
||||
|
||||
markdown.push('---', '', `*${pkg.name} v${pkg.version}*`)
|
||||
markdown = markdown.join('\n')
|
||||
return this._sendReport({
|
||||
markdown,
|
||||
markdown: toMarkdown(markdown),
|
||||
subject: `[Xen Orchestra] ${log.status} − Backup report for ${jobName} ${
|
||||
STATUS_ICON[log.status]
|
||||
}`,
|
||||
@@ -510,9 +682,9 @@ class BackupReportsXoPlugin {
|
||||
])
|
||||
}
|
||||
|
||||
_listener(status) {
|
||||
_legacyVmHandler(status) {
|
||||
const { calls, timezone, error } = status
|
||||
const formatDate = createDateFormater(timezone)
|
||||
const formatDate = createDateFormatter(timezone)
|
||||
|
||||
if (status.error !== undefined) {
|
||||
const [globalStatus, icon] =
|
||||
@@ -537,9 +709,7 @@ class BackupReportsXoPlugin {
|
||||
subject: `[Xen Orchestra] ${globalStatus} ${icon}`,
|
||||
markdown,
|
||||
nagiosStatus: 2,
|
||||
nagiosMarkdown: `[Xen Orchestra] [${globalStatus}] Error : ${
|
||||
error.message
|
||||
}`,
|
||||
nagiosMarkdown: `[Xen Orchestra] [${globalStatus}] Error : ${error.message}`,
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -33,7 +33,7 @@
|
||||
},
|
||||
"dependencies": {
|
||||
"http-request-plus": "^0.8.0",
|
||||
"jsonrpc-websocket-client": "^0.4.1"
|
||||
"jsonrpc-websocket-client": "^0.5.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/cli": "^7.0.0",
|
||||
@@ -49,5 +49,6 @@
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
}
|
||||
},
|
||||
"private": true
|
||||
}
|
||||
|
||||
@@ -44,5 +44,6 @@
|
||||
"build": "NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
"dev": "NODE_DEV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prepublishOnly": "yarn run build"
|
||||
}
|
||||
},
|
||||
"private": true
|
||||
}
|
||||
|
||||
@@ -189,9 +189,7 @@ export default class DensityPlan extends Plan {
|
||||
const { vm, destination } = move
|
||||
const xapiDest = this.xo.getXapi(destination)
|
||||
debug(
|
||||
`Migrate VM (${vm.id}) to Host (${destination.id}) from Host (${
|
||||
vm.$container
|
||||
}).`
|
||||
`Migrate VM (${vm.id}) to Host (${destination.id}) from Host (${vm.$container}).`
|
||||
)
|
||||
return xapiDest.migrateVm(
|
||||
vm._xapiId,
|
||||
|
||||
@@ -126,9 +126,7 @@ export default class PerformancePlan extends Plan {
|
||||
destinationAverages.memoryFree -= vmAverages.memory
|
||||
|
||||
debug(
|
||||
`Migrate VM (${vm.id}) to Host (${destination.id}) from Host (${
|
||||
exceededHost.id
|
||||
}).`
|
||||
`Migrate VM (${vm.id}) to Host (${destination.id}) from Host (${exceededHost.id}).`
|
||||
)
|
||||
optimizationsCount++
|
||||
|
||||
@@ -143,9 +141,7 @@ export default class PerformancePlan extends Plan {
|
||||
|
||||
await Promise.all(promises)
|
||||
debug(
|
||||
`Performance mode: ${optimizationsCount} optimizations for Host (${
|
||||
exceededHost.id
|
||||
}).`
|
||||
`Performance mode: ${optimizationsCount} optimizations for Host (${exceededHost.id}).`
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -42,5 +42,6 @@
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
}
|
||||
},
|
||||
"private": true
|
||||
}
|
||||
|
||||
@@ -183,9 +183,7 @@ export const configurationSchema = {
|
||||
description: Object.keys(HOST_FUNCTIONS)
|
||||
.map(
|
||||
k =>
|
||||
` * ${k} (${HOST_FUNCTIONS[k].unit}): ${
|
||||
HOST_FUNCTIONS[k].description
|
||||
}`
|
||||
` * ${k} (${HOST_FUNCTIONS[k].unit}): ${HOST_FUNCTIONS[k].description}`
|
||||
)
|
||||
.join('\n'),
|
||||
type: 'string',
|
||||
@@ -233,9 +231,7 @@ export const configurationSchema = {
|
||||
description: Object.keys(VM_FUNCTIONS)
|
||||
.map(
|
||||
k =>
|
||||
` * ${k} (${VM_FUNCTIONS[k].unit}): ${
|
||||
VM_FUNCTIONS[k].description
|
||||
}`
|
||||
` * ${k} (${VM_FUNCTIONS[k].unit}): ${VM_FUNCTIONS[k].description}`
|
||||
)
|
||||
.join('\n'),
|
||||
type: 'string',
|
||||
@@ -284,9 +280,7 @@ export const configurationSchema = {
|
||||
description: Object.keys(SR_FUNCTIONS)
|
||||
.map(
|
||||
k =>
|
||||
` * ${k} (${SR_FUNCTIONS[k].unit}): ${
|
||||
SR_FUNCTIONS[k].description
|
||||
}`
|
||||
` * ${k} (${SR_FUNCTIONS[k].unit}): ${SR_FUNCTIONS[k].description}`
|
||||
)
|
||||
.join('\n'),
|
||||
type: 'string',
|
||||
@@ -414,9 +408,7 @@ ${monitorBodies.join('\n')}`
|
||||
}
|
||||
|
||||
_parseDefinition(definition) {
|
||||
const alarmId = `${definition.objectType}|${definition.variableName}|${
|
||||
definition.alarmTriggerLevel
|
||||
}`
|
||||
const alarmId = `${definition.objectType}|${definition.variableName}|${definition.alarmTriggerLevel}`
|
||||
const typeFunction =
|
||||
TYPE_FUNCTION_MAP[definition.objectType][definition.variableName]
|
||||
const parseData = (result, uuid) => {
|
||||
@@ -468,9 +460,7 @@ ${monitorBodies.join('\n')}`
|
||||
...definition,
|
||||
alarmId,
|
||||
vmFunction: typeFunction,
|
||||
title: `${typeFunction.name} ${definition.comparator} ${
|
||||
definition.alarmTriggerLevel
|
||||
}${typeFunction.unit}`,
|
||||
title: `${typeFunction.name} ${definition.comparator} ${definition.alarmTriggerLevel}${typeFunction.unit}`,
|
||||
snapshot: async () => {
|
||||
return Promise.all(
|
||||
map(definition.uuids, async uuid => {
|
||||
@@ -664,9 +654,7 @@ ${entry.listItem}
|
||||
subject: `[Xen Orchestra] − Performance Alert ${subjectSuffix}`,
|
||||
markdown:
|
||||
markdownBody +
|
||||
`\n\n\nSent from Xen Orchestra [perf-alert plugin](${
|
||||
this._configuration.baseUrl
|
||||
}#/settings/plugins)\n`,
|
||||
`\n\n\nSent from Xen Orchestra [perf-alert plugin](${this._configuration.baseUrl}#/settings/plugins)\n`,
|
||||
})
|
||||
} else {
|
||||
throw new Error('The email alert system has a configuration issue.')
|
||||
|
||||
3
packages/xo-server-sdn-controller/.babelrc.js
Normal file
@@ -0,0 +1,3 @@
|
||||
module.exports = require('../../@xen-orchestra/babel-config')(
|
||||
require('./package.json')
|
||||
)
|
||||
43
packages/xo-server-sdn-controller/README.md
Normal file
@@ -0,0 +1,43 @@
|
||||
# xo-server-sdn-controller [](https://travis-ci.org/vatesfr/xen-orchestra)
|
||||
|
||||
XO Server plugin that allows the creation of pool-wide private networks.
|
||||
|
||||
## Install
|
||||
|
||||
To install XO and its plugins from the sources, please take a look at [the documentation](https://xen-orchestra.com/docs/from_the_sources.html).
|
||||
|
||||
## Usage
|
||||
|
||||
### Network creation
|
||||
|
||||
In the network creation view, select a `pool` and `Private network`.
|
||||
Create the network.
|
||||
|
||||
You can choose between `GRE` and `VxLAN` encapsulation; if `VxLAN` is chosen, then port 4789 must be open for UDP traffic.
The following line needs to be added, if not already present, to `/etc/sysconfig/iptables` on all the hosts where `VxLAN` is to be used:
|
||||
`-A xapi-INPUT -p udp -m conntrack --ctstate NEW -m udp --dport 4789 -j ACCEPT`
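
The plugin also exposes this operation as an API method, `plugin.SDNController.createPrivateNetwork` (registered in `src/index.js` below), which takes `poolId`, `networkName`, `networkDescription`, `encapsulation` and `pifId`. The following is a minimal sketch of calling it through `xo-lib`; the URL, credentials and UUIDs are placeholders, and the lowercase encapsulation values are assumed from the OVSDB interface types the plugin uses.

```javascript
// Minimal sketch (not an official example): create a pool-wide private
// network through the API method registered by this plugin.
// The URL, credentials and UUIDs below are placeholders.
import Xo from 'xo-lib'

async function main() {
  const xo = new Xo({ url: 'https://xo.example.org' })
  await xo.open()
  await xo.signIn({ email: 'admin@example.org', password: 'secret' })

  await xo.call('plugin.SDNController.createPrivateNetwork', {
    poolId: '<pool UUID>',
    networkName: 'My private network',
    networkDescription: 'Pool-wide private network',
    // 'gre' or 'vxlan' (assumed values, matching the OVSDB interface types)
    encapsulation: 'gre',
    pifId: '<UUID of the PIF to build the tunnels on>',
  })

  await xo.close()
}

main().catch(console.error)
```
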
### Configuration
|
||||
|
||||
Like all other xo-server plugins, it can be configured directly via
|
||||
the web interface, see [the plugin documentation](https://xen-orchestra.com/docs/plugins.html).
|
||||
|
||||
The plugin's configuration contains:
|
||||
- `cert-dir`: The path of a directory containing the certificates used to create SSL connections with the hosts.
  If none is provided, the plugin will create its own self-signed certificates.
|
||||
- `override-certs`: Whether or not to uninstall an already existing SDN controller CA certificate in order to replace it with the plugin's own one (an example configuration is shown below).
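
For reference, here is a sketch of a filled-in configuration; the directory path is only an illustration, and when `cert-dir` is set the directory is expected to contain `ca-cert.pem`, `client-cert.pem` and `client-key.pem` (the file names used in `src/index.js`).

```javascript
// Example configuration values matching the plugin's schema.
// The path is illustrative only; leave `cert-dir` unset to let the plugin
// generate self-signed certificates in its own data directory.
const configuration = {
  'cert-dir': '/var/lib/xo-server/sdn-controller-certs',
  'override-certs': false,
}
```
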
## Contributions
|
||||
|
||||
Contributions are *very* welcome, either on the documentation or on
the code.
|
||||
|
||||
You may:
|
||||
|
||||
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
|
||||
you've encountered;
|
||||
- fork and create a pull request.
|
||||
|
||||
## License
|
||||
|
||||
AGPL3 © [Vates SAS](http://vates.fr)
|
||||
36
packages/xo-server-sdn-controller/package.json
Normal file
@@ -0,0 +1,36 @@
|
||||
{
|
||||
"name": "xo-server-sdn-controller",
|
||||
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-server-sdn-controller",
|
||||
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
|
||||
"repository": {
|
||||
"directory": "packages/xo-server-sdn-controller",
|
||||
"type": "git",
|
||||
"url": "https://github.com/vatesfr/xen-orchestra.git"
|
||||
},
|
||||
"main": "./dist",
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prebuild": "rimraf dist/",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
},
|
||||
"version": "0.1.2",
|
||||
"engines": {
|
||||
"node": ">=6"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/cli": "^7.4.4",
|
||||
"@babel/core": "^7.4.4",
|
||||
"@babel/plugin-proposal-optional-chaining": "^7.2.0",
|
||||
"@babel/preset-env": "^7.4.4",
|
||||
"cross-env": "^5.2.0"
|
||||
},
|
||||
"dependencies": {
|
||||
"@xen-orchestra/log": "^0.1.4",
|
||||
"lodash": "^4.17.11",
|
||||
"node-openssl-cert": "^0.0.84",
|
||||
"promise-toolbox": "^0.13.0"
|
||||
},
|
||||
"private": true
|
||||
}
|
||||
959
packages/xo-server-sdn-controller/src/index.js
Normal file
@@ -0,0 +1,959 @@
|
||||
import assert from 'assert'
|
||||
import createLogger from '@xen-orchestra/log'
|
||||
import NodeOpenssl from 'node-openssl-cert'
|
||||
import { access, constants, readFile, writeFile } from 'fs'
|
||||
import { EventEmitter } from 'events'
|
||||
import { filter, find, forEach, map } from 'lodash'
|
||||
import { fromCallback, fromEvent } from 'promise-toolbox'
|
||||
import { join } from 'path'
|
||||
|
||||
import { OvsdbClient } from './ovsdb-client'
|
||||
|
||||
// =============================================================================
|
||||
|
||||
const log = createLogger('xo:xo-server:sdn-controller')
|
||||
|
||||
const PROTOCOL = 'pssl'
|
||||
|
||||
const CA_CERT = 'ca-cert.pem'
|
||||
const CLIENT_KEY = 'client-key.pem'
|
||||
const CLIENT_CERT = 'client-cert.pem'
|
||||
|
||||
const SDN_CONTROLLER_CERT = 'sdn-controller-ca.pem'
|
||||
|
||||
const NB_DAYS = 9999
|
||||
|
||||
// =============================================================================
|
||||
|
||||
export const configurationSchema = {
|
||||
type: 'object',
|
||||
properties: {
|
||||
'cert-dir': {
|
||||
description: `Full path to a directory where to find: \`client-cert.pem\`,
|
||||
\`client-key.pem\` and \`ca-cert.pem\` to create ssl connections with hosts.
|
||||
If none is provided, the plugin will create its own self-signed certificates.`,
|
||||
|
||||
type: 'string',
|
||||
},
|
||||
'override-certs': {
|
||||
description: `Replace already existing SDN controller CA certificate`,
|
||||
|
||||
type: 'boolean',
|
||||
default: false,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// =============================================================================
|
||||
|
||||
async function fileWrite(path, data) {
|
||||
await fromCallback(writeFile, path, data)
|
||||
}
|
||||
|
||||
async function fileRead(path) {
|
||||
const result = await fromCallback(readFile, path)
|
||||
return result
|
||||
}
|
||||
|
||||
async function fileExists(path) {
|
||||
try {
|
||||
await fromCallback(access, path, constants.F_OK)
|
||||
} catch (error) {
|
||||
if (error.code === 'ENOENT') {
|
||||
return false
|
||||
}
|
||||
|
||||
throw error
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// =============================================================================
|
||||
|
||||
class SDNController extends EventEmitter {
|
||||
constructor({ xo, getDataDir }) {
|
||||
super()
|
||||
|
||||
this._xo = xo
|
||||
|
||||
this._getDataDir = getDataDir
|
||||
|
||||
this._poolNetworks = []
|
||||
this._ovsdbClients = []
|
||||
this._newHosts = []
|
||||
|
||||
this._networks = new Map()
|
||||
this._starCenters = new Map()
|
||||
|
||||
this._cleaners = []
|
||||
this._objectsAdded = this._objectsAdded.bind(this)
|
||||
this._objectsUpdated = this._objectsUpdated.bind(this)
|
||||
|
||||
this._overrideCerts = false
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
async configure(configuration) {
|
||||
this._overrideCerts = configuration['override-certs']
|
||||
let certDirectory = configuration['cert-dir']
|
||||
|
||||
if (certDirectory === undefined) {
|
||||
log.debug(`No cert-dir provided, using default self-signed certificates`)
|
||||
certDirectory = await this._getDataDir()
|
||||
|
||||
if (!(await fileExists(join(certDirectory, CA_CERT)))) {
|
||||
// If one certificate doesn't exist, none should
|
||||
assert(
|
||||
!(await fileExists(join(certDirectory, CLIENT_KEY))),
|
||||
`${CLIENT_KEY} should not exist`
|
||||
)
|
||||
assert(
|
||||
!(await fileExists(join(certDirectory, CLIENT_CERT))),
|
||||
`${CLIENT_CERT} should not exist`
|
||||
)
|
||||
|
||||
log.debug(`No default self-signed certificates exist, creating them`)
|
||||
await this._generateCertificatesAndKey(certDirectory)
|
||||
}
|
||||
}
|
||||
// TODO: verify certificates and create new certificates if needed
|
||||
|
||||
;[this._clientKey, this._clientCert, this._caCert] = await Promise.all([
|
||||
fileRead(join(certDirectory, CLIENT_KEY)),
|
||||
fileRead(join(certDirectory, CLIENT_CERT)),
|
||||
fileRead(join(certDirectory, CA_CERT)),
|
||||
])
|
||||
|
||||
this._ovsdbClients.forEach(client => {
|
||||
client.updateCertificates(this._clientKey, this._clientCert, this._caCert)
|
||||
})
|
||||
const updatedPools = []
|
||||
for (const poolNetwork of this._poolNetworks) {
|
||||
if (updatedPools.includes(poolNetwork.pool)) {
|
||||
continue
|
||||
}
|
||||
|
||||
const xapi = this._xo.getXapi(poolNetwork.pool)
|
||||
await this._installCaCertificateIfNeeded(xapi)
|
||||
updatedPools.push(poolNetwork.pool)
|
||||
}
|
||||
}
|
||||
|
||||
load() {
|
||||
const createPrivateNetwork = this._createPrivateNetwork.bind(this)
|
||||
createPrivateNetwork.description =
|
||||
'Creates a pool-wide private network on a selected pool'
|
||||
createPrivateNetwork.params = {
|
||||
poolId: { type: 'string' },
|
||||
networkName: { type: 'string' },
|
||||
networkDescription: { type: 'string' },
|
||||
encapsulation: { type: 'string' },
|
||||
pifId: { type: 'string' },
|
||||
}
|
||||
createPrivateNetwork.resolve = {
|
||||
xoPool: ['poolId', 'pool', ''],
|
||||
xoPif: ['pifId', 'PIF', ''],
|
||||
}
|
||||
this._unsetApiMethod = this._xo.addApiMethod(
|
||||
'plugin.SDNController.createPrivateNetwork',
|
||||
createPrivateNetwork
|
||||
)
|
||||
|
||||
// FIXME: we should monitor when xapis are added/removed
|
||||
return Promise.all(
|
||||
map(this._xo.getAllXapis(), async xapi => {
|
||||
await xapi.objectsFetched
|
||||
if (this._setControllerNeeded(xapi)) {
|
||||
return
|
||||
}
|
||||
|
||||
this._cleaners.push(await this._manageXapi(xapi))
|
||||
const hosts = filter(xapi.objects.all, { $type: 'host' })
|
||||
for (const host of hosts) {
|
||||
this._createOvsdbClient(host)
|
||||
}
|
||||
|
||||
// Add already existing pool-wide private networks
|
||||
const networks = filter(xapi.objects.all, { $type: 'network' })
|
||||
await Promise.all(
|
||||
map(networks, async network => {
|
||||
if (network.other_config.private_pool_wide !== 'true') {
|
||||
return
|
||||
}
|
||||
|
||||
log.debug('Adding network to managed networks', {
|
||||
network: network.name_label,
|
||||
pool: network.$pool.name_label,
|
||||
})
|
||||
const center = await this._electNewCenter(network, true)
|
||||
|
||||
// Previously created network didn't store `pif_device`
|
||||
if (network.other_config.pif_device === undefined) {
|
||||
const tunnel = this._getHostTunnelForNetwork(center, network.$ref)
|
||||
const pif = xapi.getObjectByRef(tunnel.transport_PIF)
|
||||
await xapi.call(
|
||||
'network.add_to_other_config',
|
||||
network.$ref,
|
||||
'pif_device',
|
||||
pif.device
|
||||
)
|
||||
}
|
||||
|
||||
this._poolNetworks.push({
|
||||
pool: network.$pool.$ref,
|
||||
network: network.$ref,
|
||||
starCenter: center?.$ref,
|
||||
})
|
||||
this._networks.set(network.$id, network.$ref)
|
||||
if (center !== undefined) {
|
||||
this._starCenters.set(center.$id, center.$ref)
|
||||
}
|
||||
})
|
||||
)
|
||||
})
|
||||
)
|
||||
}
|
||||
|
||||
async unload() {
|
||||
this._ovsdbClients = []
|
||||
this._poolNetworks = []
|
||||
this._newHosts = []
|
||||
|
||||
this._networks.clear()
|
||||
this._starCenters.clear()
|
||||
|
||||
this._cleaners.forEach(cleaner => cleaner())
|
||||
this._cleaners = []
|
||||
|
||||
this._unsetApiMethod()
|
||||
}
|
||||
|
||||
// ===========================================================================
|
||||
|
||||
async _createPrivateNetwork({
|
||||
xoPool,
|
||||
networkName,
|
||||
networkDescription,
|
||||
encapsulation,
|
||||
xoPif,
|
||||
}) {
|
||||
const pool = this._xo.getXapiObject(xoPool)
|
||||
await this._setPoolControllerIfNeeded(pool)
|
||||
|
||||
const pif = this._xo.getXapiObject(xoPif)
|
||||
|
||||
// Create the private network
|
||||
const privateNetworkRef = await pool.$xapi.call('network.create', {
|
||||
name_label: networkName,
|
||||
name_description: networkDescription,
|
||||
MTU: 0,
|
||||
other_config: {
|
||||
automatic: 'false',
|
||||
private_pool_wide: 'true',
|
||||
encapsulation: encapsulation,
|
||||
pif_device: pif.device,
|
||||
},
|
||||
})
|
||||
|
||||
const privateNetwork = await pool.$xapi._getOrWaitObject(privateNetworkRef)
|
||||
|
||||
log.info('New private network created', {
|
||||
network: privateNetwork.name_label,
|
||||
pool: pool.name_label,
|
||||
})
|
||||
|
||||
// For each pool's host, create a tunnel to the private network
|
||||
const hosts = filter(pool.$xapi.objects.all, { $type: 'host' })
|
||||
await Promise.all(
|
||||
map(hosts, async host => {
|
||||
await this._createTunnel(host, privateNetwork, pif.device)
|
||||
this._createOvsdbClient(host)
|
||||
})
|
||||
)
|
||||
|
||||
const center = await this._electNewCenter(privateNetwork, false)
|
||||
this._poolNetworks.push({
|
||||
pool: pool.$ref,
|
||||
network: privateNetwork.$ref,
|
||||
starCenter: center?.$ref,
|
||||
})
|
||||
this._networks.set(privateNetwork.$id, privateNetwork.$ref)
|
||||
if (center !== undefined) {
|
||||
this._starCenters.set(center.$id, center.$ref)
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
async _manageXapi(xapi) {
|
||||
const { objects } = xapi
|
||||
|
||||
const objectsRemovedXapi = this._objectsRemoved.bind(this, xapi)
|
||||
objects.on('add', this._objectsAdded)
|
||||
objects.on('update', this._objectsUpdated)
|
||||
objects.on('remove', objectsRemovedXapi)
|
||||
|
||||
await this._installCaCertificateIfNeeded(xapi)
|
||||
|
||||
return () => {
|
||||
objects.removeListener('add', this._objectsAdded)
|
||||
objects.removeListener('update', this._objectsUpdated)
|
||||
objects.removeListener('remove', objectsRemovedXapi)
|
||||
}
|
||||
}
|
||||
|
||||
_objectsAdded(objects) {
|
||||
forEach(objects, object => {
|
||||
const { $type } = object
|
||||
|
||||
if ($type === 'host') {
|
||||
log.debug('New host', {
|
||||
host: object.name_label,
|
||||
pool: object.$pool.name_label,
|
||||
})
|
||||
|
||||
if (find(this._newHosts, { $ref: object.$ref }) === undefined) {
|
||||
this._newHosts.push(object)
|
||||
}
|
||||
this._createOvsdbClient(object)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
_objectsUpdated(objects) {
|
||||
return Promise.all(
|
||||
map(objects, object => {
|
||||
const { $type } = object
|
||||
|
||||
if ($type === 'PIF') {
|
||||
return this._pifUpdated(object)
|
||||
}
|
||||
if ($type === 'host') {
|
||||
return this._hostUpdated(object)
|
||||
}
|
||||
if ($type === 'host_metrics') {
|
||||
return this._hostMetricsUpdated(object)
|
||||
}
|
||||
})
|
||||
)
|
||||
}
|
||||
|
||||
_objectsRemoved(xapi, objects) {
|
||||
return Promise.all(
|
||||
map(objects, async (object, id) => {
|
||||
this._ovsdbClients = this._ovsdbClients.filter(
|
||||
client => client.host.$id !== id
|
||||
)
|
||||
|
||||
// If a Star center host is removed: re-elect a new center where needed
|
||||
const starCenterRef = this._starCenters.get(id)
|
||||
if (starCenterRef !== undefined) {
|
||||
this._starCenters.delete(id)
|
||||
const poolNetworks = filter(this._poolNetworks, {
|
||||
starCenter: starCenterRef,
|
||||
})
|
||||
for (const poolNetwork of poolNetworks) {
|
||||
const network = xapi.getObjectByRef(poolNetwork.network)
|
||||
const newCenter = await this._electNewCenter(network, true)
|
||||
poolNetwork.starCenter = newCenter?.$ref
|
||||
if (newCenter !== undefined) {
|
||||
this._starCenters.set(newCenter.$id, newCenter.$ref)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// If a network is removed, clean this._poolNetworks from it
|
||||
const networkRef = this._networks.get(id)
|
||||
if (networkRef !== undefined) {
|
||||
this._networks.delete(id)
|
||||
this._poolNetworks = this._poolNetworks.filter(
|
||||
poolNetwork => poolNetwork.network !== networkRef
|
||||
)
|
||||
}
|
||||
})
|
||||
)
|
||||
}
|
||||
|
||||
async _pifUpdated(pif) {
|
||||
// Only if PIF is in a private network
|
||||
const poolNetwork = find(this._poolNetworks, { network: pif.network })
|
||||
if (poolNetwork === undefined) {
|
||||
return
|
||||
}
|
||||
|
||||
if (!pif.currently_attached) {
|
||||
const tunnel = this._getHostTunnelForNetwork(pif.$host, pif.network)
|
||||
await pif.$xapi.call('tunnel.set_status', tunnel.$ref, {
|
||||
active: 'false',
|
||||
})
|
||||
|
||||
if (poolNetwork.starCenter !== pif.host) {
|
||||
return
|
||||
}
|
||||
|
||||
log.debug(
|
||||
'PIF of star-center host has been unplugged, electing a new star-center',
|
||||
{
|
||||
pif: pif.device,
|
||||
network: pif.$network.name_label,
|
||||
host: pif.$host.name_label,
|
||||
pool: pif.$pool.name_label,
|
||||
}
|
||||
)
|
||||
const newCenter = await this._electNewCenter(pif.$network, true)
|
||||
poolNetwork.starCenter = newCenter?.$ref
|
||||
this._starCenters.delete(pif.$host.$id)
|
||||
if (newCenter !== undefined) {
|
||||
this._starCenters.set(newCenter.$id, newCenter.$ref)
|
||||
}
|
||||
} else {
|
||||
if (poolNetwork.starCenter === undefined) {
|
||||
const host = pif.$host
|
||||
log.debug('First available host becomes star center of network', {
|
||||
host: host.name_label,
|
||||
network: pif.$network.name_label,
|
||||
pool: pif.$pool.name_label,
|
||||
})
|
||||
poolNetwork.starCenter = pif.host
|
||||
this._starCenters.set(host.$id, host.$ref)
|
||||
}
|
||||
|
||||
log.debug('PIF plugged', {
|
||||
pif: pif.device,
|
||||
network: pif.$network.name_label,
|
||||
host: pif.$host.name_label,
|
||||
pool: pif.$pool.name_label,
|
||||
})
|
||||
|
||||
const starCenter = pif.$xapi.getObjectByRef(poolNetwork.starCenter)
|
||||
await this._addHostToNetwork(pif.$host, pif.$network, starCenter)
|
||||
}
|
||||
}
|
||||
|
||||
async _hostUpdated(host) {
|
||||
if (host.enabled) {
|
||||
if (host.PIFs.length === 0) {
|
||||
return
|
||||
}
|
||||
|
||||
const newHost = find(this._newHosts, { $ref: host.$ref })
|
||||
if (newHost !== undefined) {
|
||||
this._newHosts = this._newHosts.slice(
|
||||
this._newHosts.indexOf(newHost),
|
||||
1
|
||||
)
|
||||
|
||||
log.debug('Sync pool certificates', {
|
||||
newHost: host.name_label,
|
||||
pool: host.$pool.name_label,
|
||||
})
|
||||
try {
|
||||
await host.$xapi.call('pool.certificate_sync')
|
||||
} catch (error) {
|
||||
log.error('Error while syncing SDN controller CA certificate', {
|
||||
error,
|
||||
pool: host.$pool.name_label,
|
||||
})
|
||||
}
|
||||
|
||||
const poolNetworks = filter(this._poolNetworks, {
|
||||
pool: host.$pool.$ref,
|
||||
})
|
||||
for (const poolNetwork of poolNetworks) {
|
||||
const tunnel = this._getHostTunnelForNetwork(
|
||||
host,
|
||||
poolNetwork.network
|
||||
)
|
||||
if (tunnel !== undefined) {
|
||||
continue
|
||||
}
|
||||
|
||||
const network = host.$xapi.getObjectByRef(poolNetwork.network)
|
||||
const pifDevice = network.other_config.pif_device || 'eth0'
|
||||
this._createTunnel(host, network, pifDevice)
|
||||
}
|
||||
|
||||
this._addHostToPoolNetworks(host)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
_hostMetricsUpdated(hostMetrics) {
|
||||
const ovsdbClient = find(
|
||||
this._ovsdbClients,
|
||||
client => client.host.metrics === hostMetrics.$ref
|
||||
)
|
||||
|
||||
if (hostMetrics.live) {
|
||||
return this._addHostToPoolNetworks(ovsdbClient.host)
|
||||
}
|
||||
|
||||
return this._hostUnreachable(ovsdbClient.host)
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
async _setPoolControllerIfNeeded(pool) {
|
||||
if (!this._setControllerNeeded(pool.$xapi)) {
|
||||
// Nothing to do
|
||||
return
|
||||
}
|
||||
|
||||
const controller = find(pool.$xapi.objects.all, { $type: 'SDN_controller' })
|
||||
if (controller !== undefined) {
|
||||
await pool.$xapi.call('SDN_controller.forget', controller.$ref)
|
||||
log.debug('Old SDN controller removed', {
|
||||
pool: pool.name_label,
|
||||
})
|
||||
}
|
||||
|
||||
await pool.$xapi.call('SDN_controller.introduce', PROTOCOL)
|
||||
log.debug('SDN controller has been set', {
|
||||
pool: pool.name_label,
|
||||
})
|
||||
this._cleaners.push(await this._manageXapi(pool.$xapi))
|
||||
}
|
||||
|
||||
_setControllerNeeded(xapi) {
|
||||
const controller = find(xapi.objects.all, { $type: 'SDN_controller' })
|
||||
return !(
|
||||
controller !== undefined &&
|
||||
controller.protocol === PROTOCOL &&
|
||||
controller.address === '' &&
|
||||
controller.port === 0
|
||||
)
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
async _installCaCertificateIfNeeded(xapi) {
|
||||
let needInstall = false
|
||||
try {
|
||||
const result = await xapi.call('pool.certificate_list')
|
||||
if (!result.includes(SDN_CONTROLLER_CERT)) {
|
||||
needInstall = true
|
||||
} else if (this._overrideCerts) {
|
||||
await xapi.call('pool.certificate_uninstall', SDN_CONTROLLER_CERT)
|
||||
log.debug('Old SDN controller CA certificate uninstalled', {
|
||||
pool: xapi.pool.name_label,
|
||||
})
|
||||
needInstall = true
|
||||
}
|
||||
} catch (error) {
|
||||
log.error('Error while retrieving certificate list', {
|
||||
error,
|
||||
pool: xapi.pool.name_label,
|
||||
})
|
||||
}
|
||||
if (!needInstall) {
|
||||
return
|
||||
}
|
||||
|
||||
try {
|
||||
await xapi.call(
|
||||
'pool.certificate_install',
|
||||
SDN_CONTROLLER_CERT,
|
||||
this._caCert.toString()
|
||||
)
|
||||
await xapi.call('pool.certificate_sync')
|
||||
log.debug('SDN controller CA certificate installed', {
|
||||
pool: xapi.pool.name_label,
|
||||
})
|
||||
} catch (error) {
|
||||
log.error('Error while installing SDN controller CA certificate', {
|
||||
error,
|
||||
pool: xapi.pool.name_label,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
async _electNewCenter(network, resetNeeded) {
|
||||
const pool = network.$pool
|
||||
|
||||
let newCenter
|
||||
const hosts = filter(pool.$xapi.objects.all, { $type: 'host' })
|
||||
|
||||
for (const host of hosts) {
|
||||
const pif = find(host.$PIFs, { network: network.$ref })
|
||||
if (pif !== undefined && pif.currently_attached && host.$metrics.live) {
|
||||
newCenter = host
|
||||
}
|
||||
}
|
||||
|
||||
await Promise.all(
|
||||
map(hosts, async host => {
|
||||
if (!resetNeeded) {
|
||||
return
|
||||
}
|
||||
|
||||
// Clean old ports and interfaces
|
||||
const hostClient = find(
|
||||
this._ovsdbClients,
|
||||
client => client.host.$ref === host.$ref
|
||||
)
|
||||
if (hostClient !== undefined) {
|
||||
try {
|
||||
await hostClient.resetForNetwork(network.uuid, network.name_label)
|
||||
} catch (error) {
|
||||
log.error('Error while resetting private network', {
|
||||
error,
|
||||
network: network.name_label,
|
||||
host: host.name_label,
|
||||
pool: network.$pool.name_label,
|
||||
})
|
||||
}
|
||||
}
|
||||
})
|
||||
)
|
||||
|
||||
if (newCenter === undefined) {
|
||||
log.error('No available host to elect new star-center', {
|
||||
network: network.name_label,
|
||||
pool: network.$pool.name_label,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// Recreate star topology
|
||||
await Promise.all(
|
||||
map(hosts, host => this._addHostToNetwork(host, network, newCenter))
|
||||
)
|
||||
|
||||
log.info('New star-center elected', {
|
||||
center: newCenter.name_label,
|
||||
network: network.name_label,
|
||||
pool: network.$pool.name_label,
|
||||
})
|
||||
|
||||
return newCenter
|
||||
}
|
||||
|
||||
async _createTunnel(host, network, pifDevice) {
|
||||
const hostPif = find(host.$PIFs, { device: pifDevice })
|
||||
if (hostPif === undefined) {
|
||||
log.error("Can't create tunnel: no available PIF", {
|
||||
pif: pifDevice,
|
||||
network: network.name_label,
|
||||
host: host.name_label,
|
||||
pool: host.$pool.name_label,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
try {
|
||||
await host.$xapi.call('tunnel.create', hostPif.$ref, network.$ref)
|
||||
} catch (error) {
|
||||
log.error('Error while creating tunnel', {
|
||||
error,
|
||||
pif: pifDevice,
|
||||
network: network.name_label,
|
||||
host: host.name_label,
|
||||
pool: host.$pool.name_label,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
log.debug('New tunnel added', {
|
||||
pif: pifDevice,
|
||||
network: network.name_label,
|
||||
host: host.name_label,
|
||||
pool: host.$pool.name_label,
|
||||
})
|
||||
}
|
||||
|
||||
async _addHostToNetwork(host, network, starCenter) {
|
||||
if (host.$ref === starCenter.$ref) {
|
||||
// Nothing to do
|
||||
return
|
||||
}
|
||||
|
||||
const xapi = host.$xapi
|
||||
const tunnel = this._getHostTunnelForNetwork(host, network.$ref)
|
||||
const starCenterTunnel = this._getHostTunnelForNetwork(
|
||||
starCenter,
|
||||
network.$ref
|
||||
)
|
||||
await xapi.call('tunnel.set_status', tunnel.$ref, { active: 'false' })
|
||||
|
||||
const hostClient = find(
|
||||
this._ovsdbClients,
|
||||
client => client.host.$ref === host.$ref
|
||||
)
|
||||
if (hostClient === undefined) {
|
||||
log.error('No OVSDB client found', {
|
||||
host: host.name_label,
|
||||
pool: host.$pool.name_label,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
const starCenterClient = find(
|
||||
this._ovsdbClients,
|
||||
client => client.host.$ref === starCenter.$ref
|
||||
)
|
||||
if (starCenterClient === undefined) {
|
||||
log.error('No OVSDB client found for star-center', {
|
||||
host: starCenter.name_label,
|
||||
pool: starCenter.$pool.name_label,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
const encapsulation = network.other_config.encapsulation || 'gre'
|
||||
let bridgeName
|
||||
try {
|
||||
bridgeName = await hostClient.addInterfaceAndPort(
|
||||
network.uuid,
|
||||
network.name_label,
|
||||
starCenterClient.host.address,
|
||||
encapsulation
|
||||
)
|
||||
await starCenterClient.addInterfaceAndPort(
|
||||
network.uuid,
|
||||
network.name_label,
|
||||
hostClient.host.address,
|
||||
encapsulation
|
||||
)
|
||||
} catch (error) {
|
||||
log.error('Error while connecting host to private network', {
|
||||
error,
|
||||
network: network.name_label,
|
||||
host: host.name_label,
|
||||
pool: host.$pool.name_label,
|
||||
})
|
||||
}
|
||||
|
||||
if (bridgeName !== undefined) {
|
||||
const activeStatus = { active: 'true', key: bridgeName }
|
||||
await Promise.all([
|
||||
xapi.call('tunnel.set_status', tunnel.$ref, activeStatus),
|
||||
xapi.call('tunnel.set_status', starCenterTunnel.$ref, activeStatus),
|
||||
])
|
||||
}
|
||||
}
|
||||
|
||||
async _addHostToPoolNetworks(host) {
|
||||
const xapi = host.$xapi
|
||||
|
||||
const tunnels = filter(xapi.objects.all, { $type: 'tunnel' })
|
||||
for (const tunnel of tunnels) {
|
||||
const accessPif = xapi.getObjectByRef(tunnel.access_PIF)
|
||||
if (accessPif.host !== host.$ref) {
|
||||
continue
|
||||
}
|
||||
|
||||
const poolNetwork = find(this._poolNetworks, {
|
||||
network: accessPif.network,
|
||||
})
|
||||
if (poolNetwork === undefined || accessPif.currently_attached) {
|
||||
continue
|
||||
}
|
||||
|
||||
try {
|
||||
await xapi.call('PIF.plug', accessPif.$ref)
|
||||
} catch (error) {
|
||||
log.error('Error while plugging PIF', {
|
||||
error,
|
||||
pif: accessPif.device,
|
||||
network: accessPif.$network.name_label,
|
||||
host: host.name_label,
|
||||
pool: host.$pool.name_label,
|
||||
})
|
||||
continue
|
||||
}
|
||||
|
||||
log.debug('PIF plugged', {
|
||||
pif: accessPif.device,
|
||||
network: accessPif.$network.name_label,
|
||||
host: host.name_label,
|
||||
pool: host.$pool.name_label,
|
||||
})
|
||||
|
||||
const starCenter = xapi.getObjectByRef(poolNetwork.starCenter)
|
||||
await this._addHostToNetwork(host, accessPif.$network, starCenter)
|
||||
}
|
||||
}
|
||||
|
||||
async _hostUnreachable(host) {
|
||||
const poolNetworks = filter(this._poolNetworks, { starCenter: host.$ref })
|
||||
for (const poolNetwork of poolNetworks) {
|
||||
const network = host.$xapi.getObjectByRef(poolNetwork.network)
|
||||
log.debug('Unreachable star-center, electing a new one', {
|
||||
network: network.name_label,
|
||||
center: host.name_label,
|
||||
pool: host.$pool.name_label,
|
||||
})
|
||||
|
||||
const newCenter = await this._electNewCenter(network, true)
|
||||
poolNetwork.starCenter = newCenter?.$ref
|
||||
this._starCenters.delete(host.$id)
|
||||
if (newCenter !== undefined) {
|
||||
this._starCenters.set(newCenter.$id, newCenter.$ref)
|
||||
}
|
||||
}
|
||||
|
||||
for (const poolNetwork of this._poolNetworks) {
|
||||
const tunnel = this._getHostTunnelForNetwork(host, poolNetwork.network)
|
||||
await host.$xapi.call('tunnel.set_status', tunnel.$ref, {
|
||||
active: 'false',
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
_getHostTunnelForNetwork(host, networkRef) {
|
||||
const pif = find(host.$PIFs, { network: networkRef })
|
||||
if (pif === undefined) {
|
||||
return
|
||||
}
|
||||
|
||||
const tunnel = find(host.$xapi.objects.all, {
|
||||
$type: 'tunnel',
|
||||
access_PIF: pif.$ref,
|
||||
})
|
||||
|
||||
return tunnel
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
_createOvsdbClient(host) {
|
||||
const foundClient = find(
|
||||
this._ovsdbClients,
|
||||
client => client.host.$ref === host.$ref
|
||||
)
|
||||
if (foundClient !== undefined) {
|
||||
return foundClient
|
||||
}
|
||||
|
||||
const client = new OvsdbClient(
|
||||
host,
|
||||
this._clientKey,
|
||||
this._clientCert,
|
||||
this._caCert
|
||||
)
|
||||
this._ovsdbClients.push(client)
|
||||
return client
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
async _generateCertificatesAndKey(dataDir) {
|
||||
const openssl = new NodeOpenssl()
|
||||
|
||||
const rsakeyoptions = {
|
||||
rsa_keygen_bits: 4096,
|
||||
format: 'PKCS8',
|
||||
}
|
||||
const subject = {
|
||||
countryName: 'XX',
|
||||
localityName: 'Default City',
|
||||
organizationName: 'Default Company LTD',
|
||||
}
|
||||
const csroptions = {
|
||||
hash: 'sha256',
|
||||
startdate: new Date('1984-02-04 00:00:00'),
|
||||
enddate: new Date('2143-06-04 04:16:23'),
|
||||
subject: subject,
|
||||
}
|
||||
const cacsroptions = {
|
||||
hash: 'sha256',
|
||||
days: NB_DAYS,
|
||||
subject: subject,
|
||||
}
|
||||
|
||||
// In all the following callbacks, `error` is:
|
||||
// - either an error object if there was an error
|
||||
// - or a boolean set to `false` if no error occurred
|
||||
openssl.generateRSAPrivateKey(rsakeyoptions, (error, cakey, cmd) => {
|
||||
if (error !== false) {
|
||||
log.error('Error while generating CA private key', {
|
||||
error,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
openssl.generateCSR(cacsroptions, cakey, null, (error, csr, cmd) => {
|
||||
if (error !== false) {
|
||||
log.error('Error while generating CA certificate', {
|
||||
error,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
openssl.selfSignCSR(
|
||||
csr,
|
||||
cacsroptions,
|
||||
cakey,
|
||||
null,
|
||||
async (error, cacrt, cmd) => {
|
||||
if (error !== false) {
|
||||
log.error('Error while signing CA certificate', {
|
||||
error,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
await fileWrite(join(dataDir, CA_CERT), cacrt)
|
||||
openssl.generateRSAPrivateKey(
|
||||
rsakeyoptions,
|
||||
async (error, key, cmd) => {
|
||||
if (error !== false) {
|
||||
log.error('Error while generating private key', {
|
||||
error,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
await fileWrite(join(dataDir, CLIENT_KEY), key)
|
||||
openssl.generateCSR(
|
||||
csroptions,
|
||||
key,
|
||||
null,
|
||||
(error, csr, cmd) => {
|
||||
if (error !== false) {
|
||||
log.error('Error while generating certificate', {
|
||||
error,
|
||||
})
|
||||
return
|
||||
}
|
||||
openssl.CASignCSR(
|
||||
csr,
|
||||
cacsroptions,
|
||||
false,
|
||||
cacrt,
|
||||
cakey,
|
||||
null,
|
||||
async (error, crt, cmd) => {
|
||||
if (error !== false) {
|
||||
log.error('Error while signing certificate', {
|
||||
error,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
await fileWrite(join(dataDir, CLIENT_CERT), crt)
|
||||
this.emit('certWritten')
|
||||
}
|
||||
)
|
||||
}
|
||||
)
|
||||
}
|
||||
)
|
||||
}
|
||||
)
|
||||
})
|
||||
})
|
||||
|
||||
await fromEvent(this, 'certWritten', {})
|
||||
log.debug('All certificates have been successfully written')
|
||||
}
|
||||
}
|
||||
|
||||
export default opts => new SDNController(opts)
|
||||
515
packages/xo-server-sdn-controller/src/ovsdb-client.js
Normal file
@@ -0,0 +1,515 @@
|
||||
import assert from 'assert'
|
||||
import createLogger from '@xen-orchestra/log'
|
||||
import forOwn from 'lodash/forOwn'
|
||||
import fromEvent from 'promise-toolbox/fromEvent'
|
||||
import { connect } from 'tls'
|
||||
|
||||
const log = createLogger('xo:xo-server:sdn-controller:ovsdb-client')
|
||||
|
||||
const OVSDB_PORT = 6640
|
||||
|
||||
// =============================================================================
|
||||
|
||||
export class OvsdbClient {
|
||||
constructor(host, clientKey, clientCert, caCert) {
|
||||
this._numberOfPortAndInterface = 0
|
||||
this._requestID = 0
|
||||
|
||||
this._adding = []
|
||||
|
||||
this.host = host
|
||||
|
||||
this.updateCertificates(clientKey, clientCert, caCert)
|
||||
|
||||
log.debug('New OVSDB client', {
|
||||
host: this.host.name_label,
|
||||
})
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
updateCertificates(clientKey, clientCert, caCert) {
|
||||
this._clientKey = clientKey
|
||||
this._clientCert = clientCert
|
||||
this._caCert = caCert
|
||||
|
||||
log.debug('Certificates have been updated', {
|
||||
host: this.host.name_label,
|
||||
})
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
async addInterfaceAndPort(
|
||||
networkUuid,
|
||||
networkName,
|
||||
remoteAddress,
|
||||
encapsulation
|
||||
) {
|
||||
if (
|
||||
this._adding.find(
|
||||
elem => elem.id === networkUuid && elem.addr === remoteAddress
|
||||
) !== undefined
|
||||
) {
|
||||
return
|
||||
}
|
||||
const adding = { id: networkUuid, addr: remoteAddress }
|
||||
this._adding.push(adding)
|
||||
|
||||
const socket = await this._connect()
|
||||
const index = this._numberOfPortAndInterface
|
||||
++this._numberOfPortAndInterface
|
||||
|
||||
const [bridgeUuid, bridgeName] = await this._getBridgeUuidForNetwork(
|
||||
networkUuid,
|
||||
networkName,
|
||||
socket
|
||||
)
|
||||
if (bridgeUuid === undefined) {
|
||||
socket.destroy()
|
||||
this._adding = this._adding.slice(this._adding.indexOf(adding), 1)
|
||||
return
|
||||
}
|
||||
|
||||
const alreadyExist = await this._interfaceAndPortAlreadyExist(
|
||||
bridgeUuid,
|
||||
bridgeName,
|
||||
remoteAddress,
|
||||
socket
|
||||
)
|
||||
if (alreadyExist) {
|
||||
socket.destroy()
|
||||
this._adding = this._adding.slice(this._adding.indexOf(adding), 1)
|
||||
return bridgeName
|
||||
}
|
||||
|
||||
const interfaceName = 'tunnel_iface' + index
|
||||
const portName = 'tunnel_port' + index
|
||||
|
||||
// Add interface and port to the bridge
|
||||
const options = ['map', [['remote_ip', remoteAddress]]]
|
||||
const addInterfaceOperation = {
|
||||
op: 'insert',
|
||||
table: 'Interface',
|
||||
row: {
|
||||
type: encapsulation,
|
||||
options: options,
|
||||
name: interfaceName,
|
||||
other_config: ['map', [['private_pool_wide', 'true']]],
|
||||
},
|
||||
'uuid-name': 'new_iface',
|
||||
}
|
||||
const addPortOperation = {
|
||||
op: 'insert',
|
||||
table: 'Port',
|
||||
row: {
|
||||
name: portName,
|
||||
interfaces: ['set', [['named-uuid', 'new_iface']]],
|
||||
other_config: ['map', [['private_pool_wide', 'true']]],
|
||||
},
|
||||
'uuid-name': 'new_port',
|
||||
}
|
||||
const mutateBridgeOperation = {
|
||||
op: 'mutate',
|
||||
table: 'Bridge',
|
||||
where: [['_uuid', '==', ['uuid', bridgeUuid]]],
|
||||
mutations: [['ports', 'insert', ['set', [['named-uuid', 'new_port']]]]],
|
||||
}
|
||||
const params = [
|
||||
'Open_vSwitch',
|
||||
addInterfaceOperation,
|
||||
addPortOperation,
|
||||
mutateBridgeOperation,
|
||||
]
|
||||
const jsonObjects = await this._sendOvsdbTransaction(params, socket)
|
||||
|
||||
this._adding = this._adding.slice(this._adding.indexOf(adding), 1)
|
||||
if (jsonObjects === undefined) {
|
||||
socket.destroy()
|
||||
return
|
||||
}
|
||||
|
||||
let error
|
||||
let details
|
||||
let i = 0
|
||||
let opResult
|
||||
do {
|
||||
opResult = jsonObjects[0].result[i]
|
||||
if (opResult !== undefined && opResult.error !== undefined) {
|
||||
error = opResult.error
|
||||
details = opResult.details
|
||||
}
|
||||
++i
|
||||
} while (opResult !== undefined && error === undefined)
|
||||
|
||||
if (error !== undefined) {
|
||||
log.error('Error while adding port and interface to bridge', {
|
||||
error,
|
||||
details,
|
||||
port: portName,
|
||||
interface: interfaceName,
|
||||
bridge: bridgeName,
|
||||
network: networkName,
|
||||
host: this.host.name_label,
|
||||
})
|
||||
socket.destroy()
|
||||
return
|
||||
}
|
||||
|
||||
log.debug('Port and interface added to bridge', {
|
||||
port: portName,
|
||||
interface: interfaceName,
|
||||
bridge: bridgeName,
|
||||
network: networkName,
|
||||
host: this.host.name_label,
|
||||
})
|
||||
socket.destroy()
|
||||
return bridgeName
|
||||
}
|
||||
|
||||
async resetForNetwork(networkUuid, networkName) {
|
||||
const socket = await this._connect()
|
||||
const [bridgeUuid, bridgeName] = await this._getBridgeUuidForNetwork(
|
||||
networkUuid,
|
||||
networkName,
|
||||
socket
|
||||
)
|
||||
if (bridgeUuid === undefined) {
|
||||
socket.destroy()
|
||||
return
|
||||
}
|
||||
|
||||
// Delete old ports created by a SDN controller
|
||||
const ports = await this._getBridgePorts(bridgeUuid, bridgeName, socket)
|
||||
if (ports === undefined) {
|
||||
socket.destroy()
|
||||
return
|
||||
}
|
||||
const portsToDelete = []
|
||||
for (const port of ports) {
|
||||
const portUuid = port[1]
|
||||
|
||||
const where = [['_uuid', '==', ['uuid', portUuid]]]
|
||||
const selectResult = await this._select(
|
||||
'Port',
|
||||
['name', 'other_config'],
|
||||
where,
|
||||
socket
|
||||
)
|
||||
if (selectResult === undefined) {
|
||||
continue
|
||||
}
|
||||
|
||||
forOwn(selectResult.other_config[1], config => {
|
||||
if (config[0] === 'private_pool_wide' && config[1] === 'true') {
|
||||
portsToDelete.push(['uuid', portUuid])
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
if (portsToDelete.length === 0) {
|
||||
// Nothing to do
|
||||
socket.destroy()
|
||||
return
|
||||
}
|
||||
|
||||
const mutateBridgeOperation = {
|
||||
op: 'mutate',
|
||||
table: 'Bridge',
|
||||
where: [['_uuid', '==', ['uuid', bridgeUuid]]],
|
||||
mutations: [['ports', 'delete', ['set', portsToDelete]]],
|
||||
}
|
||||
|
||||
const params = ['Open_vSwitch', mutateBridgeOperation]
|
||||
const jsonObjects = await this._sendOvsdbTransaction(params, socket)
|
||||
if (jsonObjects === undefined) {
|
||||
socket.destroy()
|
||||
return
|
||||
}
|
||||
if (jsonObjects[0].error != null) {
|
||||
log.error('Error while deleting ports from bridge', {
|
||||
error: jsonObjects[0].error,
|
||||
bridge: bridgeName,
|
||||
host: this.host.name_label,
|
||||
})
|
||||
socket.destroy()
|
||||
return
|
||||
}
|
||||
|
||||
log.debug('Ports deleted from bridge', {
|
||||
nPorts: jsonObjects[0].result[0].count,
|
||||
bridge: bridgeName,
|
||||
host: this.host.name_label,
|
||||
})
|
||||
socket.destroy()
|
||||
}
|
||||
|
||||
// ===========================================================================
|
||||
|
||||
_parseJson(chunk) {
|
||||
let data = chunk.toString()
|
||||
let buffer = ''
|
||||
let depth = 0
|
||||
let pos = 0
|
||||
const objects = []
|
||||
|
||||
for (let i = pos; i < data.length; ++i) {
|
||||
const c = data.charAt(i)
|
||||
if (c === '{') {
|
||||
depth++
|
||||
} else if (c === '}') {
|
||||
depth--
|
||||
if (depth === 0) {
|
||||
const object = JSON.parse(buffer + data.substr(0, i + 1))
|
||||
objects.push(object)
|
||||
buffer = ''
|
||||
data = data.substr(i + 1)
|
||||
pos = 0
|
||||
i = -1
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
buffer += data
|
||||
return objects
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
async _getBridgeUuidForNetwork(networkUuid, networkName, socket) {
|
||||
const where = [
|
||||
[
|
||||
'external_ids',
|
||||
'includes',
|
||||
['map', [['xs-network-uuids', networkUuid]]],
|
||||
],
|
||||
]
|
||||
const selectResult = await this._select(
|
||||
'Bridge',
|
||||
['_uuid', 'name'],
|
||||
where,
|
||||
socket
|
||||
)
|
||||
if (selectResult === undefined) {
|
||||
log.error('No bridge found for network', {
|
||||
network: networkName,
|
||||
host: this.host.name_label,
|
||||
})
|
||||
return []
|
||||
}
|
||||
|
||||
const bridgeUuid = selectResult._uuid[1]
|
||||
const bridgeName = selectResult.name
|
||||
|
||||
return [bridgeUuid, bridgeName]
|
||||
}
|
||||
|
||||
async _interfaceAndPortAlreadyExist(
|
||||
bridgeUuid,
|
||||
bridgeName,
|
||||
remoteAddress,
|
||||
socket
|
||||
) {
|
||||
const ports = await this._getBridgePorts(bridgeUuid, bridgeName, socket)
|
||||
if (ports === undefined) {
|
||||
return false
|
||||
}
|
||||
|
||||
for (const port of ports) {
|
||||
const portUuid = port[1]
|
||||
const interfaces = await this._getPortInterfaces(portUuid, socket)
|
||||
if (interfaces === undefined) {
|
||||
continue
|
||||
}
|
||||
|
||||
for (const iface of interfaces) {
|
||||
const interfaceUuid = iface[1]
|
||||
const hasRemote = await this._interfaceHasRemote(
|
||||
interfaceUuid,
|
||||
remoteAddress,
|
||||
socket
|
||||
)
|
||||
if (hasRemote === true) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
async _getBridgePorts(bridgeUuid, bridgeName, socket) {
|
||||
const where = [['_uuid', '==', ['uuid', bridgeUuid]]]
|
||||
const selectResult = await this._select('Bridge', ['ports'], where, socket)
|
||||
if (selectResult === undefined) {
|
||||
return
|
||||
}
|
||||
|
||||
return selectResult.ports[0] === 'set'
|
||||
? selectResult.ports[1]
|
||||
: [selectResult.ports]
|
||||
}
|
||||
|
||||
async _getPortInterfaces(portUuid, socket) {
|
||||
const where = [['_uuid', '==', ['uuid', portUuid]]]
|
||||
const selectResult = await this._select(
|
||||
'Port',
|
||||
['name', 'interfaces'],
|
||||
where,
|
||||
socket
|
||||
)
|
||||
if (selectResult === undefined) {
|
||||
return
|
||||
}
|
||||
|
||||
return selectResult.interfaces[0] === 'set'
|
||||
? selectResult.interfaces[1]
|
||||
: [selectResult.interfaces]
|
||||
}
|
||||
|
||||
async _interfaceHasRemote(interfaceUuid, remoteAddress, socket) {
|
||||
const where = [['_uuid', '==', ['uuid', interfaceUuid]]]
|
||||
const selectResult = await this._select(
|
||||
'Interface',
|
||||
['name', 'options'],
|
||||
where,
|
||||
socket
|
||||
)
|
||||
if (selectResult === undefined) {
|
||||
return false
|
||||
}
|
||||
|
||||
for (const option of selectResult.options[1]) {
|
||||
if (option[0] === 'remote_ip' && option[1] === remoteAddress) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
async _select(table, columns, where, socket) {
|
||||
const selectOperation = {
|
||||
op: 'select',
|
||||
table: table,
|
||||
columns: columns,
|
||||
where: where,
|
||||
}
|
||||
|
||||
const params = ['Open_vSwitch', selectOperation]
|
||||
const jsonObjects = await this._sendOvsdbTransaction(params, socket)
|
||||
if (jsonObjects === undefined) {
|
||||
return
|
||||
}
|
||||
const jsonResult = jsonObjects[0].result[0]
|
||||
if (jsonResult.error !== undefined) {
|
||||
log.error('Error while selecting columns', {
|
||||
error: jsonResult.error,
|
||||
details: jsonResult.details,
|
||||
columns,
|
||||
table,
|
||||
where,
|
||||
host: this.host.name_label,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
if (jsonResult.rows.length === 0) {
|
||||
log.error('No result for select', {
|
||||
columns,
|
||||
table,
|
||||
where,
|
||||
host: this.host.name_label,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// For now all select operations should return only 1 row
|
||||
assert(
|
||||
jsonResult.rows.length === 1,
|
||||
`[${this.host.name_label}] There should be exactly 1 row when searching: '${columns}' in: '${table}' where: '${where}'`
|
||||
)
|
||||
|
||||
return jsonResult.rows[0]
|
||||
}
|
||||
|
||||
async _sendOvsdbTransaction(params, socket) {
|
||||
const stream = socket
|
||||
|
||||
const requestId = this._requestID
|
||||
++this._requestID
|
||||
const req = {
|
||||
id: requestId,
|
||||
method: 'transact',
|
||||
params: params,
|
||||
}
|
||||
|
||||
try {
|
||||
stream.write(JSON.stringify(req))
|
||||
} catch (error) {
|
||||
log.error('Error while writing into stream', {
|
||||
error,
|
||||
host: this.host.name_label,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
let result
|
||||
let jsonObjects
|
||||
let resultRequestId
|
||||
do {
|
||||
try {
|
||||
result = await fromEvent(stream, 'data', {})
|
||||
} catch (error) {
|
||||
log.error('Error while waiting for stream data', {
|
||||
error,
|
||||
host: this.host.name_label,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
jsonObjects = this._parseJson(result)
|
||||
resultRequestId = jsonObjects[0].id
|
||||
} while (resultRequestId !== requestId)
|
||||
|
||||
return jsonObjects
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
async _connect() {
|
||||
const options = {
|
||||
ca: this._caCert,
|
||||
key: this._clientKey,
|
||||
cert: this._clientCert,
|
||||
host: this.host.address,
|
||||
port: OVSDB_PORT,
|
||||
rejectUnauthorized: false,
|
||||
requestCert: false,
|
||||
}
|
||||
const socket = connect(options)
|
||||
|
||||
try {
|
||||
await fromEvent(socket, 'secureConnect', {})
|
||||
} catch (error) {
|
||||
log.error('TLS connection failed', {
|
||||
error,
|
||||
code: error.code,
|
||||
host: this.host.name_label,
|
||||
})
|
||||
throw error
|
||||
}
|
||||
|
||||
socket.on('error', error => {
|
||||
log.error('Socket error', {
|
||||
error,
|
||||
code: error.code,
|
||||
host: this.host.name_label,
|
||||
})
|
||||
})
|
||||
|
||||
return socket
|
||||
}
|
||||
}
|
||||
8
packages/xo-server-test/.babelrc.js
Normal file
@@ -0,0 +1,8 @@
|
||||
const pkg = require('./package.json')
|
||||
|
||||
// `xo-server-test` is a special package which has no dev dependencies but our
|
||||
// babel config generator only looks in `devDependencies`.
|
||||
require('assert').strictEqual(pkg.devDependencies, undefined)
|
||||
pkg.devDependencies = pkg.dependencies
|
||||
|
||||
module.exports = require('../../@xen-orchestra/babel-config')(pkg)
|
||||
24
packages/xo-server-test/.npmignore
Normal file
@@ -0,0 +1,24 @@
|
||||
/benchmark/
|
||||
/benchmarks/
|
||||
*.bench.js
|
||||
*.bench.js.map
|
||||
|
||||
/examples/
|
||||
example.js
|
||||
example.js.map
|
||||
*.example.js
|
||||
*.example.js.map
|
||||
|
||||
/fixture/
|
||||
/fixtures/
|
||||
*.fixture.js
|
||||
*.fixture.js.map
|
||||
*.fixtures.js
|
||||
*.fixtures.js.map
|
||||
|
||||
/test/
|
||||
/tests/
|
||||
*.spec.js
|
||||
*.spec.js.map
|
||||
|
||||
__snapshots__/
|
||||
144
packages/xo-server-test/README.md
Normal file
@@ -0,0 +1,144 @@
|
||||
# xo-server-test
|
||||
|
||||
> Test client for Xo-Server
|
||||
|
||||
Tests are run sequentially to avoid concurrency issues.
|
||||
|
||||
## Adding a test
|
||||
|
||||
### Organization
|
||||
|
||||
```
|
||||
src
|
||||
├─ user
|
||||
| ├─ __snapshots__
|
||||
| | └─ index.spec.js.snap
|
||||
| └─ index.spec.js
|
||||
├─ job
|
||||
¦ └─ index.spec.js
|
||||
¦
|
||||
¦
|
||||
├─ _xoConnection.js
|
||||
└─ util.js
|
||||
```
|
||||
|
||||
The tests can describe xo methods or scenarios:
|
||||
```javascript
|
||||
import xo from "../_xoConnection";
|
||||
|
||||
describe("user", () => {
|
||||
|
||||
// testing a method
|
||||
describe(".set()", () => {
|
||||
it("sets an email", async () => {
|
||||
// some tests using xo methods and helpers from _xoConnection.js
|
||||
const id = await xo.createTempUser(SIMPLE_USER);
|
||||
expect(await xo.call("user.set", params)).toBe(true);
|
||||
expect(await xo.getUser(id)).toMatchSnapshot({
|
||||
id: expect.any(String),
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
// testing a scenario
|
||||
test("create two users, modify one user's email to match the other's, and fail when trying to connect them", () => {
|
||||
/* some tests */
|
||||
});
|
||||
|
||||
});
|
||||
```
|
||||
|
||||
### Best practices
|
||||
|
||||
- The test environment must remain the same before and after each test:
|
||||
* each resource created must be deleted
|
||||
* existing resources should not be altered
|
||||
|
||||
- Write the title of the test as a sentence. It must be clear and consistent.
|
||||
|
||||
- If the feature you want to test is not implemented: write the test anyway and skip it, using `it.skip()`.
|
||||
|
||||
- Choose values that cover as many test cases as possible.
|
||||
|
||||
- If your tests keep track of large objects, it is better to use snapshots.
|
||||
|
||||
- `_xoConnection.js` contains helpers to create temporary resources and to interface with XO.
|
||||
You can use it if you need to create resources which will be automatically deleted after the test:
|
||||
```javascript
|
||||
import xo from "../_xoConnection";
|
||||
|
||||
describe(".create()", () => {
|
||||
it("creates a user without permission", async () => {
|
||||
// The user will be deleted automatically at the end of the test
|
||||
const userId = await xo.createTempUser({
|
||||
email: "wayne1@vates.fr",
|
||||
password: "batman1",
|
||||
});
|
||||
expect(await xo.getUser(userId)).toMatchSnapshot({
|
||||
id: expect.any(String),
|
||||
});
|
||||
});
|
||||
});
|
||||
```
|
||||
|
||||
The available helpers:
|
||||
* `createTempUser(params)`
|
||||
* `getUser(id)`
|
||||
* `createTempJob(params)`
|
||||
* `createTempBackupNgJob(params)`
|
||||
* `createTempVm(params)`
|
||||
* `getSchedule(predicate)`
|
||||
|
||||
## Usage
|
||||
|
||||
- Before running the tests, you have to create a config file for xo-server-test.
|
||||
```
|
||||
> cp sample.config.toml ~/.config/xo-server-test/config.toml
|
||||
```
|
||||
Then complete it.
|
||||
|
||||
- To run the tests:
|
||||
```
|
||||
> npm ci
|
||||
> yarn test
|
||||
```
|
||||
|
||||
The output shows each test suite as passed (`PASS`) or failed (`FAIL`).
|
||||
```
|
||||
> yarn test
|
||||
yarn run v1.9.4
|
||||
$ jest
|
||||
PASS src/user/user.spec.js
|
||||
PASS src/job/job.spec.js
|
||||
PASS src/backupNg/backupNg.spec.js
|
||||
|
||||
Test Suites: 3 passed, 3 total
|
||||
Tests: 2 skipped, 36 passed, 38 total
|
||||
Snapshots: 35 passed, 35 total
|
||||
Time: 7.257s, estimated 8s
|
||||
Ran all test suites.
|
||||
Done in 7.92s.
|
||||
```
|
||||
|
||||
- You can run only tests related to changed files, and review the failed output by using: `> yarn test --watch`
|
||||
|
||||
- ⚠ Warning: snapshots ⚠
|
||||
After each run of the tests, check that snapshots are not inadvertently modified.
|
||||
|
||||
- ⚠ Jest known issue ⚠
|
||||
If a test timeout is triggered, the next async tests can fail; this is due to inadvertently modified snapshots.
|
||||
|
||||
## Contributions
|
||||
|
||||
Contributions are *very* welcome, either on the documentation or on
the code.
|
||||
|
||||
You may:
|
||||
|
||||
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
|
||||
you've encountered;
|
||||
- fork and create a pull request.
|
||||
|
||||
## License
|
||||
|
||||
ISC © [Vates SAS](http://vates.fr)
|
||||
56
packages/xo-server-test/package.json
Normal file
@@ -0,0 +1,56 @@
|
||||
{
|
||||
"private": true,
|
||||
"name": "xo-server-test",
|
||||
"version": "0.0.0",
|
||||
"license": "ISC",
|
||||
"description": "Test client for Xo-Server",
|
||||
"keywords": [],
|
||||
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-server-test",
|
||||
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
|
||||
"repository": {
|
||||
"directory": "packages/xo-server-test",
|
||||
"type": "git",
|
||||
"url": "https://github.com/vatesfr/xen-orchestra.git"
|
||||
},
|
||||
"author": {
|
||||
"name": "Julien Fontanet",
|
||||
"email": "julien.fontanet@isonoe.net"
|
||||
},
|
||||
"preferGlobal": false,
|
||||
"main": "dist/",
|
||||
"bin": {},
|
||||
"files": [
|
||||
"dist/"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">=6"
|
||||
},
|
||||
"dependencies": {
|
||||
"@babel/cli": "^7.1.5",
|
||||
"@babel/core": "^7.1.6",
|
||||
"@babel/plugin-proposal-decorators": "^7.4.0",
|
||||
"@babel/preset-env": "^7.1.6",
|
||||
"@iarna/toml": "^2.2.1",
|
||||
"app-conf": "^0.7.0",
|
||||
"babel-plugin-lodash": "^3.2.11",
|
||||
"golike-defer": "^0.4.1",
|
||||
"jest": "^24.8.0",
|
||||
"lodash": "^4.17.11",
|
||||
"promise-toolbox": "^0.13.0",
|
||||
"xo-collection": "^0.4.1",
|
||||
"xo-common": "^0.2.0",
|
||||
"xo-lib": "^0.9.0"
|
||||
},
|
||||
"scripts": {
|
||||
"dev-test": "jest --bail --watch",
|
||||
"test": "jest"
|
||||
},
|
||||
"jest": {
|
||||
"modulePathIgnorePatterns": [
|
||||
"<rootDir>/src/old-tests"
|
||||
],
|
||||
"testEnvironment": "node",
|
||||
"testRegex": "\\.spec\\.js$",
|
||||
"maxConcurrency": 1
|
||||
}
|
||||
}
|
||||
22
packages/xo-server-test/sample.config.toml
Normal file
@@ -0,0 +1,22 @@
|
||||
[xoConnection]
|
||||
url = ''
|
||||
email = ''
|
||||
password = ''
|
||||
|
||||
[servers]
|
||||
[servers.default]
|
||||
username = ''
|
||||
password = ''
|
||||
host = ''
|
||||
|
||||
[vms]
|
||||
default = ''
|
||||
|
||||
[templates]
|
||||
default = ''
|
||||
|
||||
[srs]
|
||||
default = ''
|
||||
|
||||
[remotes]
|
||||
default = { name = '', url = '' }
|
||||
13
packages/xo-server-test/src/_config.js
Normal file
@@ -0,0 +1,13 @@
|
||||
import appConf from 'app-conf'
|
||||
import path from 'path'
|
||||
|
||||
/* eslint-env jest */
|
||||
|
||||
let config
|
||||
export { config as default }
|
||||
|
||||
beforeAll(async () => {
|
||||
config = await appConf.load('xo-server-test', {
|
||||
appDir: path.join(__dirname, '..'),
|
||||
})
|
||||
})
|
||||
6
packages/xo-server-test/src/_randomId.js
Normal file
@@ -0,0 +1,6 @@
|
||||
const randomId = () =>
|
||||
Math.random()
|
||||
.toString(36)
|
||||
.slice(2)
|
||||
|
||||
export { randomId as default }
|
||||
208
packages/xo-server-test/src/_xoConnection.js
Normal file
@@ -0,0 +1,208 @@
|
||||
/* eslint-env jest */
|
||||
import defer from 'golike-defer'
|
||||
import Xo from 'xo-lib'
|
||||
import XoCollection from 'xo-collection'
|
||||
import { find, forOwn } from 'lodash'
|
||||
import { fromEvent } from 'promise-toolbox'
|
||||
|
||||
import config from './_config'
|
||||
|
||||
const getDefaultCredentials = () => {
|
||||
const { email, password } = config.xoConnection
|
||||
return { email, password }
|
||||
}
|
||||
|
||||
class XoConnection extends Xo {
|
||||
constructor(opts) {
|
||||
super(opts)
|
||||
|
||||
const objects = (this._objects = new XoCollection())
|
||||
const watchers = (this._watchers = {})
|
||||
this._tempResourceDisposers = []
|
||||
this._durableResourceDisposers = []
|
||||
|
||||
this.on('notification', ({ method, params }) => {
|
||||
if (method !== 'all') {
|
||||
return
|
||||
}
|
||||
|
||||
const fn = params.type === 'exit' ? objects.unset : objects.set
|
||||
forOwn(params.items, (item, id) => {
|
||||
fn.call(objects, id, item)
|
||||
|
||||
const watcher = watchers[id]
|
||||
if (watcher !== undefined) {
|
||||
watcher(item)
|
||||
delete watchers[id]
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
get objects() {
|
||||
return this._objects
|
||||
}
|
||||
|
||||
async _fetchObjects() {
|
||||
const { _objects: objects, _watchers: watchers } = this
|
||||
forOwn(await this.call('xo.getAllObjects'), (object, id) => {
|
||||
objects.set(id, object)
|
||||
|
||||
const watcher = watchers[id]
|
||||
if (watcher !== undefined) {
|
||||
watcher(object)
|
||||
delete watchers[id]
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// TODO: integrate in xo-lib.
|
||||
waitObject(id) {
|
||||
return new Promise(resolve => {
|
||||
this._watchers[id] = resolve
|
||||
}) // FIXME: work with multiple listeners.
|
||||
}
|
||||
|
||||
async getOrWaitObject(id) {
|
||||
const object = this._objects.all[id]
|
||||
if (object !== undefined) {
|
||||
return object
|
||||
}
|
||||
return this.waitObject(id)
|
||||
}
|
||||
|
||||
@defer
|
||||
async connect($defer, credentials = getDefaultCredentials()) {
|
||||
await this.open()
|
||||
$defer.onFailure(() => this.close())
|
||||
|
||||
await this.signIn(credentials)
|
||||
await this._fetchObjects()
|
||||
|
||||
return this
|
||||
}
|
||||
|
||||
async waitObjectState(id, predicate) {
|
||||
let obj = this._objects.all[id]
|
||||
while (true) {
|
||||
try {
|
||||
await predicate(obj)
|
||||
return
|
||||
} catch (_) {}
|
||||
// If failed, wait for next object state/update and retry.
|
||||
obj = await this.waitObject(id)
|
||||
}
|
||||
}
|
||||
|
||||
async createTempUser(params) {
|
||||
const id = await this.call('user.create', params)
|
||||
this._tempResourceDisposers.push('user.delete', { id })
|
||||
return id
|
||||
}
|
||||
|
||||
async getUser(id) {
|
||||
return find(await super.call('user.getAll'), { id })
|
||||
}
|
||||
|
||||
async createTempJob(params) {
|
||||
const id = await this.call('job.create', { job: params })
|
||||
this._tempResourceDisposers.push('job.delete', { id })
|
||||
return id
|
||||
}
|
||||
|
||||
async createTempBackupNgJob(params) {
|
||||
const job = await this.call('backupNg.createJob', params)
|
||||
this._tempResourceDisposers.push('backupNg.deleteJob', { id: job.id })
|
||||
return job
|
||||
}
|
||||
|
||||
async createTempVm(params) {
|
||||
const id = await this.call('vm.create', params)
|
||||
this._tempResourceDisposers.push('vm.delete', { id })
|
||||
await this.waitObjectState(id, vm => {
|
||||
if (vm.type !== 'VM') throw new Error('retry')
|
||||
})
|
||||
return id
|
||||
}
|
||||
|
||||
async createTempRemote(params) {
|
||||
const remote = await this.call('remote.create', params)
|
||||
this._tempResourceDisposers.push('remote.delete', { id: remote.id })
|
||||
return remote
|
||||
}
|
||||
|
||||
async createTempServer(params) {
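// reuse an already-registered server when possible; only servers added or
// enabled here are pushed to the durable disposers and cleaned up at the end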
|
||||
const servers = await this.call('server.getAll')
|
||||
const server = servers.find(server => server.host === params.host)
|
||||
if (server !== undefined) {
|
||||
if (server.status === 'disconnected') {
|
||||
await this.call('server.enable', { id: server.id })
|
||||
this._durableResourceDisposers.push('server.disable', { id: server.id })
|
||||
await fromEvent(this._objects, 'finish')
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
const id = await this.call('server.add', {
|
||||
...params,
|
||||
allowUnauthorized: true,
|
||||
autoConnect: false,
|
||||
})
|
||||
this._durableResourceDisposers.push('server.remove', { id })
|
||||
await this.call('server.enable', { id })
|
||||
await fromEvent(this._objects, 'finish')
|
||||
}
|
||||
|
||||
async getSchedule(predicate) {
|
||||
return find(await this.call('schedule.getAll'), predicate)
|
||||
}
|
||||
|
||||
async _cleanDisposers(disposers) {
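// disposers is a flat list of [method, params, method, params, ...] pairs;
// walk it backwards so resources are deleted in reverse creation order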
|
||||
for (let n = disposers.length - 1; n > 0; ) {
|
||||
const params = disposers[n--]
|
||||
const method = disposers[n--]
|
||||
await this.call(method, params).catch(error => {
|
||||
console.warn('deleteTempResources', method, params, error)
|
||||
})
|
||||
}
|
||||
disposers.length = 0
|
||||
}
|
||||
|
||||
async deleteTempResources() {
|
||||
await this._cleanDisposers(this._tempResourceDisposers)
|
||||
}
|
||||
|
||||
async deleteDurableResources() {
|
||||
await this._cleanDisposers(this._durableResourceDisposers)
|
||||
}
|
||||
}
|
||||
|
||||
const getConnection = credentials => {
|
||||
const xo = new XoConnection({ url: config.xoConnection.url })
|
||||
return xo.connect(credentials)
|
||||
}
|
||||
|
||||
let xo
|
||||
beforeAll(async () => {
|
||||
// TOFIX: stop tests if the connection is not established properly and show the error
|
||||
xo = await getConnection()
|
||||
})
|
||||
afterAll(async () => {
|
||||
await xo.deleteDurableResources()
|
||||
await xo.close()
|
||||
xo = null
|
||||
})
|
||||
afterEach(() => xo.deleteTempResources())
|
||||
|
||||
export { xo as default }
|
||||
|
||||
export const testConnection = ({ credentials }) =>
|
||||
getConnection(credentials).then(connection => connection.close())
|
||||
|
||||
export const testWithOtherConnection = defer(
|
||||
async ($defer, credentials, functionToExecute) => {
|
||||
const xoUser = await getConnection(credentials)
|
||||
$defer(() => xoUser.close())
|
||||
await functionToExecute(xoUser)
|
||||
}
|
||||
)
|
||||
170
packages/xo-server-test/src/backupNg/__snapshots__/backupNg.spec.js.snap
Normal file
@@ -0,0 +1,170 @@
|
||||
// Jest Snapshot v1, https://goo.gl/fbAQLP
|
||||
|
||||
exports[`backupNg .createJob() : creates a new backup job with schedules 1`] = `
|
||||
Object {
|
||||
"id": Any<String>,
|
||||
"mode": "full",
|
||||
"name": "default-backupNg",
|
||||
"settings": Any<Object>,
|
||||
"type": "backup",
|
||||
"userId": Any<String>,
|
||||
"vms": Any<Object>,
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg .createJob() : creates a new backup job with schedules 2`] = `
|
||||
Object {
|
||||
"cron": "0 * * * * *",
|
||||
"enabled": false,
|
||||
"id": Any<String>,
|
||||
"jobId": Any<String>,
|
||||
"name": "scheduleTest",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg .createJob() : creates a new backup job without schedules 1`] = `
|
||||
Object {
|
||||
"id": Any<String>,
|
||||
"mode": "full",
|
||||
"name": "default-backupNg",
|
||||
"settings": Object {
|
||||
"": Object {
|
||||
"reportWhen": "never",
|
||||
},
|
||||
},
|
||||
"type": "backup",
|
||||
"userId": Any<String>,
|
||||
"vms": Any<Object>,
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg .runJob() : fails trying to run a backup job with a VM without disks 1`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"mode": "full",
|
||||
"reportWhen": "never",
|
||||
},
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"jobId": Any<String>,
|
||||
"jobName": "default-backupNg",
|
||||
"message": "backup",
|
||||
"scheduleId": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "skipped",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg .runJob() : fails trying to run a backup job with a VM without disks 2`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"id": Any<String>,
|
||||
"type": "VM",
|
||||
},
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"result": Object {
|
||||
"message": "no disks found",
|
||||
"name": "Error",
|
||||
"stack": Any<String>,
|
||||
},
|
||||
"start": Any<Number>,
|
||||
"status": "skipped",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg .runJob() : fails trying to run a backup job with no matching VMs 1`] = `[JsonRpcError: unknown error from the peer]`;
|
||||
|
||||
exports[`backupNg .runJob() : fails trying to run a backup job with non-existent vm 1`] = `
|
||||
Array [
|
||||
Object {
|
||||
"data": Object {
|
||||
"vms": Array [
|
||||
"non-existent-id",
|
||||
],
|
||||
},
|
||||
"message": "missingVms",
|
||||
},
|
||||
]
|
||||
`;
|
||||
|
||||
exports[`backupNg .runJob() : fails trying to run a backup job without schedule 1`] = `[JsonRpcError: invalid parameters]`;
|
||||
|
||||
exports[`backupNg .runJob() : fails trying to run backup job without retentions 1`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"mode": "full",
|
||||
"reportWhen": "never",
|
||||
},
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"jobId": Any<String>,
|
||||
"jobName": "default-backupNg",
|
||||
"message": "backup",
|
||||
"scheduleId": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "failure",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg .runJob() : fails trying to run backup job without retentions 2`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"id": Any<String>,
|
||||
"type": "VM",
|
||||
},
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"result": Object {
|
||||
"message": "copy, export and snapshot retentions cannot both be 0",
|
||||
"name": "Error",
|
||||
"stack": Any<String>,
|
||||
},
|
||||
"start": Any<Number>,
|
||||
"status": "failure",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a rolling snapshot with 2 as retention & revert to an old state 1`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"mode": "full",
|
||||
"reportWhen": "never",
|
||||
},
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"jobId": Any<String>,
|
||||
"jobName": "default-backupNg",
|
||||
"message": "backup",
|
||||
"scheduleId": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a rolling snapshot with 2 as retention & revert to an old state 2`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": "snapshot",
|
||||
"result": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a rolling snapshot with 2 as retention & revert to an old state 3`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"id": Any<String>,
|
||||
"type": "VM",
|
||||
},
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
392
packages/xo-server-test/src/backupNg/backupNg.spec.js
Normal file
@@ -0,0 +1,392 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
import { noSuchObject } from 'xo-common/api-errors'
|
||||
|
||||
import config from '../_config'
|
||||
import randomId from '../_randomId'
|
||||
import xo from '../_xoConnection'
|
||||
|
||||
const DEFAULT_SCHEDULE = {
|
||||
name: 'scheduleTest',
|
||||
cron: '0 * * * * *',
|
||||
}
|
||||
|
||||
describe('backupNg', () => {
|
||||
let defaultBackupNg
|
||||
|
||||
beforeAll(() => {
|
||||
defaultBackupNg = {
|
||||
name: 'default-backupNg',
|
||||
mode: 'full',
|
||||
vms: {
|
||||
id: config.vms.default,
|
||||
},
|
||||
settings: {
|
||||
'': {
|
||||
reportWhen: 'never',
|
||||
},
|
||||
},
|
||||
}
|
||||
})
|
||||
|
||||
describe('.createJob() :', () => {
|
||||
it('creates a new backup job without schedules', async () => {
|
||||
const backupNg = await xo.createTempBackupNgJob(defaultBackupNg)
|
||||
expect(backupNg).toMatchSnapshot({
|
||||
id: expect.any(String),
|
||||
userId: expect.any(String),
|
||||
vms: expect.any(Object),
|
||||
})
|
||||
expect(backupNg.vms).toEqual(defaultBackupNg.vms)
|
||||
expect(backupNg.userId).toBe(xo._user.id)
|
||||
})
|
||||
|
||||
it('creates a new backup job with schedules', async () => {
|
||||
const scheduleTempId = randomId()
|
||||
const { id: jobId } = await xo.createTempBackupNgJob({
|
||||
...defaultBackupNg,
|
||||
schedules: {
|
||||
[scheduleTempId]: DEFAULT_SCHEDULE,
|
||||
},
|
||||
settings: {
|
||||
...defaultBackupNg.settings,
|
||||
[scheduleTempId]: { snapshotRetention: 1 },
|
||||
},
|
||||
})
|
||||
|
||||
const backupNgJob = await xo.call('backupNg.getJob', { id: jobId })
|
||||
|
||||
expect(backupNgJob).toMatchSnapshot({
|
||||
id: expect.any(String),
|
||||
userId: expect.any(String),
|
||||
settings: expect.any(Object),
|
||||
vms: expect.any(Object),
|
||||
})
|
||||
expect(backupNgJob.vms).toEqual(defaultBackupNg.vms)
|
||||
expect(backupNgJob.userId).toBe(xo._user.id)
|
||||
|
||||
expect(Object.keys(backupNgJob.settings).length).toBe(2)
|
||||
const schedule = await xo.getSchedule({ jobId })
|
||||
expect(typeof schedule).toBe('object')
|
||||
expect(backupNgJob.settings[schedule.id]).toEqual({
|
||||
snapshotRetention: 1,
|
||||
})
|
||||
|
||||
expect(schedule).toMatchSnapshot({
|
||||
id: expect.any(String),
|
||||
jobId: expect.any(String),
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe('.delete() :', () => {
|
||||
it('deletes a backup job', async () => {
|
||||
const scheduleTempId = randomId()
|
||||
const { id: jobId } = await xo.call('backupNg.createJob', {
|
||||
...defaultBackupNg,
|
||||
schedules: {
|
||||
[scheduleTempId]: DEFAULT_SCHEDULE,
|
||||
},
|
||||
settings: {
|
||||
...defaultBackupNg.settings,
|
||||
[scheduleTempId]: { snapshotRetention: 1 },
|
||||
},
|
||||
})
|
||||
|
||||
const schedule = await xo.getSchedule({ jobId })
|
||||
expect(typeof schedule).toBe('object')
|
||||
|
||||
await xo.call('backupNg.deleteJob', { id: jobId })
|
||||
|
||||
let isRejectedJobErrorValid = false
|
||||
await xo.call('backupNg.getJob', { id: jobId }).catch(error => {
|
||||
isRejectedJobErrorValid = noSuchObject.is(error)
|
||||
})
|
||||
expect(isRejectedJobErrorValid).toBe(true)
|
||||
|
||||
let isRejectedScheduleErrorValid = false
|
||||
await xo.call('schedule.get', { id: schedule.id }).catch(error => {
|
||||
isRejectedScheduleErrorValid = noSuchObject.is(error)
|
||||
})
|
||||
expect(isRejectedScheduleErrorValid).toBe(true)
|
||||
})
|
||||
})
|
||||
|
||||
describe('.runJob() :', () => {
|
||||
it('fails trying to run a backup job without schedule', async () => {
|
||||
const { id } = await xo.createTempBackupNgJob(defaultBackupNg)
|
||||
await expect(xo.call('backupNg.runJob', { id })).rejects.toMatchSnapshot()
|
||||
})
|
||||
|
||||
it('fails trying to run a backup job with no matching VMs', async () => {
|
||||
const scheduleTempId = randomId()
|
||||
const { id: jobId } = await xo.createTempBackupNgJob({
|
||||
...defaultBackupNg,
|
||||
schedules: {
|
||||
[scheduleTempId]: DEFAULT_SCHEDULE,
|
||||
},
|
||||
settings: {
|
||||
[scheduleTempId]: { snapshotRetention: 1 },
|
||||
},
|
||||
vms: {
|
||||
id: config.vms.default,
|
||||
name: 'test-vm-backupNg',
|
||||
},
|
||||
})
|
||||
|
||||
const schedule = await xo.getSchedule({ jobId })
|
||||
expect(typeof schedule).toBe('object')
|
||||
|
||||
await expect(
|
||||
xo.call('backupNg.runJob', { id: jobId, schedule: schedule.id })
|
||||
).rejects.toMatchSnapshot()
|
||||
})
|
||||
|
||||
it('fails trying to run a backup job with non-existent vm', async () => {
|
||||
jest.setTimeout(7e3)
|
||||
const scheduleTempId = randomId()
|
||||
const { id: jobId } = await xo.createTempBackupNgJob({
|
||||
...defaultBackupNg,
|
||||
schedules: {
|
||||
[scheduleTempId]: DEFAULT_SCHEDULE,
|
||||
},
|
||||
settings: {
|
||||
[scheduleTempId]: { snapshotRetention: 1 },
|
||||
},
|
||||
vms: {
|
||||
id: 'non-existent-id',
|
||||
},
|
||||
})
|
||||
|
||||
const schedule = await xo.getSchedule({ jobId })
|
||||
expect(typeof schedule).toBe('object')
|
||||
|
||||
await xo.call('backupNg.runJob', { id: jobId, schedule: schedule.id })
|
||||
const [log] = await xo.call('backupNg.getLogs', {
|
||||
scheduleId: schedule.id,
|
||||
})
|
||||
expect(log.warnings).toMatchSnapshot()
|
||||
})
|
||||
|
||||
it('fails trying to run a backup job with a VM without disks', async () => {
|
||||
jest.setTimeout(8e3)
|
||||
await xo.createTempServer(config.servers.default)
|
||||
const vmIdWithoutDisks = await xo.createTempVm({
|
||||
name_label: 'XO Test Without Disks',
|
||||
name_description: 'Creating a vm without disks',
|
||||
template: config.templates.default,
|
||||
})
|
||||
|
||||
const scheduleTempId = randomId()
|
||||
const { id: jobId } = await xo.createTempBackupNgJob({
|
||||
...defaultBackupNg,
|
||||
schedules: {
|
||||
[scheduleTempId]: DEFAULT_SCHEDULE,
|
||||
},
|
||||
settings: {
|
||||
...defaultBackupNg.settings,
|
||||
[scheduleTempId]: { snapshotRetention: 1 },
|
||||
},
|
||||
vms: {
|
||||
id: vmIdWithoutDisks,
|
||||
},
|
||||
})
|
||||
|
||||
const schedule = await xo.getSchedule({ jobId })
|
||||
expect(typeof schedule).toBe('object')
|
||||
await xo.call('backupNg.runJob', { id: jobId, schedule: schedule.id })
|
||||
|
||||
const [
|
||||
{
|
||||
tasks: [vmTask],
|
||||
...log
|
||||
},
|
||||
] = await xo.call('backupNg.getLogs', {
|
||||
jobId,
|
||||
scheduleId: schedule.id,
|
||||
})
|
||||
expect(log).toMatchSnapshot({
|
||||
end: expect.any(Number),
|
||||
id: expect.any(String),
|
||||
jobId: expect.any(String),
|
||||
scheduleId: expect.any(String),
|
||||
start: expect.any(Number),
|
||||
})
|
||||
|
||||
expect(vmTask).toMatchSnapshot({
|
||||
end: expect.any(Number),
|
||||
data: {
|
||||
id: expect.any(String),
|
||||
},
|
||||
id: expect.any(String),
|
||||
message: expect.any(String),
|
||||
result: {
|
||||
stack: expect.any(String),
|
||||
},
|
||||
start: expect.any(Number),
|
||||
})
|
||||
|
||||
expect(vmTask.data.id).toBe(vmIdWithoutDisks)
|
||||
})
|
||||
|
||||
it('fails trying to run backup job without retentions', async () => {
|
||||
jest.setTimeout(7e3)
|
||||
const scheduleTempId = randomId()
|
||||
await xo.createTempServer(config.servers.default)
|
||||
const { id: remoteId } = await xo.createTempRemote(config.remotes.default)
|
||||
const { id: jobId } = await xo.createTempBackupNgJob({
|
||||
...defaultBackupNg,
|
||||
remotes: {
|
||||
id: remoteId,
|
||||
},
|
||||
schedules: {
|
||||
[scheduleTempId]: DEFAULT_SCHEDULE,
|
||||
},
|
||||
settings: {
|
||||
...defaultBackupNg.settings,
|
||||
[scheduleTempId]: {},
|
||||
},
|
||||
srs: {
|
||||
id: config.srs.default,
|
||||
},
|
||||
})
|
||||
|
||||
const schedule = await xo.getSchedule({ jobId })
|
||||
expect(typeof schedule).toBe('object')
|
||||
await xo.call('backupNg.runJob', { id: jobId, schedule: schedule.id })
|
||||
|
||||
const [
|
||||
{
|
||||
tasks: [task],
|
||||
...log
|
||||
},
|
||||
] = await xo.call('backupNg.getLogs', {
|
||||
jobId,
|
||||
scheduleId: schedule.id,
|
||||
})
|
||||
|
||||
expect(log).toMatchSnapshot({
|
||||
end: expect.any(Number),
|
||||
id: expect.any(String),
|
||||
jobId: expect.any(String),
|
||||
scheduleId: expect.any(String),
|
||||
start: expect.any(Number),
|
||||
})
|
||||
|
||||
expect(task).toMatchSnapshot({
|
||||
data: {
|
||||
id: expect.any(String),
|
||||
},
|
||||
end: expect.any(Number),
|
||||
id: expect.any(String),
|
||||
message: expect.any(String),
|
||||
result: {
|
||||
stack: expect.any(String),
|
||||
},
|
||||
start: expect.any(Number),
|
||||
})
|
||||
expect(task.data.id).toBe(config.vms.default)
|
||||
})
|
||||
})
|
||||
|
||||
test('execute three times a rolling snapshot with 2 as retention & revert to an old state', async () => {
|
||||
jest.setTimeout(6e4)
|
||||
await xo.createTempServer(config.servers.default)
|
||||
const vmId = await xo.createTempVm({
|
||||
name_label: 'XO Test Temp',
|
||||
name_description: 'Creating a temporary vm',
|
||||
template: config.templates.default,
|
||||
VDIs: [
|
||||
{
|
||||
size: 1,
|
||||
SR: config.srs.default,
|
||||
type: 'user',
|
||||
},
|
||||
],
|
||||
})
|
||||
|
||||
const scheduleTempId = randomId()
|
||||
const { id: jobId } = await xo.createTempBackupNgJob({
|
||||
...defaultBackupNg,
|
||||
vms: {
|
||||
id: vmId,
|
||||
},
|
||||
schedules: {
|
||||
[scheduleTempId]: DEFAULT_SCHEDULE,
|
||||
},
|
||||
settings: {
|
||||
...defaultBackupNg.settings,
|
||||
[scheduleTempId]: { snapshotRetention: 2 },
|
||||
},
|
||||
})
|
||||
|
||||
const schedule = await xo.getSchedule({ jobId })
|
||||
expect(typeof schedule).toBe('object')
|
||||
for (let i = 0; i < 3; i++) {
|
||||
const oldSnapshots = xo.objects.all[vmId].snapshots
|
||||
await xo.call('backupNg.runJob', { id: jobId, schedule: schedule.id })
|
||||
await xo.waitObjectState(vmId, ({ snapshots }) => {
|
||||
// Test on updating snapshots.
|
||||
expect(snapshots).not.toEqual(oldSnapshots)
|
||||
})
|
||||
}
|
||||
|
||||
const { snapshots, videoram: oldVideoram } = xo.objects.all[vmId]
|
||||
|
||||
// Test on the retention, how many snapshots should be saved.
|
||||
expect(snapshots.length).toBe(2)
|
||||
|
||||
const newVideoram = 16
|
||||
await xo.call('vm.set', { id: vmId, videoram: newVideoram })
|
||||
await xo.waitObjectState(vmId, ({ videoram }) => {
|
||||
expect(videoram).toBe(newVideoram.toString())
|
||||
})
|
||||
|
||||
await xo.call('vm.revert', {
|
||||
snapshot: snapshots[0],
|
||||
})
|
||||
|
||||
await xo.waitObjectState(vmId, ({ videoram }) => {
|
||||
expect(videoram).toBe(oldVideoram)
|
||||
})
|
||||
|
||||
const [
|
||||
{
|
||||
tasks: [{ tasks: subTasks, ...vmTask }],
|
||||
...log
|
||||
},
|
||||
] = await xo.call('backupNg.getLogs', {
|
||||
jobId,
|
||||
scheduleId: schedule.id,
|
||||
})
|
||||
|
||||
expect(log).toMatchSnapshot({
|
||||
end: expect.any(Number),
|
||||
id: expect.any(String),
|
||||
jobId: expect.any(String),
|
||||
scheduleId: expect.any(String),
|
||||
start: expect.any(Number),
|
||||
})
|
||||
|
||||
const subTaskSnapshot = subTasks.find(
|
||||
({ message }) => message === 'snapshot'
|
||||
)
|
||||
expect(subTaskSnapshot).toMatchSnapshot({
|
||||
end: expect.any(Number),
|
||||
id: expect.any(String),
|
||||
result: expect.any(String),
|
||||
start: expect.any(Number),
|
||||
})
|
||||
|
||||
expect(vmTask).toMatchSnapshot({
|
||||
data: {
|
||||
id: expect.any(String),
|
||||
},
|
||||
end: expect.any(Number),
|
||||
id: expect.any(String),
|
||||
message: expect.any(String),
|
||||
start: expect.any(Number),
|
||||
})
|
||||
expect(vmTask.data.id).toBe(vmId)
|
||||
})
|
||||
})
|
||||
76
packages/xo-server-test/src/job/__snapshots__/job.spec.js.snap
Normal file
@@ -0,0 +1,76 @@
|
||||
// Jest Snapshot v1, https://goo.gl/fbAQLP
|
||||
|
||||
exports[`job .create() : creates a new job 1`] = `
|
||||
Object {
|
||||
"id": Any<String>,
|
||||
"key": "snapshot",
|
||||
"method": "vm.snapshot",
|
||||
"name": "jobTest",
|
||||
"paramsVector": Any<Object>,
|
||||
"timeout": 2000,
|
||||
"type": "call",
|
||||
"userId": Any<String>,
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`job .create() : fails trying to create a job without job params 1`] = `[JsonRpcError: invalid parameters]`;
|
||||
|
||||
exports[`job .delete() : deletes an existing job 1`] = `[JsonRpcError: no such job [object Object]]`;
|
||||
|
||||
exports[`job .delete() : deletes an existing job 2`] = `[JsonRpcError: no such schedule [object Object]]`;
|
||||
|
||||
exports[`job .get() : fails trying to get a job with a non existent id 1`] = `[JsonRpcError: no such job [object Object]]`;
|
||||
|
||||
exports[`job .get() : gets an existing job 1`] = `
|
||||
Object {
|
||||
"id": Any<String>,
|
||||
"key": "snapshot",
|
||||
"method": "vm.snapshot",
|
||||
"name": "jobTest",
|
||||
"paramsVector": Any<Object>,
|
||||
"timeout": 2000,
|
||||
"type": "call",
|
||||
"userId": Any<String>,
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`job .getAll() : gets all available jobs 1`] = `
|
||||
Object {
|
||||
"id": Any<String>,
|
||||
"key": "snapshot",
|
||||
"method": "vm.snapshot",
|
||||
"name": "jobTest",
|
||||
"paramsVector": Any<Object>,
|
||||
"timeout": 2000,
|
||||
"type": "call",
|
||||
"userId": Any<String>,
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`job .getAll() : gets all available jobs 2`] = `
|
||||
Object {
|
||||
"id": Any<String>,
|
||||
"key": "snapshot",
|
||||
"method": "vm.snapshot",
|
||||
"name": "jobTest2",
|
||||
"paramsVector": Any<Object>,
|
||||
"timeout": 2000,
|
||||
"type": "call",
|
||||
"userId": Any<String>,
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`job .set() : fails trying to set a job without job.id 1`] = `[JsonRpcError: invalid parameters]`;
|
||||
|
||||
exports[`job .set() : sets a job 1`] = `
|
||||
Object {
|
||||
"id": Any<String>,
|
||||
"key": "snapshot",
|
||||
"method": "vm.clone",
|
||||
"name": "jobTest",
|
||||
"paramsVector": Any<Object>,
|
||||
"timeout": 2000,
|
||||
"type": "call",
|
||||
"userId": Any<String>,
|
||||
}
|
||||
`;
|
||||
226
packages/xo-server-test/src/job/job.spec.js
Normal file
@@ -0,0 +1,226 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
import { difference, keyBy } from 'lodash'
|
||||
|
||||
import config from '../_config'
|
||||
import xo, { testWithOtherConnection } from '../_xoConnection'
|
||||
|
||||
const ADMIN_USER = {
|
||||
email: 'admin2@admin.net',
|
||||
password: 'admin',
|
||||
permission: 'admin',
|
||||
}
|
||||
|
||||
describe('job', () => {
|
||||
let defaultJob
|
||||
|
||||
beforeAll(() => {
|
||||
defaultJob = {
|
||||
name: 'jobTest',
|
||||
timeout: 2000,
|
||||
type: 'call',
|
||||
key: 'snapshot',
|
||||
method: 'vm.snapshot',
|
||||
paramsVector: {
|
||||
type: 'crossProduct',
|
||||
items: [
|
||||
{
|
||||
type: 'set',
|
||||
values: [
|
||||
{
|
||||
id: config.vms.default,
|
||||
name: 'test-snapshot',
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
},
|
||||
}
|
||||
})
|
||||
|
||||
describe('.create() :', () => {
|
||||
it('creates a new job', async () => {
|
||||
jest.setTimeout(6e3)
|
||||
const userId = await xo.createTempUser(ADMIN_USER)
|
||||
const { email, password } = ADMIN_USER
|
||||
await testWithOtherConnection({ email, password }, async xo => {
|
||||
const id = await xo.call('job.create', { job: defaultJob })
|
||||
expect(typeof id).toBe('string')
|
||||
|
||||
const job = await xo.call('job.get', { id })
|
||||
expect(job).toMatchSnapshot({
|
||||
id: expect.any(String),
|
||||
paramsVector: expect.any(Object),
|
||||
userId: expect.any(String),
|
||||
})
|
||||
expect(job.paramsVector).toEqual(defaultJob.paramsVector)
|
||||
expect(job.userId).toBe(userId)
|
||||
await xo.call('job.delete', { id })
|
||||
})
|
||||
})
|
||||
|
||||
it('creates a job with a userId', async () => {
|
||||
const userId = await xo.createTempUser(ADMIN_USER)
|
||||
const id = await xo.createTempJob({ ...defaultJob, userId })
|
||||
const { userId: expectedUserId } = await xo.call('job.get', { id })
|
||||
expect(userId).toBe(expectedUserId)
|
||||
})
|
||||
|
||||
it('fails trying to create a job without job params', async () => {
|
||||
await expect(xo.createTempJob({})).rejects.toMatchSnapshot()
|
||||
})
|
||||
})
|
||||
|
||||
describe('.getAll() :', () => {
|
||||
it('gets all available jobs', async () => {
|
||||
const jobId1 = await xo.createTempJob(defaultJob)
|
||||
const job2 = {
|
||||
...defaultJob,
|
||||
name: 'jobTest2',
|
||||
paramsVector: {
|
||||
type: 'crossProduct',
|
||||
items: [
|
||||
{
|
||||
type: 'set',
|
||||
values: [
|
||||
{
|
||||
id: config.vms.default,
|
||||
name: 'test2-snapshot',
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
},
|
||||
}
|
||||
const jobId2 = await xo.createTempJob(job2)
|
||||
let jobs = await xo.call('job.getAll')
|
||||
expect(Array.isArray(jobs)).toBe(true)
|
||||
jobs = keyBy(jobs, 'id')
|
||||
|
||||
const newJob1 = jobs[jobId1]
|
||||
expect(newJob1).toMatchSnapshot({
|
||||
id: expect.any(String),
|
||||
paramsVector: expect.any(Object),
|
||||
userId: expect.any(String),
|
||||
})
|
||||
expect(newJob1.paramsVector).toEqual(defaultJob.paramsVector)
|
||||
|
||||
const newJob2 = jobs[jobId2]
|
||||
expect(newJob2).toMatchSnapshot({
|
||||
id: expect.any(String),
|
||||
paramsVector: expect.any(Object),
|
||||
userId: expect.any(String),
|
||||
})
|
||||
expect(newJob2.paramsVector).toEqual(job2.paramsVector)
|
||||
})
|
||||
})
|
||||
|
||||
describe('.get() :', () => {
|
||||
it('gets an existing job', async () => {
|
||||
const id = await xo.createTempJob(defaultJob)
|
||||
const job = await xo.call('job.get', { id })
|
||||
expect(job).toMatchSnapshot({
|
||||
id: expect.any(String),
|
||||
paramsVector: expect.any(Object),
|
||||
userId: expect.any(String),
|
||||
})
|
||||
expect(job.paramsVector).toEqual(defaultJob.paramsVector)
|
||||
})
|
||||
|
||||
it('fails trying to get a job with a non existent id', async () => {
|
||||
await expect(
|
||||
xo.call('job.get', { id: 'non-existent-id' })
|
||||
).rejects.toMatchSnapshot()
|
||||
})
|
||||
})
|
||||
|
||||
describe('.set() :', () => {
|
||||
it('sets a job', async () => {
|
||||
const id = await xo.createTempJob(defaultJob)
|
||||
const job = {
|
||||
id,
|
||||
type: 'call',
|
||||
key: 'snapshot',
|
||||
method: 'vm.clone',
|
||||
paramsVector: {
|
||||
type: 'crossProduct',
|
||||
items: [
|
||||
{
|
||||
type: 'set',
|
||||
values: [
|
||||
{
|
||||
id: config.vms.default,
|
||||
name: 'clone',
|
||||
full_copy: true,
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
},
|
||||
}
|
||||
|
||||
await xo.call('job.set', {
|
||||
job,
|
||||
})
|
||||
|
||||
const newJob = await xo.call('job.get', { id })
|
||||
expect(newJob).toMatchSnapshot({
|
||||
id: expect.any(String),
|
||||
paramsVector: expect.any(Object),
|
||||
userId: expect.any(String),
|
||||
})
|
||||
expect(newJob.paramsVector).toEqual(job.paramsVector)
|
||||
})
|
||||
|
||||
it('fails trying to set a job without job.id', async () => {
|
||||
await expect(xo.call('job.set', defaultJob)).rejects.toMatchSnapshot()
|
||||
})
|
||||
})
|
||||
|
||||
describe('.delete() :', () => {
|
||||
it('deletes an existing job', async () => {
|
||||
const id = await xo.call('job.create', { job: defaultJob })
|
||||
const { id: scheduleId } = await xo.call('schedule.create', {
|
||||
jobId: id,
|
||||
cron: '* * * * * *',
|
||||
enabled: false,
|
||||
})
|
||||
await xo.call('job.delete', { id })
|
||||
await expect(xo.call('job.get', { id })).rejects.toMatchSnapshot()
|
||||
await expect(
|
||||
xo.call('schedule.get', { id: scheduleId })
|
||||
).rejects.toMatchSnapshot()
|
||||
})
|
||||
|
||||
it.skip('fails trying to delete a job with a non existent id', async () => {
|
||||
await expect(
|
||||
xo.call('job.delete', { id: 'non-existent-id' })
|
||||
).rejects.toMatchSnapshot()
|
||||
})
|
||||
})
|
||||
|
||||
describe('.runSequence() :', () => {
|
||||
let id
|
||||
|
||||
afterEach(async () => {
|
||||
await xo
|
||||
.call('vm.delete', { id, deleteDisks: true })
|
||||
.catch(error => console.error(error))
|
||||
})
|
||||
|
||||
it('runs a job', async () => {
|
||||
jest.setTimeout(7e3)
|
||||
await xo.createTempServer(config.servers.default)
|
||||
const jobId = await xo.createTempJob(defaultJob)
|
||||
const snapshots = xo.objects.all[config.vms.default].snapshots
|
||||
await xo.call('job.runSequence', { idSequence: [jobId] })
|
||||
await xo.waitObjectState(
|
||||
config.vms.default,
|
||||
({ snapshots: actualSnapshots }) => {
|
||||
expect(actualSnapshots.length).toBe(snapshots.length + 1)
|
||||
id = difference(actualSnapshots, snapshots)[0]
|
||||
}
|
||||
)
|
||||
})
|
||||
})
|
||||
})
|
||||
156
packages/xo-server-test/src/old-tests/disk.spec.js
Normal file
@@ -0,0 +1,156 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
// Doc: https://github.com/moll/js-must/blob/master/doc/API.md#must
|
||||
import expect from 'must'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
import { getConfig, getMainConnection, getSrId, waitObjectState } from './util'
|
||||
import { map, assign } from 'lodash'
|
||||
import eventToPromise from 'event-to-promise'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
describe('disk', () => {
|
||||
let diskId
|
||||
let diskIds = []
|
||||
let serverId
|
||||
let srId
|
||||
let xo
|
||||
|
||||
// -----------------------------------------------------------------
|
||||
|
||||
beforeAll(async () => {
|
||||
jest.setTimeout(10e3)
|
||||
xo = await getMainConnection()
|
||||
|
||||
const config = await getConfig()
|
||||
serverId = await xo.call(
|
||||
'server.add',
|
||||
assign({ autoConnect: false }, config.xenServer1)
|
||||
)
|
||||
await xo.call('server.connect', { id: serverId })
|
||||
await eventToPromise(xo.objects, 'finish')
|
||||
srId = await getSrId(xo)
|
||||
})
|
||||
|
||||
// -----------------------------------------------------------------
|
||||
|
||||
afterEach(async () => {
|
||||
await Promise.all(
|
||||
map(diskIds, diskId => xo.call('vdi.delete', { id: diskId }))
|
||||
)
|
||||
diskIds = []
|
||||
})
|
||||
|
||||
// -----------------------------------------------------------------
|
||||
|
||||
afterAll(async () => {
|
||||
await xo.call('server.remove', { id: serverId })
|
||||
})
|
||||
|
||||
// -----------------------------------------------------------------
|
||||
|
||||
async function createDisk(params) {
|
||||
const id = await xo.call('disk.create', params)
|
||||
diskIds.push(id)
|
||||
return id
|
||||
}
|
||||
|
||||
async function createDiskTest() {
|
||||
const id = await createDisk({
|
||||
name: 'diskTest',
|
||||
size: '1GB',
|
||||
sr: srId,
|
||||
})
|
||||
return id
|
||||
}
|
||||
|
||||
// ===================================================================
|
||||
|
||||
describe('.create()', () => {
|
||||
it('create a new disk on a SR', async () => {
|
||||
diskId = await createDisk({
|
||||
name: 'diskTest',
|
||||
size: '1GB',
|
||||
sr: srId,
|
||||
})
|
||||
|
||||
await Promise.all([
|
||||
waitObjectState(xo, diskId, disk => {
|
||||
expect(disk.type).to.be.equal('VDI')
|
||||
expect(disk.name_label).to.be.equal('diskTest')
|
||||
// TODO: should not test an exact value but around 10%
|
||||
expect(disk.size).to.be.equal(1000341504)
|
||||
expect(disk.$SR).to.be.equal(srId)
|
||||
}),
|
||||
waitObjectState(xo, srId, sr => {
|
||||
expect(sr.VDIs).include(diskId)
|
||||
}),
|
||||
])
|
||||
})
|
||||
})
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
describe('.delete()', () => {
|
||||
beforeEach(async () => {
|
||||
diskId = await createDiskTest()
|
||||
})
|
||||
|
||||
it('deletes a disk', async () => {
|
||||
await Promise.all([
|
||||
xo.call('vdi.delete', { id: diskId }),
|
||||
waitObjectState(xo, diskId, disk => {
|
||||
expect(disk).to.be.undefined()
|
||||
}),
|
||||
waitObjectState(xo, srId, sr => {
|
||||
expect(sr.VDIs).not.include(diskId)
|
||||
}),
|
||||
])
|
||||
diskIds = []
|
||||
})
|
||||
})
|
||||
|
||||
// ---------------------------------------------------------------------
|
||||
|
||||
describe('.set()', () => {
|
||||
beforeEach(async () => {
|
||||
diskId = await createDiskTest()
|
||||
})
|
||||
|
||||
it('set the name of the disk', async () => {
|
||||
await xo.call('vdi.set', {
|
||||
id: diskId,
|
||||
name_label: 'disk2',
|
||||
})
|
||||
|
||||
await waitObjectState(xo, diskId, disk => {
|
||||
expect(disk.name_label).to.be.equal('disk2')
|
||||
})
|
||||
})
|
||||
|
||||
it('set the description of the disk', async () => {
|
||||
await xo.call('vdi.set', {
|
||||
id: diskId,
|
||||
name_description: 'description',
|
||||
})
|
||||
|
||||
await waitObjectState(xo, diskId, disk => {
|
||||
expect(disk.name_description).to.be.equal('description')
|
||||
})
|
||||
})
|
||||
|
||||
it.skip('set the size of the disk', async () => {
|
||||
await xo.getOrWaitObject(diskId)
|
||||
await xo.call('vdi.set', {
|
||||
id: diskId,
|
||||
size: '5MB',
|
||||
})
|
||||
|
||||
await waitObjectState(xo, diskId, disk => {
|
||||
expect(disk.size).to.be.equal(6291456)
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
59
packages/xo-server-test/src/old-tests/docker.spec.js
Normal file
@@ -0,0 +1,59 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
// Doc: https://github.com/moll/js-must/blob/master/doc/API.md#must
|
||||
// import expect from 'must'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
// import {getConnection} from './util'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
describe('docker', () => {
|
||||
// let xo
|
||||
// beforeAll(async () => {
|
||||
// xo = await getConnection()
|
||||
// })
|
||||
|
||||
// ===================================================================
|
||||
|
||||
describe('.register()', async () => {
|
||||
it('registers the VM for Docker management')
|
||||
})
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
describe('.deregister()', async () => {
|
||||
it('deregister the VM for Docker management')
|
||||
})
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
describe('.start()', async () => {
|
||||
it('starts the Docker')
|
||||
})
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
describe('.stop()', async () => {
|
||||
it('stops the Docker')
|
||||
})
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
describe('.restart()', async () => {
|
||||
it('restarts the Docker')
|
||||
})
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
describe('.pause()', async () => {
|
||||
it('pauses the Docker')
|
||||
})
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
describe('.unpause()', async () => {
|
||||
it('unpauses the Docker')
|
||||
})
|
||||
})
|
||||
377
packages/xo-server-test/src/old-tests/group.spec.js
Normal file
@@ -0,0 +1,377 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
import { find, map } from 'lodash'
|
||||
|
||||
import { createUser, deleteUsers, getUser, xo } from './util.js'
|
||||
|
||||
// ===================================================================
|
||||
describe('group', () => {
|
||||
const userIds = []
|
||||
const groupIds = []
|
||||
|
||||
// -----------------------------------------------------------------
|
||||
|
||||
afterEach(async () => {
|
||||
await Promise.all(map(groupIds, id => xo.call('group.delete', { id })))
|
||||
// Deleting users must be done AFTER deleting the group
|
||||
// because there is a race condition in xo-server
|
||||
// which cause some users to not be properly deleted.
|
||||
|
||||
// The test “delete the group with its users” highlight this issue.
|
||||
await deleteUsers(xo, userIds)
|
||||
userIds.length = groupIds.length = 0
|
||||
})
|
||||
|
||||
// -----------------------------------------------------------------
|
||||
|
||||
async function createGroup(params) {
|
||||
const groupId = await xo.call('group.create', params)
|
||||
groupIds.push(groupId)
|
||||
return groupId
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------
|
||||
|
||||
function compareGroup(actual, expected) {
|
||||
expect(actual.name).toEqual(expected.name)
|
||||
expect(actual.id).toEqual(expected.id)
|
||||
expect(actual.users).toEqual(expected.users)
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------
|
||||
|
||||
function getAllGroups() {
|
||||
return xo.call('group.getAll')
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
async function getGroup(id) {
|
||||
const groups = await getAllGroups()
|
||||
return find(groups, { id: id })
|
||||
}
|
||||
|
||||
// =================================================================
|
||||
|
||||
describe('.create()', () => {
|
||||
it('creates a group and return its id', async () => {
|
||||
const groupId = await createGroup({
|
||||
name: 'Avengers',
|
||||
})
|
||||
const group = await getGroup(groupId)
|
||||
compareGroup(group, {
|
||||
id: groupId,
|
||||
name: 'Avengers',
|
||||
users: [],
|
||||
})
|
||||
})
|
||||
|
||||
it.skip('does not create two groups with the same name', async () => {
|
||||
await createGroup({
|
||||
name: 'Avengers',
|
||||
})
|
||||
|
||||
await createGroup({
|
||||
name: 'Avengers',
|
||||
}).then(
|
||||
() => {
|
||||
throw new Error('createGroup() should have thrown')
|
||||
},
|
||||
function(error) {
|
||||
expect(error.message).to.match(/duplicate group/i)
|
||||
}
|
||||
)
|
||||
})
|
||||
})
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
|
||||
describe('.delete()', () => {
|
||||
let groupId
|
||||
let userId1
|
||||
let userId2
|
||||
let userId3
|
||||
beforeEach(async () => {
|
||||
groupId = await xo.call('group.create', {
|
||||
name: 'Avengers',
|
||||
})
|
||||
})
|
||||
it('delete a group', async () => {
|
||||
await xo.call('group.delete', {
|
||||
id: groupId,
|
||||
})
|
||||
const group = await getGroup(groupId)
|
||||
expect(group).toBeUndefined()
|
||||
})
|
||||
|
||||
it.skip("erase the group from user's groups list", async () => {
|
||||
// create user and add it to the group
|
||||
const userId = await createUser(xo, userIds, {
|
||||
email: 'tony.stark@stark_industry.com',
|
||||
password: 'IronMan',
|
||||
})
|
||||
await xo.call('group.addUser', {
|
||||
id: groupId,
|
||||
userId: userId,
|
||||
})
|
||||
|
||||
// delete the group
|
||||
await xo.call('group.delete', { id: groupId })
|
||||
const user = await getUser(userId)
|
||||
expect(user.groups).toEqual([])
|
||||
})
|
||||
|
||||
it.skip("erase the user from group's users list", async () => {
|
||||
// create user and add it to the group
|
||||
const userId = await createUser(xo, userIds, {
|
||||
email: 'tony.stark@stark_industry.com',
|
||||
password: 'IronMan',
|
||||
})
|
||||
await xo.call('group.addUser', {
|
||||
id: groupId,
|
||||
userId: userId,
|
||||
})
|
||||
|
||||
// delete the group
|
||||
await xo.call('user.delete', { id: userId })
|
||||
const group = await getGroup(groupId)
|
||||
expect(group.users).toEqual([])
|
||||
})
|
||||
|
||||
// FIXME: some users are not properly deleted because of a race condition with group deletion.
|
||||
it.skip('delete the group with its users', async () => {
|
||||
// create users
|
||||
;[userId1, userId2, userId3] = await Promise.all([
|
||||
xo.call('user.create', {
|
||||
email: 'tony.stark@stark_industry.com',
|
||||
password: 'IronMan',
|
||||
}),
|
||||
xo.call('user.create', {
|
||||
email: 'natasha.romanov@shield.com',
|
||||
password: 'BlackWidow',
|
||||
}),
|
||||
xo.call('user.create', {
|
||||
email: 'pietro.maximoff@shield.com',
|
||||
password: 'QickSilver',
|
||||
}),
|
||||
])
|
||||
|
||||
await xo.call('group.setUsers', {
|
||||
id: groupId,
|
||||
userIds: [userId1, userId2, userId3],
|
||||
})
|
||||
|
||||
// delete the group with his users
|
||||
await Promise.all([
|
||||
xo.call('group.delete', {
|
||||
id: groupId,
|
||||
}),
|
||||
deleteUsers(xo, [userId1, userId2, userId3]),
|
||||
])
|
||||
|
||||
const [group, user1, user2, user3] = await Promise.all([
|
||||
getGroup(groupId),
|
||||
getUser(xo, userId1),
|
||||
getUser(xo, userId2),
|
||||
getUser(xo, userId3),
|
||||
])
|
||||
|
||||
expect(group).toBeUndefined()
|
||||
expect(user1).toBeUndefined()
|
||||
expect(user2).toBeUndefined()
|
||||
expect(user3).toBeUndefined()
|
||||
})
|
||||
})
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
describe('.getAll()', () => {
|
||||
it('returns an array', async () => {
|
||||
const groups = await xo.call('group.getAll')
|
||||
expect(groups).toBeInstanceOf(Array)
|
||||
})
|
||||
})
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
describe('.setUsers ()', () => {
|
||||
let groupId
|
||||
let userId1
|
||||
let userId2
|
||||
let userId3
|
||||
beforeEach(async () => {
|
||||
;[groupId, userId1, userId2, userId3] = await Promise.all([
|
||||
createGroup({
|
||||
name: 'Avengers',
|
||||
}),
|
||||
createUser(xo, userIds, {
|
||||
email: 'tony.stark@stark_industry.com',
|
||||
password: 'IronMan',
|
||||
}),
|
||||
createUser(xo, userIds, {
|
||||
email: 'natasha.romanov@shield.com',
|
||||
password: 'BlackWidow',
|
||||
}),
|
||||
createUser(xo, userIds, {
|
||||
email: 'pietro.maximoff@shield.com',
|
||||
password: 'QickSilver',
|
||||
}),
|
||||
])
|
||||
})
|
||||
|
||||
it('can set users of a group', async () => {
|
||||
// add two users on the group
|
||||
await xo.call('group.setUsers', {
|
||||
id: groupId,
|
||||
userIds: [userId1, userId2],
|
||||
})
|
||||
{
|
||||
const [group, user1, user2, user3] = await Promise.all([
|
||||
getGroup(groupId),
|
||||
getUser(xo, userId1),
|
||||
getUser(xo, userId2),
|
||||
getUser(xo, userId3),
|
||||
])
|
||||
compareGroup(group, {
|
||||
id: groupId,
|
||||
name: 'Avengers',
|
||||
users: [userId1, userId2],
|
||||
})
|
||||
|
||||
expect(user1.groups).toEqual([groupId])
|
||||
expect(user2.groups).toEqual([groupId])
|
||||
expect(user3.groups).toEqual([])
|
||||
}
|
||||
|
||||
// change users of the group
|
||||
await xo.call('group.setUsers', {
|
||||
id: groupId,
|
||||
userIds: [userId1, userId3],
|
||||
})
|
||||
{
|
||||
const [group, user1, user2, user3] = await Promise.all([
|
||||
getGroup(groupId),
|
||||
getUser(xo, userId1),
|
||||
getUser(xo, userId2),
|
||||
getUser(xo, userId3),
|
||||
])
|
||||
|
||||
compareGroup(group, {
|
||||
id: groupId,
|
||||
name: 'Avengers',
|
||||
users: [userId1, userId3],
|
||||
})
|
||||
|
||||
expect(user1.groups).toEqual([groupId])
|
||||
expect(user2.groups).toEqual([])
|
||||
expect(user3.groups).toEqual([groupId])
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
describe('.addUser()', () => {
|
||||
let groupId
|
||||
let userId
|
||||
beforeEach(async () => {
|
||||
;[groupId, userId] = await Promise.all([
|
||||
createGroup({
|
||||
name: 'Avengers',
|
||||
}),
|
||||
createUser(xo, userIds, {
|
||||
email: 'tony.stark@stark_industry.com',
|
||||
password: 'IronMan',
|
||||
}),
|
||||
])
|
||||
})
|
||||
|
||||
it('adds a user id to a group', async () => {
|
||||
await xo.call('group.addUser', {
|
||||
id: groupId,
|
||||
userId: userId,
|
||||
})
|
||||
|
||||
const [group, user] = await Promise.all([
|
||||
getGroup(groupId),
|
||||
getUser(xo, userId),
|
||||
])
|
||||
|
||||
compareGroup(group, {
|
||||
id: groupId,
|
||||
name: 'Avengers',
|
||||
users: [userId],
|
||||
})
|
||||
|
||||
expect(user.groups).toEqual([groupId])
|
||||
})
|
||||
})
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
describe('removeUser()', () => {
|
||||
let groupId
|
||||
let userId
|
||||
beforeEach(async () => {
|
||||
;[groupId, userId] = await Promise.all([
|
||||
createGroup({
|
||||
name: 'Avengers',
|
||||
}),
|
||||
createUser(xo, userIds, {
|
||||
email: 'tony.stark@stark_industry.com',
|
||||
password: 'IronMan',
|
||||
}),
|
||||
])
|
||||
|
||||
await xo.call('group.addUser', {
|
||||
id: groupId,
|
||||
userId: userId,
|
||||
})
|
||||
})
|
||||
|
||||
it('removes a user from a group', async () => {
|
||||
await xo.call('group.removeUser', {
|
||||
id: groupId,
|
||||
userId: userId,
|
||||
})
|
||||
|
||||
const [group, user] = await Promise.all([
|
||||
getGroup(groupId),
|
||||
getUser(xo, userId),
|
||||
])
|
||||
|
||||
compareGroup(group, {
|
||||
id: groupId,
|
||||
name: 'Avengers',
|
||||
users: [],
|
||||
})
|
||||
|
||||
expect(user.groups).toEqual([])
|
||||
})
|
||||
})
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
describe('set()', () => {
|
||||
let groupId
|
||||
beforeEach(async () => {
|
||||
groupId = await createGroup({
|
||||
name: 'Avengers',
|
||||
})
|
||||
})
|
||||
|
||||
it('changes name of a group', async () => {
|
||||
await xo.call('group.set', {
|
||||
id: groupId,
|
||||
name: 'Guardians of the Galaxy',
|
||||
})
|
||||
|
||||
const group = await getGroup(groupId)
|
||||
compareGroup(group, {
|
||||
id: groupId,
|
||||
name: 'Guardians of the Galaxy',
|
||||
users: [],
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
239
packages/xo-server-test/src/old-tests/host.spec.js
Normal file
@@ -0,0 +1,239 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
// Doc: https://github.com/moll/js-must/blob/master/doc/API.md#must
|
||||
|
||||
import expect from 'must'
|
||||
import eventToPromise from 'event-to-promise'
|
||||
import {
|
||||
getAllHosts,
|
||||
getConfig,
|
||||
getMainConnection,
|
||||
getVmToMigrateId,
|
||||
waitObjectState,
|
||||
} from './util'
|
||||
import { find, forEach } from 'lodash'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
describe('host', () => {
|
||||
let xo
|
||||
let serverId
|
||||
let hostId
|
||||
|
||||
// -----------------------------------------------------------------
|
||||
|
||||
beforeAll(async () => {
|
||||
jest.setTimeout(10e3)
|
||||
let config
|
||||
;[xo, config] = await Promise.all([getMainConnection(), getConfig()])
|
||||
serverId = await xo.call('server.add', config.xenServer2).catch(() => {})
|
||||
await eventToPromise(xo.objects, 'finish')
|
||||
|
||||
hostId = getHost(config.host1)
|
||||
})
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
afterAll(async () => {
|
||||
await xo.call('server.remove', {
|
||||
id: serverId,
|
||||
})
|
||||
})
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
function getHost(nameLabel) {
|
||||
const hosts = getAllHosts(xo)
|
||||
const host = find(hosts, { name_label: nameLabel })
|
||||
return host.id
|
||||
}
|
||||
|
||||
  // ===================================================================

  describe('.set()', () => {
    let nameLabel
    let nameDescription

    beforeEach(async () => {
      // get values to set them at the end of the test
      const host = xo.objects.all[hostId]
      nameLabel = host.name_label
      nameDescription = host.name_description
    })
    afterEach(async () => {
      await xo.call('host.set', {
        id: hostId,
        name_label: nameLabel,
        name_description: nameDescription,
      })
    })

    it('changes properties of the host', async () => {
      await xo.call('host.set', {
        id: hostId,
        name_label: 'labTest',
        name_description: 'description',
      })
      await waitObjectState(xo, hostId, host => {
        expect(host.name_label).to.be.equal('labTest')
        expect(host.name_description).to.be.equal('description')
      })
    })
  })

  // ------------------------------------------------------------------

  describe('.restart()', () => {
    jest.setTimeout(330e3)
    it('restart the host', async () => {
      await xo.call('host.restart', { id: hostId })

      await waitObjectState(xo, hostId, host => {
        expect(host.current_operations)
      })
      await waitObjectState(xo, hostId, host => {
        expect(host.power_state).to.be.equal('Halted')
      })
      await waitObjectState(xo, hostId, host => {
        expect(host.power_state).to.be.equal('Running')
      })
    })
  })

  // ------------------------------------------------------------------

  describe('.restartAgent()', () => {
    it('restart a Xen agent on the host')
  })

  // ------------------------------------------------------------------

  describe('.start()', () => {
    jest.setTimeout(300e3)
    beforeEach(async () => {
      try {
        await xo.call('host.stop', { id: hostId })
      } catch (_) {}

      // test if the host is shut down
      await waitObjectState(xo, hostId, host => {
        expect(host.power_state).to.be.equal('Halted')
      })
    })

    it('start the host', async () => {
      await xo.call('host.start', { id: hostId })
      await waitObjectState(xo, hostId, host => {
        expect(host.power_state).to.be.equal('Running')
      })
    })
  })

  // ------------------------------------------------------------------

  describe('.stop()', () => {
    jest.setTimeout(300e3)
    let vmId

    beforeAll(async () => {
      vmId = await getVmToMigrateId(xo)
      try {
        await xo.call('vm.start', { id: vmId })
      } catch (_) {}
      try {
        await xo.call('vm.migrate', {
          vm: vmId,
          host: hostId,
        })
      } catch (_) {}
    })
    afterEach(async () => {
      await xo.call('host.start', { id: hostId })
    })

    it('stop the host and shutdown its VMs', async () => {
      await xo.call('host.stop', { id: hostId })
      await Promise.all([
        waitObjectState(xo, vmId, vm => {
          expect(vm.$container).not.to.be.equal(hostId)
          expect(vm.power_state).to.be.equal('Halted')
        }),
        waitObjectState(xo, hostId, host => {
          expect(host.power_state).to.be.equal('Halted')
        }),
      ])
    })
  })

  // ------------------------------------------------------------------

  describe('.detach()', () => {
    it('ejects the host from a pool')
  })

  // ------------------------------------------------------------------

  describe('.disable()', () => {
    afterEach(async () => {
      await xo.call('host.enable', {
        id: hostId,
      })
    })

    it('disables VM creation on the host', async () => {
      await xo.call('host.disable', { id: hostId })
      await waitObjectState(xo, hostId, host => {
        expect(host.enabled).to.be.false()
      })
    })
  })

  // ------------------------------------------------------------------

  describe('.enable()', () => {
    beforeEach(async () => {
      await xo.call('host.disable', { id: hostId })
    })

    it('enables VM creation on the host', async () => {
      await xo.call('host.enable', { id: hostId })

      await waitObjectState(xo, hostId, host => {
        expect(host.enabled).to.be.true()
      })
    })
  })

  // -----------------------------------------------------------------
  describe('.createNetwork()', () => {
    it('create a network')
  })

  // -----------------------------------------------------------------

  describe('.listMissingPatches()', () => {
    it('returns an array of missing patches in the host')
    it('returns an empty array if up-to-date')
  })

  // ------------------------------------------------------------------

  describe('.installPatch()', () => {
    it('installs a patch on the host')
  })

  // ------------------------------------------------------------------

  describe('.stats()', () => {
    it('returns an object with statistics of the host', async () => {
      const stats = await xo.call('host.stats', {
        host: hostId,
      })
      expect(stats).to.be.an.object()

      forEach(stats, function(array, key) {
        expect(array).to.be.an.array()
      })
    })
  })
})
79
packages/xo-server-test/src/old-tests/pool.spec.js
Normal file
@@ -0,0 +1,79 @@
/* eslint-env jest */

// Doc: https://github.com/moll/js-must/blob/master/doc/API.md#must
import expect from 'must'

// ===================================================================

import { getConfig, getMainConnection, waitObjectState } from './util'
import eventToPromise from 'event-to-promise'
import { find } from 'lodash'

// ===================================================================

describe('pool', () => {
  let xo
  let serverId
  let poolId
  let config

  beforeAll(async () => {
    jest.setTimeout(10e3)
    ;[xo, config] = await Promise.all([getMainConnection(), getConfig()])
    serverId = await xo.call('server.add', config.xenServer1).catch(() => {})
    await eventToPromise(xo.objects, 'finish')
    poolId = getPoolId()
  })

  // -------------------------------------------------------------------

  afterAll(async () => {
    await xo.call('server.remove', {
      id: serverId,
    })
  })

  // -----------------------------------------------------------------

  function getPoolId() {
    const pools = xo.objects.indexes.type.pool
    const pool = find(pools, { name_label: config.pool.name_label })
    return pool.id
  }

  // ===================================================================

  describe('.set()', () => {
    afterEach(async () => {
      await xo.call('pool.set', {
        id: poolId,
        name_label: config.pool.name_label,
        name_description: '',
      })
    })
    it.skip('set pool parameters', async () => {
      await xo.call('pool.set', {
        id: poolId,
        name_label: 'nameTest',
        name_description: 'description',
      })

      await waitObjectState(xo, poolId, pool => {
        expect(pool.name_label).to.be.equal('nameTest')
        expect(pool.name_description).to.be.equal('description')
      })
    })
  })

  // ------------------------------------------------------------------

  describe('.installPatch()', () => {
    it('install a patch on the pool')
  })

  // -----------------------------------------------------------------

  describe('handlePatchUpload()', () => {
    it('')
  })
})
33
packages/xo-server-test/src/old-tests/role.spec.js
Normal file
@@ -0,0 +1,33 @@
/* eslint-env jest */

import { xo } from './util'

// ===================================================================

describe('role', () => {
  describe('.getAll()', () => {
    it('returns all the roles', async () => {
      const role = await xo.call('role.getAll')

      // FIXME: use permutationOf but figure out how not to compare objects by
      // equality.
      expect(role).toEqual([
        {
          id: 'viewer',
          name: 'Viewer',
          permissions: ['view'],
        },
        {
          id: 'operator',
          name: 'Operator',
          permissions: ['view', 'operate'],
        },
        {
          id: 'admin',
          name: 'Admin',
          permissions: ['view', 'operate', 'administrate'],
        },
      ])
    })
  })
})
149
packages/xo-server-test/src/old-tests/schedule.spec.js
Normal file
@@ -0,0 +1,149 @@
/* eslint-env jest */

// Doc: https://github.com/moll/js-must/blob/master/doc/API.md#must
import expect from 'must'

// ===================================================================

import {
  getConfig,
  getMainConnection,
  getSchedule,
  jobTest,
  scheduleTest,
} from './util'
import eventToPromise from 'event-to-promise'
import { map } from 'lodash'

// ===================================================================

describe('schedule', () => {
  let xo
  let serverId
  let scheduleIds = []
  let jobId

  beforeAll(async () => {
    jest.setTimeout(10e3)
    let config
    ;[xo, config] = await Promise.all([getMainConnection(), getConfig()])

    serverId = await xo.call('server.add', config.xenServer1).catch(() => {})
    await eventToPromise(xo.objects, 'finish')

    jobId = await jobTest(xo)
  })

  // -----------------------------------------------------------------

  afterAll(async () => {
    await Promise.all([
      xo.call('job.delete', { id: jobId }),
      xo.call('server.remove', { id: serverId }),
    ])
  })

  // -----------------------------------------------------------------

  afterEach(async () => {
    await Promise.all(
      map(scheduleIds, scheduleId =>
        xo.call('schedule.delete', { id: scheduleId })
      )
    )
    scheduleIds = []
  })

  // -----------------------------------------------------------------

  async function createSchedule(params) {
    const schedule = await xo.call('schedule.create', params)
    scheduleIds.push(schedule.id)
    return schedule
  }

  async function createScheduleTest() {
    const schedule = await scheduleTest(xo, jobId)
    scheduleIds.push(schedule.id)
    return schedule
  }

  // =================================================================

  describe('.getAll()', () => {
    it('gets all existing schedules', async () => {
      const schedules = await xo.call('schedule.getAll')
      expect(schedules).to.be.an.array()
    })
  })

  // -----------------------------------------------------------------

  describe('.get()', () => {
    let scheduleId
    beforeAll(async () => {
      scheduleId = (await createScheduleTest()).id
    })

    it('gets an existing schedule', async () => {
      const schedule = await xo.call('schedule.get', { id: scheduleId })
      expect(schedule.job).to.be.equal(jobId)
      expect(schedule.cron).to.be.equal('* * * * * *')
      expect(schedule.enabled).to.be.false()
    })
  })

  // -----------------------------------------------------------------

  describe('.create()', () => {
    it('creates a new schedule', async () => {
      const schedule = await createSchedule({
        jobId: jobId,
        cron: '* * * * * *',
        enabled: true,
      })
      expect(schedule.job).to.be.equal(jobId)
      expect(schedule.cron).to.be.equal('* * * * * *')
      expect(schedule.enabled).to.be.true()
    })
  })

  // -----------------------------------------------------------------

  describe('.set()', () => {
    let scheduleId
    beforeAll(async () => {
      scheduleId = (await createScheduleTest()).id
    })
    it('modifies an existing schedule', async () => {
      await xo.call('schedule.set', {
        id: scheduleId,
        cron: '2 * * * * *',
      })

      const schedule = await getSchedule(xo, scheduleId)
      expect(schedule.cron).to.be.equal('2 * * * * *')
    })
  })

  // -----------------------------------------------------------------

  describe('.delete()', () => {
    let scheduleId
    beforeEach(async () => {
      scheduleId = (await createScheduleTest()).id
    })
    it('deletes an existing schedule', async () => {
      await xo.call('schedule.delete', { id: scheduleId })
      await getSchedule(xo, scheduleId).then(
        () => {
          throw new Error('getSchedule() should have thrown')
        },
        function(error) {
          expect(error.message).to.match(/no such object/)
        }
      )
      scheduleIds = []
    })
  })
})
82
packages/xo-server-test/src/old-tests/scheduler.spec.js
Normal file
@@ -0,0 +1,82 @@
/* eslint-env jest */

// Doc: https://github.com/moll/js-must/blob/master/doc/API.md#must
import expect from 'must'

// ===================================================================

import {
  jobTest,
  scheduleTest,
  getConfig,
  getMainConnection,
  getSchedule,
} from './util'
import eventToPromise from 'event-to-promise'

// ===================================================================

describe('scheduler', () => {
  let xo
  let serverId
  let jobId
  let scheduleId

  beforeAll(async () => {
    jest.setTimeout(10e3)
    let config
    ;[xo, config] = await Promise.all([getMainConnection(), getConfig()])

    serverId = await xo.call('server.add', config.xenServer1).catch(() => {})
    await eventToPromise(xo.objects, 'finish')

    jobId = await jobTest(xo)
    scheduleId = (await scheduleTest(xo, jobId)).id
  })

  // -----------------------------------------------------------------

  afterAll(async () => {
    await Promise.all([
      xo.call('schedule.delete', { id: scheduleId }),
      xo.call('job.delete', { id: jobId }),
      xo.call('server.remove', { id: serverId }),
    ])
  })

  // =================================================================

  describe('.enable()', () => {
    afterEach(async () => {
      await xo.call('scheduler.disable', { id: scheduleId })
    })
    it.skip('enables a schedule to run its job as scheduled', async () => {
      await xo.call('scheduler.enable', { id: scheduleId })
      const schedule = await getSchedule(xo, scheduleId)
      expect(schedule.enabled).to.be.true()
    })
  })

  // -----------------------------------------------------------------

  describe('.disable()', () => {
    beforeEach(async () => {
      await xo.call('schedule.enable', { id: scheduleId })
    })
    it.skip('disables a schedule', async () => {
      await xo.call('schedule.disable', { id: scheduleId })
      const schedule = await getSchedule(xo, scheduleId)
      expect(schedule.enabled).to.be.false()
    })
  })

  // -----------------------------------------------------------------

  describe('.getScheduleTable()', () => {
    it('gets a map of existing schedules', async () => {
      const table = await xo.call('scheduler.getScheduleTable')
      expect(table).to.be.an.object()
      expect(table).to.match(scheduleId)
    })
  })
})
208
packages/xo-server-test/src/old-tests/server.spec.js
Normal file
@@ -0,0 +1,208 @@
/* eslint-env jest */

import { assign, find, map } from 'lodash'

import { config, rejectionOf, xo } from './util'

// ===================================================================

describe('server', () => {
  let serverIds = []

  afterEach(async () => {
    await Promise.all(
      map(serverIds, serverId => xo.call('server.remove', { id: serverId }))
    )
    serverIds = []
  })

  async function addServer(params) {
    const serverId = await xo.call('server.add', params)
    serverIds.push(serverId)
    return serverId
  }

  function getAllServers() {
    return xo.call('server.getAll')
  }

  async function getServer(id) {
    const servers = await getAllServers()
    return find(servers, { id: id })
  }

  // ==================================================================

  describe('.add()', () => {
    it('add a Xen server and return its id', async () => {
      const serverId = await addServer({
        host: 'xen1.example.org',
        username: 'root',
        password: 'password',
        autoConnect: false,
      })

      const server = await getServer(serverId)
      expect(typeof server.id).toBe('string')
      expect(server).toEqual({
        id: serverId,
        host: 'xen1.example.org',
        username: 'root',
        status: 'disconnected',
      })
    })

    it('does not add two servers with the same host', async () => {
      await addServer({
        host: 'xen1.example.org',
        username: 'root',
        password: 'password',
        autoConnect: false,
      })
      expect(
        (await rejectionOf(
          addServer({
            host: 'xen1.example.org',
            username: 'root',
            password: 'password',
            autoConnect: false,
          })
        )).message
      ).toBe('unknown error from the peer')
    })

    it('sets autoConnect to true by default', async () => {
      const serverId = await addServer(config.xenServer1)
      const server = await getServer(serverId)

      expect(server.id).toBe(serverId)
      expect(server.host).toBe('192.168.100.3')
      expect(server.username).toBe('root')
      expect(server.status).toMatch(/^connect(?:ed|ing)$/)
    })
  })

  // -----------------------------------------------------------------

  describe('.remove()', () => {
    let serverId
    beforeEach(async () => {
      serverId = await addServer({
        host: 'xen1.example.org',
        username: 'root',
        password: 'password',
        autoConnect: false,
      })
    })

    it('remove a Xen server', async () => {
      await xo.call('server.remove', {
        id: serverId,
      })

      const server = await getServer(serverId)
      expect(server).toBeUndefined()
    })
  })

  // -----------------------------------------------------------------

  describe('.getAll()', () => {
    it('returns an array', async () => {
      const servers = await xo.call('server.getAll')

      expect(servers).toBeInstanceOf(Array)
    })
  })

  // -----------------------------------------------------------------

  describe('.set()', () => {
    let serverId
    beforeEach(async () => {
      serverId = await addServer({
        host: 'xen1.example.org',
        username: 'root',
        password: 'password',
        autoConnect: false,
      })
    })

    it('changes attributes of an existing server', async () => {
      await xo.call('server.set', {
        id: serverId,
        username: 'root2',
      })

      const server = await getServer(serverId)
      expect(server).toEqual({
        id: serverId,
        host: 'xen1.example.org',
        username: 'root2',
        status: 'disconnected',
      })
    })
  })

  // -----------------------------------------------------------------

  describe('.connect()', () => {
    jest.setTimeout(5e3)

    it('connects to a Xen server', async () => {
      const serverId = await addServer(
        assign({ autoConnect: false }, config.xenServer1)
      )

      await xo.call('server.connect', {
        id: serverId,
      })

      const server = await getServer(serverId)
      expect(server).toEqual({
        enabled: 'true',
        id: serverId,
        host: '192.168.100.3',
        username: 'root',
        status: 'connected',
      })
    })

    it.skip('connect to a Xen server on a slave host', async () => {
      const serverId = await addServer(config.slaveServer)
      await xo.call('server.connect', { id: serverId })

      const server = await getServer(serverId)
      expect(server.status).toBe('connected')
    })
  })

  // -----------------------------------------------------------------

  describe('.disconnect()', () => {
    jest.setTimeout(5e3)
    let serverId
    beforeEach(async () => {
      serverId = await addServer(
        assign({ autoConnect: false }, config.xenServer1)
      )
      await xo.call('server.connect', {
        id: serverId,
      })
    })

    it('disconnects from a Xen server', async () => {
      await xo.call('server.disconnect', {
        id: serverId,
      })

      const server = await getServer(serverId)
      expect(server).toEqual({
        id: serverId,
        host: '192.168.100.3',
        username: 'root',
        status: 'disconnected',
      })
    })
  })
})
53
packages/xo-server-test/src/old-tests/token.spec.js
Normal file
@@ -0,0 +1,53 @@
/* eslint-env jest */

import defer from 'golike-defer'
import { map } from 'lodash'

import { getConnection, rejectionOf, testConnection, xo } from './util.js'

// ===================================================================

describe('token', () => {
  const tokens = []

  afterAll(async () => {
    await Promise.all(map(tokens, token => xo.call('token.delete', { token })))
  })

  async function createToken() {
    const token = await xo.call('token.create')
    tokens.push(token)
    return token
  }

  // =================================================================

  describe('.create()', () => {
    it('creates a token string which can be used to sign in', async () => {
      const token = await createToken()

      await testConnection({ credentials: { token } })
    })
  })

  // -------------------------------------------------------------------

  describe('.delete()', () => {
    it(
      'deletes a token',
      defer(async $defer => {
        const token = await createToken()
        const xo2 = await getConnection({ credentials: { token } })
        $defer(() => xo2.close())

        await xo2.call('token.delete', {
          token,
        })

        expect(
          (await rejectionOf(testConnection({ credentials: { token } }))).code
        ).toBe(3)
      })
    )
  })
})
169
packages/xo-server-test/src/old-tests/vbd.spec.js
Normal file
@@ -0,0 +1,169 @@
/* eslint-env jest */

// Doc: https://github.com/moll/js-must/blob/master/doc/API.md#must
import expect from 'must'

// ===================================================================

import {
  getConfig,
  getMainConnection,
  getVmXoTestPvId,
  getOneHost,
  waitObjectState,
} from './util'
import { assign, map } from 'lodash'
import eventToPromise from 'event-to-promise'

// ===================================================================

describe('vbd', () => {
  let xo
  let vbdId
  let diskIds = []
  let serverId
  let vmId

  // ------------------------------------------------------------------

  beforeAll(async () => {
    jest.setTimeout(10e3)
    let config
    ;[xo, config] = await Promise.all([getMainConnection(), getConfig()])

    serverId = await xo.call(
      'server.add',
      assign({ autoConnect: false }, config.xenServer1)
    )
    await xo.call('server.connect', { id: serverId })
    await eventToPromise(xo.objects, 'finish')

    vmId = await getVmXoTestPvId(xo)
    try {
      await xo.call('vm.start', { id: vmId })
    } catch (_) {}
  })

  // -----------------------------------------------------------------

  beforeEach(async () => {
    jest.setTimeout(10e3)
    vbdId = await createVbd()
  })

  // ------------------------------------------------------------------

  afterEach(async () => {
    await Promise.all(
      map(diskIds, diskId => xo.call('vdi.delete', { id: diskId }))
    )
    diskIds = []
  })

  // ------------------------------------------------------------------

  afterAll(async () => {
    jest.setTimeout(5e3)
    await Promise.all([
      xo.call('vm.stop', { id: vmId }),
      xo.call('server.remove', { id: serverId }),
    ])
  })

  // ------------------------------------------------------------------

  async function createVbd() {
    // Create disk
    const pool = await xo.getOrWaitObject(getOneHost(xo).$poolId)
    const diskId = await xo.call('disk.create', {
      name: 'diskTest',
      size: '1MB',
      sr: pool.default_SR,
    })
    diskIds.push(diskId)

    // Create VBD
    await xo.call('vm.attachDisk', {
      vm: vmId,
      vdi: diskId,
    })
    const disk = await xo.waitObject(diskId)
    return disk.$VBDs[0]
  }

  // =====================================================================

  describe('.delete()', () => {
    it('delete the VBD', async () => {
      await xo.call('vbd.disconnect', { id: vbdId })
      await xo.call('vbd.delete', { id: vbdId })

      await waitObjectState(xo, vbdId, vbd => {
        expect(vbd).to.be.undefined()
      })
    })

    it('deletes the VBD only if it is disconnected', async () => {
      await xo.call('vbd.delete', { id: vbdId }).then(
        () => {
          throw new Error('vbd.delete() should have thrown')
        },
        function(error) {
          // TODO: check with Julien if it is ok
          expect(error.message).to.match('unknown error from the peer')
        }
      )
      await xo.call('vbd.disconnect', { id: vbdId })
    })
  })

  // --------------------------------------------------------------------

  describe('.disconnect()', () => {
    it('disconnect the VBD', async () => {
      await xo.call('vbd.disconnect', { id: vbdId })
      await waitObjectState(xo, vbdId, vbd => {
        expect(vbd.attached).to.be.false()
      })
    })
  })

  // -------------------------------------------------------------------

  describe('.connect()', () => {
    beforeEach(async () => {
      await xo.call('vbd.disconnect', { id: vbdId })
    })
    afterEach(async () => {
      await xo.call('vbd.disconnect', { id: vbdId })
    })

    it('connect the VBD', async () => {
      await xo.call('vbd.connect', { id: vbdId })

      await waitObjectState(xo, vbdId, vbd => {
        expect(vbd.attached).to.be.true()
      })
    })
  })

  // ----------------------------------------------------------------

  describe('.set()', () => {
    afterEach(async () => {
      await xo.call('vbd.disconnect', { id: vbdId })
    })

    // TODO: resolve problem with disconnect
    it.skip('set the position of the VBD', async () => {
      await xo.call('vbd.set', {
        id: vbdId,
        position: '10',
      })

      await waitObjectState(xo, vbdId, vbd => {
        expect(vbd.position).to.be.equal('10')
      })
    })
  })
})
133
packages/xo-server-test/src/old-tests/vif.spec.js
Normal file
@@ -0,0 +1,133 @@
/* eslint-env jest */

// Doc: https://github.com/moll/js-must/blob/master/doc/API.md#must
import expect from 'must'

// ===================================================================

import {
  getConfig,
  getMainConnection,
  getNetworkId,
  waitObjectState,
  getVmXoTestPvId,
} from './util'
import eventToPromise from 'event-to-promise'
import { map } from 'lodash'

// ===================================================================

describe('vif', () => {
  let xo
  let serverId
  let vifIds = []
  let vmId
  let vifId

  beforeAll(async () => {
    jest.setTimeout(10e3)
    let config
    ;[xo, config] = await Promise.all([getMainConnection(), getConfig()])

    serverId = await xo.call('server.add', config.xenServer1).catch(() => {})
    await eventToPromise(xo.objects, 'finish')

    vmId = await getVmXoTestPvId(xo)
    try {
      await xo.call('vm.start', { id: vmId })
    } catch (_) {}
  })

  // -------------------------------------------------------------------

  beforeEach(async () => {
    vifId = await createVif()
  })

  // -------------------------------------------------------------------

  afterEach(async () => {
    await Promise.all(
      map(vifIds, vifId => xo.call('vif.delete', { id: vifId }))
    )
    vifIds = []
  })

  // -------------------------------------------------------------------

  afterAll(async () => {
    jest.setTimeout(5e3)
    await xo.call('vm.stop', { id: vmId, force: true })
    await xo.call('server.remove', { id: serverId })
  })

  // -------------------------------------------------------------------

  async function createVif() {
    const networkId = await getNetworkId(xo)

    const vifId = await xo.call('vm.createInterface', {
      vm: vmId,
      network: networkId,
      position: '1',
    })
    vifIds.push(vifId)

    return vifId
  }

  // ===================================================================

  describe('.delete()', () => {
    it('deletes a VIF', async () => {
      await xo.call('vif.disconnect', { id: vifId })
      await xo.call('vif.delete', { id: vifId })

      await waitObjectState(xo, vifId, vif => {
        expect(vif).to.be.undefined()
      })

      vifIds = []
    })

    it('can not delete a VIF if it is connected', async () => {
      await xo.call('vif.delete', { id: vifId }).then(
        () => {
          throw new Error('vif.delete() should have thrown')
        },
        function(error) {
          expect(error.message).to.be.equal('unknown error from the peer')
        }
      )
      await xo.call('vif.disconnect', { id: vifId })
    })
  })

  // ----------------------------------------------------------------

  describe('.disconnect()', () => {
    it('disconnects a VIF', async () => {
      await xo.call('vif.disconnect', { id: vifId })
      await waitObjectState(xo, vifId, vif => {
        expect(vif.attached).to.be.false()
      })
    })
  })

  // ----------------------------------------------------------------

  describe('.connect()', () => {
    beforeEach(async () => {
      await xo.call('vif.disconnect', { id: vifId })
    })
    afterEach(async () => {
      await xo.call('vif.disconnect', { id: vifId })
    })
    it('connects a VIF', async () => {
      await xo.call('vif.connect', { id: vifId })
      await waitObjectState(xo, vifId, vif => {
        expect(vif.attached).to.be.true()
      })
    })
  })
})
666
packages/xo-server-test/src/old-tests/vm.spec.js
Normal file
@@ -0,0 +1,666 @@
/* eslint-env jest */

// Doc: https://github.com/moll/js-must/blob/master/doc/API.md#must
import expect from 'must'

// ===================================================================

import {
  almostEqual,
  getAllHosts,
  getConfig,
  getMainConnection,
  getNetworkId,
  getOneHost,
  getSrId,
  getVmToMigrateId,
  getVmXoTestPvId,
  waitObjectState,
} from './util'
import { map, find } from 'lodash'
import eventToPromise from 'event-to-promise'

// ===================================================================

describe('vm', () => {
  let xo
  let vmId
  let vmIds = []
  let serverId
  let config

  // ----------------------------------------------------------------------

  beforeAll(async () => {
    jest.setTimeout(10e3)
    ;[xo, config] = await Promise.all([getMainConnection(), getConfig()])
    serverId = await xo.call('server.add', config.xenServer1).catch(() => {})
    await eventToPromise(xo.objects, 'finish')
  })

  // ----------------------------------------------------------------------

  afterEach(async () => {
    jest.setTimeout(15e3)
    await Promise.all(
      map(vmIds, vmId => xo.call('vm.delete', { id: vmId, delete_disks: true }))
    )
    vmIds = []
  })

  // ---------------------------------------------------------------------

  afterAll(async () => {
    await xo.call('server.remove', {
      id: serverId,
    })
  })

  // ---------------------------------------------------------------------

  async function createVm(params) {
    const vmId = await xo.call('vm.create', params)
    vmIds.push(vmId)
    return vmId
  }

  async function createVmTest() {
    const templateId = getTemplateId(config.templates.debian)
    const vmId = await createVm({
      name_label: 'vmTest',
      template: templateId,
      VIFs: [],
    })
    return vmId
  }

  // ------------------------------------------------------------------

  // eslint-disable-next-line no-unused-vars
  async function getCdVbdPosition(vmId) {
    const vm = await xo.getOrWaitObject(vmId)
    for (let i = 0; i < vm.$VBDs.length; i++) {
      const vbd = await xo.getOrWaitObject(vm.$VBDs[i])
      if (vbd.is_cd_drive === true) {
        return vbd.id
      }
    }
  }

  function getHostOtherPool(vm) {
    const hosts = getAllHosts(xo)
    for (const id in hosts) {
      if (hosts[id].$poolId !== vm.$poolId) {
        return id
      }
    }
  }

  // eslint-disable-next-line no-unused-vars
  function getIsoId() {
    const vdis = xo.objects.indexes.type.VDI
    const iso = find(vdis, { name_label: config.iso })
    return iso.id
  }

  function getOtherHost(vm) {
    const hosts = getAllHosts(xo)
    for (const id in hosts) {
      if (hosts[id].$poolId === vm.$poolId) {
        if (id !== vm.$container) {
          return id
        }
      }
    }
  }

  function getTemplateId(nameTemplate) {
    const templates = xo.objects.indexes.type['VM-template']
    const template = find(templates, { name_label: nameTemplate })
    return template.id
  }

  // =================================================================

  describe('.create()', () => {
    it('creates a VM with only a name and a template', async () => {
      const templateId = getTemplateId(config.templates.debian)

      vmId = await createVm({
        name_label: 'vmTest',
        template: templateId,
        VIFs: [],
      })
      await waitObjectState(xo, vmId, vm => {
        expect(vm.id).to.be.a.string()
        expect(vm).to.be.an.object()
      })
    })

    describe('.createHVM()', () => {
      let srId
      let templateId

      beforeAll(async () => {
        srId = await getSrId(xo)
        templateId = getTemplateId(config.templates.otherConfig)
      })

      it.skip('creates a VM with the Other Config template, three disks, two interfaces and an ISO mounted', async () => {
        jest.setTimeout(30e3)

        const networkId = await getNetworkId(xo)
        vmId = await createVm({
          name_label: 'vmTest',
          template: templateId,
          VIFs: [{ network: networkId }, { network: networkId }],
          VDIs: [
            { device: '0', size: 1, SR: srId, type: 'user' },
            { device: '1', size: 1, SR: srId, type: 'user' },
            { device: '2', size: 1, SR: srId, type: 'user' },
          ],
        })

        await waitObjectState(xo, vmId, vm => {
          expect(vm.name_label).to.be.equal('vmTest')
          expect(vm.other.base_template_name).to.be.equal(
            config.templates.otherConfig
          )
          expect(vm.VIFs).to.have.length(2)
          expect(vm.$VBDs).to.have.length(3)
        })
      })

      it.skip('creates a VM with the Other Config template, no disk, no network and an ISO mounted', async () => {
        vmId = await createVm({
          name_label: 'vmTest',
          template: templateId,
          VIFs: [],
        })

        await waitObjectState(xo, vmId, vm => {
          expect(vm.other.base_template_name).to.be.equal(
            config.templates.otherConfig
          )
          expect(vm.VIFs).to.have.length(0)
          expect(vm.$VBDs).to.have.length(0)
        })
      })
    })
    describe('.createPV()', () => {
      let srId
      let templateId
      let networkId

      beforeAll(async () => {
        ;[networkId, srId] = await Promise.all([getNetworkId(xo), getSrId(xo)])
      })

      it.skip('creates a VM with the Debian 7 64 bits template, network install, one disk, one network', async () => {
        templateId = getTemplateId(config.templates.debian)

        vmId = await createVm({
          name_label: 'vmTest',
          template: templateId,
          VIFs: [{ network: networkId }],
          VDIs: [
            {
              device: '0',
              size: 1,
              SR: srId,
              type: 'user',
            },
          ],
        })

        await waitObjectState(xo, vmId, vm => {
          expect(vm.other.base_template_name).to.be.equal(
            config.templates.debian
          )
          expect(vm.VIFs).to.have.length(1)
          expect(vm.$VBDs).to.have.length(1)
        })
      })

      it('creates a VM with the CentOS 7 64 bits template, two disks, two networks and an ISO mounted', async () => {
        jest.setTimeout(10e3)

        templateId = getTemplateId(config.templates.centOS)
        vmId = await createVm({
          name_label: 'vmTest',
          template: templateId,
          VIFs: [{ network: networkId }, { network: networkId }],
          VDIs: [
            { device: '0', size: 1, SR: srId, type: 'user' },
            { device: '1', size: 1, SR: srId, type: 'user' },
          ],
        })

        await waitObjectState(xo, vmId, vm => {
          expect(vm.other.base_template_name).to.be.equal(
            config.templates.centOS
          )
          expect(vm.VIFs).to.have.length(2)
          expect(vm.$VBDs).to.have.length(2)
        })
      })
    })
  })

  // ------------------------------------------------------------------

  describe('.delete()', () => {
    let snapshotIds = []
    let diskIds = []

    beforeEach(async () => {
      vmId = await createVmTest()
    })

    afterAll(async () => {
      await Promise.all([
        ...map(snapshotIds, snapshotId =>
          xo.call('vm.delete', { id: snapshotId })
        ),
        ...map(diskIds, diskId => xo.call('vdi.delete', { id: diskId })),
      ])
    })

    it('deletes a VM', async () => {
      await xo.call('vm.delete', {
        id: vmId,
        delete_disks: true,
      })

      await waitObjectState(xo, vmId, vm => {
        expect(vm).to.be.undefined()
      })
      vmIds = []
    })

    it('deletes a VM and its snapshots', async () => {
      const snapshotId = await xo.call('vm.snapshot', {
        id: vmId,
        name: 'snapshot',
      })
      snapshotIds.push(snapshotId)

      await xo.call('vm.delete', {
        id: vmId,
        delete_disks: true,
      })
      vmIds = []
      await waitObjectState(xo, snapshotId, snapshot => {
        expect(snapshot).to.be.undefined()
      })
      snapshotIds = []
    })

    it('deletes a VM and its disks', async () => {
      jest.setTimeout(5e3)
      // create disk
      const host = getOneHost(xo)
      const pool = await xo.getOrWaitObject(host.$poolId)

      const diskId = await xo.call('disk.create', {
        name: 'diskTest',
        size: '1GB',
        sr: pool.default_SR,
      })
      diskIds.push(diskId)

      // attach the disk to the VM
      await xo.call('vm.attachDisk', {
        vm: vmId,
        vdi: diskId,
      })

      // delete the VM
      await xo.call('vm.delete', {
        id: vmId,
        delete_disks: true,
      })
      vmIds = []
      await waitObjectState(xo, diskId, disk => {
        expect(disk).to.be.undefined()
      })
      diskIds = []
    })

    // TODO: do a copy of the ISO
    it.skip('deletes a VM but does not delete its ISO', async () => {
      vmId = await createVmTest()

      await xo.call('vm.insertCd', {
        id: vmId,
        cd_id: '1169eb8a-d43f-4daf-a0ca-f3434a4bf301',
        force: false,
      })

      await xo.call('vm.delete', {
        id: vmId,
        delete_disks: true,
      })

      await waitObjectState(xo, '1169eb8a-d43f-4daf-a0ca-f3434a4bf301', iso => {
        expect(iso).not.to.be.undefined()
      })
    })
  })

  // -------------------------------------------------------------------

  describe('.migrate()', () => {
    jest.setTimeout(15e3)

    let secondServerId
    let startHostId
    let hostId

    beforeAll(async () => {
      secondServerId = await xo
        .call('server.add', config.xenServer2)
        .catch(() => {})
      await eventToPromise(xo.objects, 'finish')

      vmId = await getVmToMigrateId(xo)

      try {
        await xo.call('vm.start', { id: vmId })
      } catch (_) {}
    })
    beforeEach(async () => {
      const vm = await xo.getOrWaitObject(vmId)
      startHostId = vm.$container
      hostId = getOtherHost(vm)
    })
    afterEach(async () => {
      await xo.call('vm.migrate', {
        id: vmId,
        host_id: startHostId,
      })
    })
    afterAll(async () => {
      await xo.call('server.remove', {
        id: secondServerId,
      })
    })

    it('migrates the VM to another host', async () => {
      await xo.call('vm.migrate', {
        id: vmId,
        host_id: hostId,
      })
      await waitObjectState(xo, vmId, vm => {
        expect(vm.$container).to.be.equal(hostId)
      })
    })
  })

  // -------------------------------------------------------------------

  describe('.migratePool()', () => {
    jest.setTimeout(100e3)
    let hostId
    let secondServerId
    let startHostId

    beforeAll(async () => {
      secondServerId = await xo
        .call('server.add', config.xenServer2)
        .catch(() => {})
      await eventToPromise(xo.objects, 'finish')

      vmId = await getVmToMigrateId(xo)

      try {
        await xo.call('vm.start', { id: vmId })
      } catch (_) {}
    })
    afterAll(async () => {
      await xo.call('server.remove', { id: secondServerId })
    })
    beforeEach(async () => {
      const vm = await xo.getOrWaitObject(vmId)
      startHostId = vm.$container
      hostId = getHostOtherPool(vm)
    })

    afterEach(async () => {
      // TODO: try to get the vmId
      vmId = await getVmToMigrateId(xo)
      await xo.call('vm.migrate_pool', {
        id: vmId,
        target_host_id: startHostId,
      })
    })

    it.skip('migrates the VM to a host in another pool', async () => {
      await xo.call('vm.migrate_pool', {
        id: vmId,
        target_host_id: hostId,
      })
      await waitObjectState(xo, vmId, vm => {
        expect(vm).to.be.undefined()
      })
    })
  })

  // --------------------------------------------------------------------

  describe('.clone()', () => {
    beforeEach(async () => {
      vmId = await createVmTest()
    })
    it('clones a VM', async () => {
      const cloneId = await xo.call('vm.clone', {
        id: vmId,
        name: 'clone',
        full_copy: true,
      })
      // push cloneId into vmIds so the clone is deleted after the test
      vmIds.push(cloneId)

      const [vm, clone] = await Promise.all([
        xo.getOrWaitObject(vmId),
        xo.getOrWaitObject(cloneId),
      ])
      expect(clone.type).to.be.equal('VM')
      expect(clone.name_label).to.be.equal('clone')

      almostEqual(clone, vm, ['name_label', 'ref', 'id', 'other.mac_seed'])
    })
  })

  // --------------------------------------------------------------------

  describe('.convert()', () => {
    beforeEach(async () => {
      vmId = await createVmTest()
    })

    it('converts a VM', async () => {
      await xo.call('vm.convert', { id: vmId })
      await waitObjectState(xo, vmId, vm => {
        expect(vm.type).to.be.equal('VM-template')
      })
    })
  })

  // ---------------------------------------------------------------------

  describe('.revert()', () => {
    jest.setTimeout(5e3)
    let snapshotId
    beforeEach(async () => {
      vmId = await createVmTest()
      snapshotId = await xo.call('vm.snapshot', {
        id: vmId,
        name: 'snapshot',
      })
    })
    afterEach(async () => {
      await xo.call('vm.delete', { id: snapshotId })
    })
    it('reverts a snapshot to its parent VM', async () => {
      const revert = await xo.call('vm.revert', { id: snapshotId })
      expect(revert).to.be.true()
    })
  })

  // ---------------------------------------------------------------------

  describe('.handleExport()', () => {
    it('')
  })

  // --------------------------------------------------------------------

  describe('.import()', () => {
    it('')
  })

  // ---------------------------------------------------------------------

  describe('.attachDisk()', () => {
    jest.setTimeout(5e3)
    let diskId
    beforeEach(async () => {
      vmId = await createVmTest()
      const srId = await getSrId(xo)
      diskId = await xo.call('disk.create', {
        name: 'diskTest',
        size: '1GB',
        sr: srId,
      })
    })
    afterEach(async () => {
      await xo.call('vdi.delete', { id: diskId })
    })

    it('attaches the disk to the VM with default attributes', async () => {
      await xo.call('vm.attachDisk', {
        vm: vmId,
        vdi: diskId,
      })
      const vm = await xo.waitObject(vmId)
      await waitObjectState(xo, diskId, disk => {
        expect(disk.$VBDs).to.be.eql(vm.$VBDs)
      })

      await waitObjectState(xo, vm.$VBDs, vbd => {
        expect(vbd.type).to.be.equal('VBD')
        // expect(vbd.attached).to.be.true()
        expect(vbd.bootable).to.be.false()
        expect(vbd.is_cd_drive).to.be.false()
        expect(vbd.position).to.be.equal('0')
        expect(vbd.read_only).to.be.false()
        expect(vbd.VDI).to.be.equal(diskId)
        expect(vbd.VM).to.be.equal(vmId)
        expect(vbd.$poolId).to.be.equal(vm.$poolId)
      })
    })

    it('attaches the disk to the VM with specified attributes', async () => {
      await xo.call('vm.attachDisk', {
        vm: vmId,
        vdi: diskId,
        bootable: true,
        mode: 'RO',
        position: '2',
      })
      const vm = await xo.waitObject(vmId)
      await waitObjectState(xo, vm.$VBDs, vbd => {
        expect(vbd.type).to.be.equal('VBD')
        // expect(vbd.attached).to.be.true()
        expect(vbd.bootable).to.be.true()
        expect(vbd.is_cd_drive).to.be.false()
        expect(vbd.position).to.be.equal('2')
        expect(vbd.read_only).to.be.true()
        expect(vbd.VDI).to.be.equal(diskId)
        expect(vbd.VM).to.be.equal(vmId)
        expect(vbd.$poolId).to.be.equal(vm.$poolId)
      })
    })
  })

  // ---------------------------------------------------------------------

  describe('.createInterface()', () => {
    let vifId
    let networkId
    beforeAll(async () => {
      vmId = await getVmXoTestPvId(xo)
      networkId = await getNetworkId(xo)
    })
    afterEach(async () => {
      await xo.call('vif.delete', { id: vifId })
    })

    it('create a VIF between the VM and the network', async () => {
      vifId = await xo.call('vm.createInterface', {
        vm: vmId,
        network: networkId,
        position: '1',
      })

      await waitObjectState(xo, vifId, vif => {
        expect(vif.type).to.be.equal('VIF')
        // expect(vif.attached).to.be.true()
        expect(vif.$network).to.be.equal(networkId)
        expect(vif.$VM).to.be.equal(vmId)
        expect(vif.device).to.be.equal('1')
      })
    })

    it('can not create two interfaces on the same device', async () => {
      vifId = await xo.call('vm.createInterface', {
        vm: vmId,
        network: networkId,
        position: '1',
      })
      await xo
        .call('vm.createInterface', {
          vm: vmId,
          network: networkId,
          position: '1',
        })
        .then(
          () => {
            throw new Error('createInterface() should have thrown')
          },
          function(error) {
            expect(error.message).to.be.equal('unknown error from the peer')
          }
        )
    })
  })

  // ---------------------------------------------------------------------

  describe('.stats()', () => {
    jest.setTimeout(20e3)
    beforeAll(async () => {
      vmId = await getVmXoTestPvId(xo)
    })
    beforeEach(async () => {
      await xo.call('vm.start', { id: vmId })
    })
    afterEach(async () => {
      await xo.call('vm.stop', {
        id: vmId,
        force: true,
      })
    })

    it('returns an object with statistics of the VM', async () => {
      const stats = await xo.call('vm.stats', { id: vmId })
      expect(stats).to.be.an.object()
    })
  })

  // ---------------------------------------------------------------------
  describe('.bootOrder()', () => {
    it('')
  })
})