Compare commits

..

253 Commits

Author SHA1 Message Date
Julien Fontanet
2641966285 5.7.0 2017-03-31 16:37:22 +02:00
Julien Fontanet
09dc2265fe fix(Xapi#uploadPoolPatch): use HTTPs 2017-03-31 11:19:27 +02:00
Julien Fontanet
a1fa139ef1 fix(Xapi): accept self-signed certs for export/import VDI/VM 2017-03-31 11:19:26 +02:00
badrAZ
df26a500c4 feat(servers): add a label to servers (#523)
See vatesfr/xo-web#1965
2017-03-29 16:32:02 +02:00
badrAZ
a465218ba3 feat(jobs): configure job timeout (#522)
See vatesfr/xo-web#1956
2017-03-29 10:39:52 +02:00
Julien Fontanet
1ead6eb916 fix(Xapi): export/import of VDI/VM should use HTTPs 2017-03-28 17:33:17 +02:00
Julien Fontanet
44fe1b31ba chore(package): update all dependencies 2017-03-24 11:39:35 +01:00
Julien Fontanet
0856d3d5c9 fix: update yarn.lock 2017-03-24 11:38:32 +01:00
Pierre Donias
f6b74ea836 feat(vm): affinity host (#521)
See vatesfr/xo-web#1983
2017-03-24 11:35:00 +01:00
greenkeeper[bot]
e0cef71700 fix(package): update ms to version 1.0.0 (#520)
https://greenkeeper.io/
2017-03-19 22:49:02 +01:00
Julien Fontanet
19d1f70458 fix(xapi-objects-to-xo): PV drivers check for XS >= 7.1
Fixes vatesfr/xo-web#2024
2017-03-17 18:05:05 +01:00
badrAZ
43c103e436 feat(new-vm): shared VM in resource set (#518)
Fixes vatesfr/xo-web#1964.
2017-03-15 14:45:36 +01:00
Julien Fontanet
6ff4f096a3 chore(build): do not build tests 2017-03-10 14:46:06 +01:00
Pierre Donias
d8d82441c3 fix(Xapi#createVm): only one disk should be bootable (#516)
See vatesfr/xo-web#1994.

For more complex setups, create the VMs and change bootable flags afterward.
2017-03-08 16:31:22 +01:00
Olivier Lambert
4f489e1854 feat(sr): add lvmohba SR type management (#517) 2017-03-08 14:03:57 +01:00
Julien Fontanet
9ab275df5d fix(vm.set): behave if the resource set no longer exist 2017-03-04 16:27:25 +01:00
Julien Fontanet
66c1754eb8 chore(package): update xo-common to v0.1.1 2017-03-04 16:16:58 +01:00
Julien Fontanet
e67bab1f5c fix(package): require Node >= 4.5 2017-03-04 03:24:49 +01:00
Julien Fontanet
ceb6667112 fix: coding style 2017-03-03 16:32:58 +01:00
badrAZ
cafba0b361 fix(vm.resume): remove unused force param (#514) 2017-03-03 15:49:12 +01:00
Julien Fontanet
3e4efcf297 fix(vm.importBackup): fix datetime in VM name 2017-03-01 14:57:02 +01:00
Julien Fontanet
4ccadac148 5.6.4 2017-03-01 14:46:09 +01:00
Julien Fontanet
6e148c18b0 fix(vm.importDeltaBackup): fix datetime in VM name (#513) 2017-03-01 14:34:29 +01:00
Julien Fontanet
75f849982e fix(Xapi#exportVdi): returned promise has a cancel() method (#512) 2017-03-01 14:12:56 +01:00
Julien Fontanet
8722ef45ac chore: add mention-bot config 2017-03-01 13:27:54 +01:00
Julien Fontanet
b347c78a8c fix(vm.create): do not get stuck when installing from ISO 2017-02-28 15:43:06 +01:00
Julien Fontanet
88ae24855a feat(models/utils/parseProp): do not warn on empty string 2017-02-28 09:58:41 +01:00
Julien Fontanet
356884ea53 feat: reduce the default debugging level 2017-02-28 09:58:41 +01:00
badrAZ
51fba21dd6 feat(vm.import): throw if type is xva and data is provided (#506) 2017-02-27 11:57:40 +01:00
Julien Fontanet
6aa5d608bf chore(package): babel-preset-es2015 → babel-preset-env 2017-02-27 11:22:34 +01:00
Julien Fontanet
21ad2c5744 fix(package): commit-msg → commitmsg 2017-02-27 11:20:40 +01:00
Julien Fontanet
dea1163159 fix(package): jest is a dev dependency 2017-02-27 11:20:19 +01:00
Julien Fontanet
c4c2e8cf74 chore(package): fix for jest 19 2017-02-27 11:19:38 +01:00
Julien Fontanet
5883c35cf3 chore(package): update all dependencies 2017-02-27 11:18:46 +01:00
Julien Fontanet
4d2719a424 5.6.3 2017-02-24 16:12:25 +01:00
Nicolas Raynaud
7cf2d0d01c fix(xosan) (#509)
- fix 2 bricks configuration
- limit bricks size to 2TB
- fix arbiter cleanup
2017-02-24 16:11:59 +01:00
Julien Fontanet
5a08b512a7 fix(xosan.createSR): use address prop 2017-02-24 14:56:27 +01:00
Pierre Donias
aa6ff6cd64 fix(xapi.installSupplementalPackOnAllHosts): multi-pipe stream (#508)
Fixes vatesfr/xo-web#1957
2017-02-24 11:41:47 +01:00
Julien Fontanet
89421d292c 5.6.2 2017-02-23 18:38:14 +01:00
Julien Fontanet
55c6515bac fix(vhdMerge): ensure parent.bat.size >= child.bat.size
Fixes vatesfr/xo-web#1939
2017-02-23 18:35:52 +01:00
Pierre Donias
5db6f6a58c feat(XOSAN) (#505) 2017-02-23 18:33:23 +01:00
Julien Fontanet
eeedf6ab28 feat(Vhd#ensurebatSize): no need to wait before writing footer 2017-02-23 18:31:05 +01:00
Julien Fontanet
3758cd207b fix(Vhd#ensureBatSize): Buffer#size → Buffer#length 2017-02-23 18:31:05 +01:00
Julien Fontanet
c15ede6239 fix(Vhd#ensureBatSize): avoid style error 2017-02-23 18:31:05 +01:00
Julien Fontanet
e54e31e059 feat(vhdMerge): avoid unnecessary footer write 2017-02-23 18:31:05 +01:00
Julien Fontanet
8c573aa8e4 chore(Vhd): add debugs 2017-02-23 18:31:05 +01:00
Julien Fontanet
654559086c chore(Vhd#writeBlockSectors): expect end sector instead of # of sectors 2017-02-23 18:31:05 +01:00
Julien Fontanet
b0d9679568 chore(Vhd#coalesceBlock): do not pass blockAddr 2017-02-23 18:31:05 +01:00
Julien Fontanet
87fdaf7fa7 chore(Vhd): merge readBlockBitmap() readBlockData() into _readBlock() 2017-02-23 18:31:05 +01:00
Julien Fontanet
bb3a365166 fix(Vhd#writeBlockSectors): pass correct params to Vhd#readBlockBitmap() 2017-02-23 18:31:05 +01:00
Julien Fontanet
2be183d980 various updates 2017-02-23 18:31:05 +01:00
Julien Fontanet
c6dc846838 fix(Vhd#ensureBatSize): extend BAT at correct offset 2017-02-23 18:31:05 +01:00
Julien Fontanet
1142f1d59a various updates 2017-02-23 18:31:05 +01:00
Julien Fontanet
126c470979 fix(Vhd#getEndOfHeaders): correctly compute parent locator size 2017-02-23 18:31:05 +01:00
Julien Fontanet
d679dc3e8b fix(vhdMerge): update diskGeometry and originalSize 2017-02-23 18:31:05 +01:00
Julien Fontanet
d5422dfe89 fix(Vhd#ensureBatSize): do not round maxTableEntries 2017-02-23 18:31:05 +01:00
Julien Fontanet
d64237b4f2 fix(package): dependency-check should ignore constant-stream 2017-02-23 18:31:05 +01:00
Julien Fontanet
7b7e4942f2 fix(constant-stream): optimize data length 2017-02-23 18:31:05 +01:00
Julien Fontanet
e4c343a587 chore(Vhd#_writeStream): merge back into Vhd#_write() 2017-02-23 18:31:05 +01:00
Julien Fontanet
1a8ae21478 feat(Vhd#_write): can also handle streams 2017-02-23 18:31:05 +01:00
Julien Fontanet
dd37a5b584 chore(Vhd#_write): simplify code 2017-02-23 18:31:05 +01:00
Julien Fontanet
eec340e6c0 chore(Vhd#_getBatEntry): remove logging 2017-02-23 18:31:05 +01:00
Julien Fontanet
c2fb5ba1f0 fix(vhdMerge): update currentSize and timestamp 2017-02-23 18:31:05 +01:00
Julien Fontanet
36d7e17b86 fix(Vhd#ensureBatSize): update BAT before writing footer 2017-02-23 18:31:05 +01:00
Julien Fontanet
5a1dc49428 fix(Vhd#ensureBatSize): various fixes 2017-02-23 18:31:05 +01:00
Julien Fontanet
47caf54772 fix(Vhd#ensureBatSize): extend local BAT buffer as well 2017-02-23 18:31:05 +01:00
Julien Fontanet
6af50a8c44 feat(Vhd#ensurebatSize): do not move first block if not necessary 2017-02-23 18:31:05 +01:00
Julien Fontanet
1b27407970 chore(Vhd#_getFirstAndLastBlocks): value → sector 2017-02-23 18:31:05 +01:00
Julien Fontanet
4da6306c67 fix(Vhd#_getFirstAndLastBlocks): should not be async 2017-02-23 18:31:05 +01:00
Julien Fontanet
f950b7a725 fix(Vhd#_readBlockData) 2017-02-23 18:31:05 +01:00
Julien Fontanet
930cf9ed04 fix(Vhd#ensureBatSize): update header.maxTableEntries 2017-02-23 18:31:05 +01:00
Julien Fontanet
744016e752 chore(vhd-merge): Vhd#_read() 2017-02-23 18:31:05 +01:00
Julien Fontanet
2fb4e907df fix(vhdMerge): ensure parent.bat.size >= child.bat.size
Fixes vatesfr/xo-web#1939
2017-02-23 18:31:05 +01:00
Julien Fontanet
ef2a815e52 chore(vhdMerge): ensure block sizes are identical 2017-02-23 18:31:05 +01:00
Julien Fontanet
33a81d4f3c chore(vhd-merge): rename and clean BAT methods
- readAllocationTableEntry() → _getBatEntry()
- writeAllocationTableEntry() → _setBatEntry()
2017-02-23 18:31:05 +01:00
Julien Fontanet
4911df0bf9 chore(Vhd#writeAllocationTableEntry): commit the change in the file 2017-02-23 18:31:05 +01:00
Julien Fontanet
2b612b5db7 feat(constant-stream): emit the same data n times 2017-02-23 18:31:05 +01:00
Julien Fontanet
bfe81b52ef chore(vhd-merge): Vhd#_writeStream() 2017-02-23 18:31:05 +01:00
Julien Fontanet
26f6a4beb9 chore(vhd-merge): Vhd#_readStream() 2017-02-23 18:31:05 +01:00
Julien Fontanet
52e9c3053a fix(console proxy): works with ws v2
Fixes vatesfr/xo-web#1954
2017-02-23 16:59:41 +01:00
Julien Fontanet
908d1f1ec8 feat(Xo#defineProperties): helper to define multiple properties 2017-02-23 12:23:18 +01:00
Julien Fontanet
6a1120f95b feat(Xo#defineProperty): accept a thisArg param 2017-02-23 12:18:54 +01:00
Julien Fontanet
2a2780c25c feat(Xapi#deleteVm): delete disks by default 2017-02-23 11:06:10 +01:00
Julien Fontanet
7d4152197b chore: update yarn.lock 2017-02-22 22:07:52 +01:00
greenkeeper[bot]
9c742600ff fix(package): update jest to version 19.0.1 (#504)
https://greenkeeper.io/
2017-02-22 12:10:12 +01:00
greenkeeper[bot]
a035bf132a fix(package): update pretty-format to version 19.0.0 (#503)
https://greenkeeper.io/
2017-02-21 10:52:37 +01:00
Julien Fontanet
b989d157a0 feat(redis): can connect via Unix socket
Fixes vatesfr/xo-web#1944
2017-02-16 11:32:35 +01:00
Julien Fontanet
261587511b chore(package): update all dependencies 2017-02-13 16:13:19 +01:00
Julien Fontanet
ff798801fb chore(package): update xen-api to v0.10.0-2 2017-02-13 16:08:09 +01:00
Julien Fontanet
9b4aab0d19 chore: update yarn.lock 2017-02-13 16:07:20 +01:00
greenkeeper[bot]
6e42cf9952 fix(package): update fs-promise to version 2.0.0 (#501)
https://greenkeeper.io/
2017-02-13 10:34:33 +01:00
Julien Fontanet
4c3a8ca312 fix(Xapi#createVm): wait for VM record to have all its VBDs/VIFs (#500) 2017-02-09 16:49:58 +01:00
Julien Fontanet
a63eb48f03 fix(Xapi#createVm): do not wait for a new record 2017-02-09 16:06:25 +01:00
Julien Fontanet
d0214f805e fix(Xapi#createVm): _waitObject → _waitObjectState 2017-02-09 16:06:25 +01:00
badrAZ
d736bd6501 fix(vm.create): VIFs param is optional (#499) 2017-02-09 11:16:20 +01:00
Pierre Donias
2ce4a11e0a fix(vm.delete): import mapFilter (#497) 2017-02-07 11:27:30 +01:00
Pierre Donias
e5ab8fe3e4 feat(vm.delete): remove ACLs (#496) 2017-02-02 22:11:13 +01:00
Julien Fontanet
657b74a084 Revert "feat(vm.snapshot): copy ACLs from VM to snapshot (#495)"
This reverts commit dfee98b66b.

Should not be necessary: snapshots inherit ACLs from their VM.
2017-02-01 10:24:26 +01:00
Pierre Donias
dfee98b66b feat(vm.snapshot): copy ACLs from VM to snapshot (#495)
See vatesfr/xo-web#1865

Also, correctly remove ACLs on VM deletion.
2017-02-01 10:22:17 +01:00
Julien Fontanet
f65b9f695e 5.6.1 2017-01-30 18:07:39 +01:00
Julien Fontanet
4056385cd3 feat(backups): do not rely on JSON format for lvm commands (#493) 2017-01-30 18:04:27 +01:00
Pierre Donias
96d56d43bc feat(Xapi#installSupplementalPackOnAllHosts) (#491)
See vatesfr/xo-web#1896
2017-01-30 10:53:26 +01:00
Julien Fontanet
eba8f95e58 5.6.0 2017-01-27 16:42:07 +01:00
Julien Fontanet
7e2da1ff93 [WiP] feat(backups): implements file restore for LVM (#490)
Fixes vatesfr/xo-web#1878
2017-01-27 16:37:34 +01:00
Pierre Donias
b7b7e81468 feat(host.installSupplementalPack) (#487)
See vatesfr/xo-web#1460
2017-01-25 16:08:31 +01:00
Pierre Donias
0c7768f5d2 fix(vm.delete): IP addresses should always be deallocated (#488)
Fixes vatesfr/xo-web#1906
2017-01-25 15:46:33 +01:00
Pierre Donias
8fe6a56dfc fix(Xapi#installAllPoolPatchesOnHost): ignore PATCH_ALREADY_APPLIED error (#489)
Fixes vatesfr/xo-web#1904
2017-01-25 15:46:15 +01:00
Julien Fontanet
7b9dae980d fix(vm.create): properly handle optional param VDIs 2017-01-24 13:36:36 +01:00
Olivier Lambert
b59ba6b7bb feat(api): add description for some API calls (#486)
Fixes vatesfr/xo-web#1882
2017-01-17 15:15:18 +01:00
Julien Fontanet
8cdee4d173 chore(xo): disable too many listeners warning 2017-01-16 15:50:18 +01:00
Julien Fontanet
c9ed5fbe00 chore: update yarn.lock 2017-01-16 15:18:46 +01:00
Julien Fontanet
e698e89968 feat(/signout): URL to sign out 2017-01-16 14:33:58 +01:00
Pierre Donias
02f198d42c feat(backup.fetchFiles): multiple files support (#485)
See vatesfr/xo-web#1877
2017-01-16 09:33:22 +01:00
Pierre Donias
61d2d0263b feat(patching): eject tools ISOs before patching host (#479)
Fixes #1798
2017-01-13 18:20:31 +01:00
badrAZ
ed477e99a8 feat(plugin): provide a getDataDir() to plugins (#483)
It returns the path of a directory where the plugin can store data.
2017-01-13 18:13:44 +01:00
Olivier Lambert
1449be8d66 feat(host): expose supplemental packs (#480) 2017-01-12 17:54:48 +01:00
greenkeeper[bot]
28902d8747 fix(package): update execa to version 0.6.0 (#478)
https://greenkeeper.io/
2017-01-09 10:50:31 +01:00
Julien Fontanet
d534592479 5.5.4 2017-01-06 16:57:47 +01:00
Pierre Donias
b2f6ea9116 fix(vm.set): allocate resources when editing VM (#477)
Fixes vatesfr/xo-web#1695
2017-01-06 16:54:49 +01:00
Pierre Donias
8bf38bb29b feat(server): store connection error in database (#472)
See vatesfr/xo-web#1833
2017-01-06 16:38:17 +01:00
greenkeeper[bot]
9c6a78b678 fix(package): update promise-toolbox to version 0.8.0 (#476)
https://greenkeeper.io/
2017-01-06 11:34:27 +01:00
Pierre Donias
850199d7fc fix(resource-sets): recompute limits (#475)
Fixes vatesfr/xo-web#1866
2017-01-06 10:09:36 +01:00
Pierre Donias
4282928960 fix(vif/create): locking mode when allocating IP addresses (#474)
Fixes vatesfr/xo-web#1747
2017-01-06 09:55:55 +01:00
Julien Fontanet
356dd89d9f chore(package): upgrade jest to v 0.18.1 2017-01-03 18:30:28 +01:00
Julien Fontanet
7dd2391e5a fix(group.setUsers): oldUsers → oldUsersIds 2017-01-03 11:20:25 +01:00
Julien Fontanet
e0093f236a fix(group.create): do not attempt to parse empty prop 2017-01-03 10:47:10 +01:00
Julien Fontanet
8c5c32268a fix: users and groups serialization in Redis
Fixes vatesfr/xo-web#1852.
2017-01-02 16:52:51 +01:00
greenkeeper[bot]
b61ccc1af1 fix(package): update hashy to version 0.6.0 (#470)
https://greenkeeper.io/
2017-01-02 13:01:29 +01:00
Julien Fontanet
7caf0e40f4 5.5.3 2017-01-02 10:56:08 +01:00
Julien Fontanet
a16508db10 fix(remotes): do not error on disabled remote
- testRemote()
- updateRemote()
- remoteRemote()
- forgetAllRemotes()
2016-12-25 20:07:42 +01:00
Julien Fontanet
81bff342b9 chore(package): update decorator-synchronized to version 0.2.3 2016-12-22 16:25:46 +01:00
Julien Fontanet
49d41a76a0 5.5.2 2016-12-22 11:22:45 +01:00
Julien Fontanet
b1732b3298 fix(file restore): work around for invalid delta VHD path (#467)
See vatesfr/xo-web#1842.
2016-12-22 11:20:51 +01:00
Julien Fontanet
9372cdb6c7 fix(vm.rollingDeltaBackup): do not hide error 2016-12-22 10:21:38 +01:00
Julien Fontanet
1d8e54b83e chore(backups): use directly Xo#getRemoteHandler() 2016-12-22 09:50:16 +01:00
Julien Fontanet
30c5600271 chore(Xo#getRemoteHandler): use intermediary variable 2016-12-22 09:49:36 +01:00
Julien Fontanet
9f7e5c3a9a fix(Xo#getRemoteHandler): throws if remote is disabled 2016-12-22 09:49:04 +01:00
Julien Fontanet
37c9342717 fix(vm.rollingDeltaBackup): correctly delete snapshot in case of failure 2016-12-21 22:35:43 +01:00
Julien Fontanet
8827f8e940 fix(backup.fetchFiles): encode URI suffix
Fix issue with whitespaces in the filename.
2016-12-20 17:07:13 +01:00
Julien Fontanet
58334bf4a1 fix(backup.list): timestamps should be integers 2016-12-20 17:07:13 +01:00
Julien Fontanet
b898a6702c chore(package): use husky instead of ghooks 2016-12-20 17:07:13 +01:00
Julien Fontanet
6d78a810b9 perf(RemoteHandlerAbstract/createReadStream): optimise code
- avoid async function: overhead with transpilation
- do as much as possible in parallel
- fix: do not add length property in range mode
2016-12-20 17:07:13 +01:00
Julien Fontanet
8fc4eb8cdf 5.5.1 2016-12-20 13:38:02 +01:00
Julien Fontanet
b3fac0c56f fix(backup.list): datetimes should be timestamps 2016-12-20 12:50:17 +01:00
Julien Fontanet
0b063b1f5e 5.5.0 2016-12-20 12:29:16 +01:00
Olivier Lambert
480f05e676 feat(vm): add install time (#465) 2016-12-20 12:19:11 +01:00
Julien Fontanet
1ac8af34ec feat(backup): implement file restore (#461)
See vatesfr/xo-web#1590

Current implementation has following limitations:

- only support local and NFS remotes
- requires installation of libvhdi-utils
- files can only be recovered one by one
2016-12-20 12:18:22 +01:00
Julien Fontanet
34ff8b0f02 feat(Xapi#exportDeltaVm): don't export VDIs with names starting with [NOBAK] (#464)
Fixes vatesfr/xo-web#826
2016-12-14 10:57:25 +01:00
Julien Fontanet
77c3684e28 chore(tests): execute tests directly in src/ 2016-12-13 18:20:17 +01:00
Julien Fontanet
93038ea838 chore(package): remove unused trace 2016-12-13 14:08:38 +01:00
Julien Fontanet
46348f7cba feat: yarn integration 2016-12-13 12:15:26 +01:00
Julien Fontanet
ccc0e45daf feat(tests): use Jest instead of mocha/chai/must 2016-12-13 12:15:03 +01:00
Julien Fontanet
46ca03b017 chore(package): clean scripts 2016-12-13 11:55:12 +01:00
Julien Fontanet
1bfe3197a5 chore(Travis): test with Node stable 2016-12-13 11:51:04 +01:00
Julien Fontanet
4d2617fe68 chore(package): requires Node >= 4 2016-12-13 11:49:54 +01:00
Julien Fontanet
92e289f9da fix(decorators/mixin): do not use arrow function for constructor
It works because of the transpilation but it's not valid ES2015.
2016-12-13 11:41:41 +01:00
greenkeeper[bot]
a8c7558a77 chore(package): update index-modules to version 0.2.1 (#463) 2016-12-12 16:49:10 +01:00
greenkeeper[bot]
c756e7ecbe chore(package): update index-modules to version 0.2.0 (#462)
https://greenkeeper.io/
2016-12-12 16:16:44 +01:00
Pierre Donias
1998c56e84 feat(vm.delete): release resource set and IP-pool addresses (#460)
Fixes vatesfr/xo-web#1657, fixes vatesfr/xo-web#1748
2016-12-12 15:14:31 +01:00
Julien Fontanet
2ed55b1616 chore(decorators): remove unused @autobind. 2016-12-08 11:47:17 +01:00
Julien Fontanet
0c8d456fd3 chore(package): use bind-property-descriptor instead of custom implementation 2016-12-08 11:46:29 +01:00
Julien Fontanet
9e4924caf6 5.4.1 2016-12-02 16:37:17 +01:00
Julien Fontanet
7f391a5860 Merge branch 'next-release' into stable 2016-12-02 16:37:13 +01:00
Julien Fontanet
5c7249c8fc fix(Xapi#exportDeltaVm): remove TAG_BASE_DELTA if full export
Fixes vatesfr/xo-web#1811
2016-12-02 16:09:27 +01:00
Pierre Donias
932d00133d feat(job-executor.match): __not pattern property (#459)
See vatesfr/xo-web#1503
2016-12-01 14:56:52 +01:00
Julien Fontanet
32a371bf13 chore(package): use golike-defer instead of custom implementation 2016-11-30 15:40:30 +01:00
Julien Fontanet
5d0622d2cf 5.4.0 2016-11-23 11:10:01 +01:00
Pierre Donias
9ab9155bf0 fix(vif.set): remove old VIF before creating new one (#457)
Fixes #1784
2016-11-23 10:38:24 +01:00
Julien Fontanet
86a1ed6d46 chore(package): remove unused nyc 2016-11-23 10:00:45 +01:00
Julien Fontanet
b3c9936d74 chore(package): update xen-api to v0.9.6 2016-11-23 09:58:04 +01:00
greenkeeper[bot]
21b4d7cf11 chore(package): update nyc to version 10.0.0 (#456)
https://greenkeeper.io/
2016-11-23 09:12:26 +01:00
greenkeeper[bot]
4ec07f9ff8 fix(package): update get-stream to version 3.0.0 (#458)
https://greenkeeper.io/
2016-11-23 09:11:39 +01:00
greenkeeper[bot]
b7c89d6f64 fix(package): update http-server-plus to version 0.8.0 (#454)
https://greenkeeper.io/
2016-11-18 14:44:50 +01:00
greenkeeper[bot]
0eb168ec70 fix(package): update uuid to version 3.0.0 (#453)
https://greenkeeper.io/
2016-11-18 09:10:07 +01:00
Olivier Lambert
8ac1a66e93 feat(sr.shared): new boolean property (#452) 2016-11-17 14:33:45 +01:00
badrAZ
301da3662a fix(plugin.test): data param is optional (#451) 2016-11-16 16:08:11 +01:00
greenkeeper[bot]
e474946cb7 fix(package): update xo-common to version 0.1.0 (#450)
https://greenkeeper.io/
2016-11-16 12:01:27 +01:00
Pierre Donias
9a0ca1ebb2 feat(api): map 10 XAPI errors to XO errors (#449)
Fixes vatesfr/xo-web#1481
2016-11-16 11:22:31 +01:00
Julien Fontanet
520f7b2a77 feat(job.create,job.set): ability to set userId (#448)
See vatesfr/xo-web#1733
2016-11-14 17:42:19 +01:00
Pierre Donias
c0b3b3aab8 Fix userId. 2016-11-14 16:59:10 +01:00
Pierre Donias
d499332ce3 It should be possible to not change a job's user. 2016-11-14 15:56:54 +01:00
Pierre Donias
19ce06e0bb feat(job#create,job#set): userId parameter
See vatesfr/xo-web#1733
2016-11-14 15:33:09 +01:00
greenkeeper[bot]
ea6ff4224e fix(package): update fs-promise to version 1.0.0 (#447)
https://greenkeeper.io/
2016-11-10 08:56:37 +01:00
Julien Fontanet
871d1f8632 fix(plugins registration): params order 2016-11-09 17:05:10 +01:00
badrAZ
77ce2ff6d1 feat(plugin.test): plugins can be tested (#446)
See vatesfr/xo-web#1749
2016-11-09 14:58:19 +01:00
Pierre Donias
6383104796 fix(Xapi#editPif): destroy VLAN from each PIF before creating new VLAN (#444) 2016-11-08 16:50:12 +01:00
Julien Fontanet
b99b4159c8 feat(Redis): support aliased commands
Fixes #443
2016-11-08 10:23:53 +01:00
Olivier Lambert
8bedb1f3b9 Merge pull request #442 from vatesfr/pierre-fix-xo-error
fix(api): xoError is not an object
2016-11-07 18:18:45 +01:00
Pierre Donias
dc85804a27 fix(api): xoError is not an object 2016-11-07 17:58:16 +01:00
greenkeeper[bot]
42a31e512a fix(package): update json-rpc-peer to version 0.13.0 (#441)
https://greenkeeper.io/
2016-11-07 14:57:53 +01:00
Pierre Donias
2be7388696 feat(api-errors): throw custom errors when XAPI error is caught (#440)
See vatesfr/xo-web#1717
2016-11-07 14:15:23 +01:00
Julien Fontanet
bc5b00781b 5.3.3 2016-11-04 11:44:09 +01:00
Olivier Lambert
313e2b3de6 fix(Sr): add type cifs in deviceConfig. Fixes vatesfr/xo-web#1615 (#439) 2016-11-04 11:42:03 +01:00
Julien Fontanet
0bbd002060 fix(xo.importConfig): dont unnecessarily delete existing users
Do not delete existing users with same name & id
2016-11-04 09:42:56 +01:00
Julien Fontanet
5e785266a5 fix(xo.importConfig): correctly import ACLs
Fixes vatesfr/xo-web#1722
2016-11-04 09:40:41 +01:00
Julien Fontanet
5870769e7d fix(vm.import{,Delta}Backup): make restored VMs identifiable
Their names is prefixed with the exported date and they have a specific tag (*restored from backup*).

Fixes vatesfr/xo-web#1719
2016-11-03 16:22:42 +01:00
Julien Fontanet
79b80dcd07 fix(pif#carrier): cast to boolean 2016-11-02 16:50:12 +01:00
Olivier Lambert
6f6e547e6c feat(pif): add carrier (#438)
Fixes vatesfr/xo-web#1702
2016-11-02 16:25:44 +01:00
greenkeeper[bot]
352c9357df chore(package): update dependencies (#437)
https://greenkeeper.io/
2016-11-01 19:05:11 +01:00
Pierre Donias
1ba4641641 feat(acls): handle xo.clean (#436) 2016-10-31 15:53:50 +01:00
Greenkeeper
60e0047285 chore(package): update helmet to version 3.0.0 (#435)
https://greenkeeper.io/
2016-10-29 12:52:18 +02:00
Pierre Donias
235e7c143c fix(signin): new Bootstrap classes (#434) 2016-10-28 10:11:41 +02:00
Julien Fontanet
522d6eed92 5.3.2 2016-10-27 18:49:32 +02:00
Julien Fontanet
9d1d6ea4c5 feat(xo): export/import config (#427)
See vatesfr/xo-web#786
2016-10-27 18:48:19 +02:00
Julien Fontanet
0afd506a41 5.3.1 2016-10-27 18:25:16 +02:00
Julien Fontanet
9dfb837e3f fix(Xapi#importDeltaVm): gracefully handle missing vif.$network$uuid (#433) 2016-10-27 16:46:45 +02:00
fufroma
4ab63b569f fix(RemoteHandlerNfs): move mount points in /run/xo-server/mounts
Fixes vatesfr/xo-web#1405
2016-10-27 15:56:33 +02:00
Julien Fontanet
8d390d256d fix(http-request): handle redirections (#432) 2016-10-27 15:34:54 +02:00
Julien Fontanet
4eec5e06fc fix(package): test on Node 6, not 7 (#431) 2016-10-27 12:24:40 +02:00
Julien Fontanet
e4063b1ba8 feat(sample.config.yaml): add warning about YAML 2016-10-24 22:52:11 +02:00
Greenkeeper
0c3227cf8e chore(package): update promise-toolbox to version 0.7.0 (#428)
https://greenkeeper.io/
2016-10-24 15:01:17 +02:00
Pierre Donias
7bed200bf5 feat(pif): editVlan (#426)
Fix vatesfr/xo-web#1092
2016-10-24 10:24:44 +02:00
Julien Fontanet
4f763e2109 5.3.0 2016-10-20 16:01:53 +02:00
Pierre Donias
75167fb65b feat(pif): expose IP config modes (#424)
See vatesfr/xo-web#1651
2016-10-20 12:44:35 +02:00
Julien Fontanet
675588f780 feat(delta backups): force checksums refresh
See vatesfr/xo-web#1672
2016-10-20 12:38:26 +02:00
Julien Fontanet
2d6f94edd8 fix(vhd-merge/chainVhd): correctly await _write()
Fixes vatesfr/xo-web#1672
2016-10-20 12:31:20 +02:00
Julien Fontanet
247c66ef4b feat(IP pools): can be used in resource sets (#413)
See vatesfr/xo-web#1565
2016-10-19 11:17:05 +02:00
Greenkeeper
1076fac40f Update gulp-sourcemaps to version 2.1.1 🚀 (#422)
https://greenkeeper.io/
2016-10-14 10:44:27 +02:00
Julien Fontanet
14a4a415a2 5.2.6 2016-10-13 18:51:16 +02:00
Julien Fontanet
524355b59c fix(vhd-merge/chainVhd): correctly compute header checksum (#419)
Fixes vatesfr/xo-web#1656
2016-10-13 18:49:58 +02:00
Greenkeeper
36fe49f3f5 Update promise-toolbox to version 0.6.0 🚀 (#416)
https://greenkeeper.io/
2016-10-12 09:19:19 +02:00
Greenkeeper
c0c0af9b14 chore(package): update execa to version 0.5.0 (#411)
https://greenkeeper.io/
2016-10-05 10:40:31 +02:00
Julien Fontanet
d1e472d482 chore(package): use babel-plugin-lodash 2016-10-04 16:05:01 +02:00
Julien Fontanet
c80e43ad0d fix(vm.create): don't require view perm on VM template 2016-10-04 16:03:06 +02:00
Julien Fontanet
fdd395e2b6 fix(vm.create): correctly check resourceSet objects
Related to vatesfr/xo-web#1620
2016-10-04 15:51:04 +02:00
Julien Fontanet
e094437168 fix(package): update xo-acl-resolver to version 0.2.2
See vatesfr/xo-web#1620
2016-10-04 15:24:01 +02:00
Pierre Donias
2ee0be7466 fix(xapi/utils/makeEditObject): constraints works with user props (#410) 2016-10-04 15:02:27 +02:00
Julien Fontanet
2784a7cc92 Create ISSUE_TEMPLATE.md 2016-10-03 16:24:24 +02:00
Julien Fontanet
b09f998d6c 5.2.5 2016-10-03 09:39:52 +02:00
Nicolas Raynaud
bdeb5895f6 fix(deltaBackups): update checksum after altering VHD files (#408)
Fixes vatesfr/xo-web#1606
2016-09-30 14:31:33 +02:00
Pierre Donias
3944b8aaee feat(network): create a bonded network (#407)
Fixes vatesfr/xo-web#876
2016-09-30 13:51:33 +02:00
Nicolas Raynaud
6e66cffb92 feat(deltaBackups): correctly chain VHDs (#406)
The goal is for a tool like vhdimount to be able to mount any file and use it as a disk to recover specific file in it.
2016-09-29 17:31:36 +02:00
Pierre Donias
57092ee788 feat(vif.set): support for network, MAC and currently_attached (#403)
Fixes vatesfr/xo-web#1446
2016-09-28 15:09:17 +02:00
Julien Fontanet
70e9e1c706 chore(package): update human-format to version 0.7.0 2016-09-28 09:58:54 +02:00
Greenkeeper
9662b8fbee chore(package): update babel-eslint to version 7.0.0 (#404)
https://greenkeeper.io/
2016-09-27 23:39:30 +02:00
Julien Fontanet
9f66421ae7 fix(bootstrap): C-c twice force stop the server 2016-09-27 10:44:24 +02:00
Greenkeeper
50584c2e50 chore(package): update http-server-plus to version 0.7.0 (#402)
https://greenkeeper.io/
2016-09-27 09:30:16 +02:00
Julien Fontanet
7be4e1901a chore(package): use index-modules 2016-09-26 15:41:41 +02:00
Julien Fontanet
b47146de45 fix(pbd/attached): should be a boolean 2016-09-22 13:20:49 +02:00
Julien Fontanet
97b229b2c7 fix(vm.set): works with VM templates
Fixes vatesfr/xo-web#1569
2016-09-22 10:39:20 +02:00
Julien Fontanet
6bb5bb9403 5.2.4 2016-09-21 10:20:46 +02:00
Julien Fontanet
8c4b8271d8 fix(pool.setDefaultSr): remove pool param
Fixes vatesfr/xo-web#1558
2016-09-20 11:45:36 +02:00
Julien Fontanet
69291c0574 chore(package): update xo-vmdk-to-vhd to version 0.0.12
Fixes vatesfr/xo-web#1551
2016-09-20 10:41:42 +02:00
Julien Fontanet
2dc073dcd6 fix(vm.resourceSet): handle xo namespace 2016-09-19 13:15:23 +02:00
Julien Fontanet
1894cb35d2 feat(vm): expose resourceSet prop 2016-09-19 12:10:09 +02:00
Julien Fontanet
cd37420b07 Merge pull request #398 from vatesfr/greenkeeper-standard-8.1.0
Update standard to version 8.1.0 🚀
2016-09-18 05:17:41 +02:00
Julien Fontanet
55cb6b39db fix(Xo#removeSchedule): correctly test instance of SchedulerError 2016-09-18 05:12:36 +02:00
greenkeeperio-bot
89d13b2285 chore(package): update standard to version 8.1.0
https://greenkeeper.io/
2016-09-17 20:51:59 +02:00
Julien Fontanet
1b64b0468a fix(group.delete): remove associated ACLs
Fixes vatesfr/xo-web#899
2016-09-16 16:04:41 +02:00
Julien Fontanet
085fb83294 fix(user.delete): remove associated ACLs
See vatesfr/xo-web#899
2016-09-16 16:04:41 +02:00
Julien Fontanet
edd606563f feat(vm.revert): can snapshot before (#395)
See vatesfr/xo-web#1445
2016-09-15 14:59:43 +02:00
Julien Fontanet
fb804e99f0 5.2.3 2016-09-14 18:02:32 +02:00
Pierre Donias
1707cbcb54 feat(signin): use XO 5 style (#394)
Fixes vatesfr/xo-web#1161
2016-09-14 17:56:05 +02:00
90 changed files with 10409 additions and 1613 deletions

3
.mention-bot Normal file
View File

@@ -0,0 +1,3 @@
{
"userBlacklist": [ "greenkeeper", "Wescoeur" ]
}

View File

@@ -1 +0,0 @@
--require ./better-stacks.js

View File

@@ -1,8 +1,8 @@
language: node_js
node_js:
# - 'stable'
- '4'
- '0.12'
- stable
- 6
- 4
# Use containers.
# http://docs.travis-ci.com/user/workers/container-based-infrastructure/

3
ISSUE_TEMPLATE.md Normal file
View File

@@ -0,0 +1,3 @@
# ALL ISSUES SHOULD BE CREATED IN XO-WEB'S TRACKER!
https://github.com/vatesfr/xo-web/issues

View File

@@ -1,13 +1,5 @@
Error.stackTraceLimit = 100
// Async stacks.
//
// Disabled for now as it cause a huge memory usage with
// fs.createReadStream().
// TODO: find a way to reenable.
//
// try { require('trace') } catch (_) {}
// Removes internal modules.
try {
var sep = require('path').sep

View File

@@ -58,7 +58,7 @@ gulp.task(function buildCoffee () {
})
gulp.task(function buildEs6 () {
return src('**/*.js')
return src([ '**/*.js', '!*.spec.js' ])
.pipe(sourceMaps.init())
.pipe(babel())
.pipe(sourceMaps.write('.'))

View File

@@ -4,7 +4,7 @@
// Enable xo logs by default.
if (process.env.DEBUG === undefined) {
process.env.DEBUG = 'app-conf,xen-api,xo:*'
process.env.DEBUG = 'app-conf,xo:*,-xo:api'
}
// Import the real main module.

View File

@@ -1,6 +1,6 @@
{
"name": "xo-server",
"version": "5.2.2",
"version": "5.7.0",
"license": "AGPL-3.0",
"description": "Server part of Xen-Orchestra",
"keywords": [
@@ -31,132 +31,146 @@
"bin": "bin"
},
"engines": {
"node": ">=0.12"
"node": ">=4.5"
},
"dependencies": {
"@marsaud/smb2-promise": "^0.2.1",
"app-conf": "^0.4.0",
"babel-runtime": "^6.5.0",
"@nraynaud/struct-fu": "^1.0.1",
"app-conf": "^0.4.1",
"archiver": "^1.3.0",
"arp-a": "^0.5.1",
"babel-runtime": "^6.23.0",
"base64url": "^2.0.0",
"blocked": "^1.1.0",
"bluebird": "^3.1.1",
"body-parser": "^1.13.3",
"bind-property-descriptor": "^0.0.0",
"blocked": "^1.2.1",
"bluebird": "^3.5.0",
"body-parser": "^1.17.1",
"connect-flash": "^0.1.1",
"cookie": "^0.3.0",
"cookie-parser": "^1.3.5",
"cron": "^1.0.9",
"d3-time-format": "^2.0.0",
"debug": "^2.1.3",
"escape-string-regexp": "^1.0.3",
"event-to-promise": "^0.7.0",
"cookie": "^0.3.1",
"cookie-parser": "^1.4.3",
"cron": "^1.2.1",
"d3-time-format": "^2.0.5",
"debug": "^2.6.3",
"decorator-synchronized": "^0.2.3",
"escape-string-regexp": "^1.0.5",
"event-to-promise": "^0.8.0",
"exec-promise": "^0.6.1",
"execa": "^0.4.0",
"express": "^4.13.3",
"express-session": "^1.11.3",
"fatfs": "^0.10.3",
"fs-extra": "^0.30.0",
"fs-promise": "^0.5.0",
"get-stream": "^2.1.0",
"hashy": "~0.4.2",
"helmet": "^2.0.0",
"highland": "^2.5.1",
"http-proxy": "^1.13.2",
"http-server-plus": "^0.6.4",
"human-format": "^0.6.0",
"is-my-json-valid": "^2.13.1",
"js-yaml": "^3.2.7",
"json-rpc-peer": "^0.12.0",
"json5": "^0.5.0",
"execa": "^0.6.3",
"express": "^4.15.2",
"express-session": "^1.15.1",
"fatfs": "^0.10.4",
"from2": "^2.3.0",
"fs-extra": "^2.1.2",
"fs-promise": "^2.0.1",
"golike-defer": "^0.0.0",
"hashy": "~0.6.1",
"helmet": "^3.5.0",
"highland": "^2.10.5",
"http-proxy": "^1.16.2",
"http-server-plus": "^0.8.0",
"human-format": "^0.8.0",
"is-my-json-valid": "^2.16.0",
"is-redirect": "^1.0.0",
"js-yaml": "^3.8.2",
"json-rpc-peer": "^0.13.1",
"json5": "^0.5.1",
"julien-f-source-map-support": "0.0.0",
"julien-f-unzip": "^0.2.1",
"kindof": "^2.0.0",
"level": "^1.3.0",
"level": "^1.6.0",
"level-party": "^3.0.4",
"level-sublevel": "^6.5.2",
"leveldown": "^1.4.2",
"lodash": "^4.13.1",
"level-sublevel": "^6.6.1",
"leveldown": "^1.6.0",
"lodash": "^4.17.4",
"make-error": "^1",
"micromatch": "^2.3.2",
"micromatch": "^2.3.11",
"minimist": "^1.2.0",
"moment-timezone": "^0.5.4",
"ms": "^0.7.1",
"multikey-hash": "^1.0.1",
"ndjson": "^1.4.3",
"moment-timezone": "^0.5.11",
"ms": "^1.0.0",
"multikey-hash": "^1.0.4",
"ndjson": "^1.5.0",
"parse-pairs": "^0.2.2",
"partial-stream": "0.0.0",
"passport": "^0.3.0",
"passport": "^0.3.2",
"passport-local": "^1.0.0",
"promise-toolbox": "^0.5.0",
"pretty-format": "^19.0.0",
"promise-toolbox": "^0.8.2",
"proxy-agent": "^2.0.0",
"pug": "^2.0.0-alpha6",
"redis": "^2.0.1",
"schema-inspector": "^1.5.1",
"semver": "^5.1.0",
"serve-static": "^1.9.2",
"stack-chain": "^1.3.3",
"struct-fu": "^1.0.0",
"pug": "^2.0.0-beta11",
"redis": "^2.7.1",
"schema-inspector": "^1.6.8",
"semver": "^5.3.0",
"serve-static": "^1.12.1",
"split-lines": "^1.1.0",
"stack-chain": "^1.3.7",
"tar-stream": "^1.5.2",
"through2": "^2.0.0",
"trace": "^2.0.1",
"ws": "^1.1.1",
"xen-api": "^0.9.4",
"xml2js": "~0.4.6",
"xo-acl-resolver": "^0.2.1",
"xo-collection": "^0.4.0",
"through2": "^2.0.3",
"tmp": "^0.0.31",
"uuid": "^3.0.1",
"ws": "^2.2.2",
"xen-api": "^0.10.0-2",
"xml2js": "~0.4.17",
"xo-acl-resolver": "^0.2.3",
"xo-collection": "^0.4.1",
"xo-common": "^0.1.1",
"xo-remote-parser": "^0.3",
"xo-vmdk-to-vhd": "0.0.5"
"xo-vmdk-to-vhd": "0.0.12"
},
"devDependencies": {
"babel-eslint": "^6.0.4",
"babel-eslint": "^7.2.1",
"babel-plugin-lodash": "^3.2.11",
"babel-plugin-transform-decorators-legacy": "^1.3.4",
"babel-plugin-transform-runtime": "^6.5.2",
"babel-preset-es2015": "^6.5.0",
"babel-preset-stage-0": "^6.5.0",
"chai": "^3.0.0",
"dependency-check": "^2.4.0",
"ghooks": "^1.0.3",
"babel-plugin-transform-runtime": "^6.23.0",
"babel-preset-env": "^1.2.2",
"babel-preset-stage-0": "^6.22.0",
"dependency-check": "^2.8.0",
"gulp": "git://github.com/gulpjs/gulp#4.0",
"gulp-babel": "^6",
"gulp-coffee": "^2.3.1",
"gulp-plumber": "^1.0.0",
"gulp-sourcemaps": "^1.5.1",
"gulp-watch": "^4.2.2",
"leche": "^2.1.1",
"mocha": "^3.0.2",
"must": "^0.13.1",
"nyc": "^8.1.0",
"rimraf": "^2.5.2",
"sinon": "^1.14.1",
"standard": "^7.0.0"
"gulp-coffee": "^2.3.4",
"gulp-plumber": "^1.1.0",
"gulp-sourcemaps": "^2.4.1",
"gulp-watch": "^4.3.11",
"husky": "^0.13.2",
"index-modules": "^0.3.0",
"jest": "^19.0.2",
"rimraf": "^2.6.1",
"standard": "^9.0.2"
},
"scripts": {
"build": "npm run build-indexes && gulp build --production",
"depcheck": "dependency-check ./package.json",
"build-indexes": "./tools/generate-index src/api src/xapi/mixins src/xo-mixins",
"dev": "npm run build-indexes && gulp build",
"dev-test": "mocha --opts .mocha.opts --watch --reporter=min \"dist/**/*.spec.js\"",
"lint": "standard",
"postrelease": "git checkout master && git merge --ff-only stable && git checkout next-release && git merge --ff-only stable",
"posttest": "npm run lint && npm run depcheck",
"build": "gulp build --production",
"commitmsg": "npm test",
"dev": "gulp build",
"dev-test": "jest --bail --watch",
"posttest": "standard && dependency-check -i constant-stream ./package.json",
"prebuild": "index-modules src/api src/xapi/mixins src/xo-mixins",
"predev": "npm run prebuild",
"prepublish": "npm run build",
"prerelease": "git checkout next-release && git pull --ff-only && git checkout stable && git pull --ff-only && git merge next-release",
"release": "npm version",
"start": "node bin/xo-server",
"test": "nyc mocha --opts .mocha.opts \"dist/**/*.spec.js\""
"test": "jest"
},
"babel": {
"plugins": [
"lodash",
"transform-decorators-legacy",
"transform-runtime"
],
"presets": [
"stage-0",
"es2015"
[
"env",
{
"targets": {
"node": 4
}
}
],
"stage-0"
]
},
"config": {
"ghooks": {
"commit-msg": "npm test"
}
"jest": {
"roots": [
"<rootDir>/src"
],
"testRegex": "\\.spec\\.js$"
},
"standard": {
"ignore": [

View File

@@ -1,11 +1,17 @@
# Example XO-Server configuration.
# BE *VERY* CAREFUL WHEN EDITING!
# YAML FILES ARE SUPER SUPER SENSITIVE TO MISTAKES IN WHITESPACE OR ALIGNMENT!
# visit http://www.yamllint.com/ to validate this file as needed
#=====================================================================
# Example XO-Server configuration.
#
# This file is automatically looking for at the following places:
# - `$HOME/.config/xo-server/config.yaml`
# - `/etc/xo-server/config.yaml`
#
# The first entries have priority.
#
# Note: paths are relative to the configuration file.
#=====================================================================
@@ -117,10 +123,23 @@ http:
# Connection to the Redis server.
redis:
# Syntax: redis://[db[:password]@]hostname[:port]
# Unix sockets can be used
#
# Default: redis://localhost:6379
#uri: ''
# Default: undefined
#socket: /var/run/redis/redis.sock
# Syntax: redis://[db[:password]@]hostname[:port][/db-number]
#
# Default: redis://localhost:6379/0
#uri: redis://redis.company.lan/42
# List of aliased commands.
#
# See http://redis.io/topics/security#disabling-of-specific-commands
#renameCommands:
# del: '3dda29ad-3015-44f9-b13b-fa570de92489'
# srem: '3fd758c9-5610-4e9d-a058-dbf4cb6d8bf0'
# Directory containing the database of XO.
# Currently used for logs.

View File

@@ -6,55 +6,45 @@ html
meta(name = 'viewport' content = 'width=device-width, initial-scale=1.0')
title Xen Orchestra
meta(name = 'author' content = 'Vates SAS')
link(rel = 'stylesheet' href = 'styles/main.css')
link(rel = 'stylesheet' href = 'v4/styles/main.css')
body
.container
.row-login
.page-header
img(src = 'images/logo_small.png')
h2 Xen Orchestra
form.form-horizontal(action = 'signin/local' method = 'post')
fieldset
legend.login
h3 Sign in
if error
p.text-danger #{error}
.form-group
.col-sm-12
.input-group
span.input-group-addon
i.xo-icon-user.fa-fw
input.form-control.input-sm(
name = 'username'
type = 'text'
placeholder = 'Username'
required
)
.form-group
.col-sm-12
.input-group
span.input-group-addon
i.fa.fa-key.fa-fw
input.form-control.input-sm(
name = 'password'
type = 'password'
placeholder = 'Password'
required
)
.form-group
.col-sm-5
.checkbox
label
input(
name = 'remember-me'
type = 'checkbox'
)
| Remember me
.form-group
.col-sm-12
button.btn.btn-login.btn-block.btn-success
i.fa.fa-sign-in
| Sign in
each label, id in strategies
div: a(href = 'signin/' + id) Sign in with #{label}
link(rel = 'stylesheet' href = 'index.css')
body(style = 'display: flex; height: 100vh;')
div(style = 'margin: auto; width: 20em;')
div.mb-2(style = 'display: flex;')
img(src = 'assets/logo.png' style = 'margin: auto;')
h2.text-xs-center.mb-2 Xen Orchestra
form(action = 'signin/local' method = 'post')
fieldset
if error
p.text-danger #{error}
.input-group.mb-1
span.input-group-addon
i.xo-icon-user.fa-fw
input.form-control(
name = 'username'
type = 'text'
placeholder = 'Username'
required
)
.input-group.mb-1
span.input-group-addon
i.fa.fa-key.fa-fw
input.form-control(
name = 'password'
type = 'password'
placeholder = 'Password'
required
)
.checkbox
label
input(
name = 'remember-me'
type = 'checkbox'
)
| &nbsp;
| Remember me
div
button.btn.btn-block.btn-info
i.fa.fa-sign-in
| Sign in
each label, id in strategies
div: a(href = 'signin/' + id) Sign in with #{label}

View File

@@ -1,70 +0,0 @@
import {JsonRpcError} from 'json-rpc-peer'
// ===================================================================
// Export standard JSON-RPC errors.
export { // eslint-disable-line no-duplicate-imports
InvalidJson,
InvalidParameters,
InvalidRequest,
JsonRpcError,
MethodNotFound
} from 'json-rpc-peer'
// -------------------------------------------------------------------
export class NotImplemented extends JsonRpcError {
constructor () {
super('not implemented', 0)
}
}
// -------------------------------------------------------------------
export class NoSuchObject extends JsonRpcError {
constructor (id, type) {
super('no such object', 1, {id, type})
}
}
// -------------------------------------------------------------------
export class Unauthorized extends JsonRpcError {
constructor () {
super('not authenticated or not enough permissions', 2)
}
}
// -------------------------------------------------------------------
export class InvalidCredential extends JsonRpcError {
constructor () {
super('invalid credential', 3)
}
}
// -------------------------------------------------------------------
export class AlreadyAuthenticated extends JsonRpcError {
constructor () {
super('already authenticated', 4)
}
}
// -------------------------------------------------------------------
export class ForbiddenOperation extends JsonRpcError {
constructor (operation, reason) {
super(`forbidden operation: ${operation}`, 5, reason)
}
}
// -------------------------------------------------------------------
// To be used with a user-readable message.
// The message can be destined to be displayed to the front-end user.
export class GenericError extends JsonRpcError {
constructor (message) {
super(message, 6)
}
}

0
src/api/.index-modules Normal file
View File

98
src/api/backup.js Normal file
View File

@@ -0,0 +1,98 @@
import archiver from 'archiver'
import { basename } from 'path'
import { format } from 'json-rpc-peer'
import { forEach } from 'lodash'
// ===================================================================
export function list ({ remote }) {
return this.listVmBackups(remote)
}
list.permission = 'admin'
list.params = {
remote: { type: 'string' }
}
// -------------------------------------------------------------------
export function scanDisk ({ remote, disk }) {
return this.scanDiskBackup(remote, disk)
}
scanDisk.permission = 'admin'
scanDisk.params = {
remote: { type: 'string' },
disk: { type: 'string' }
}
// -------------------------------------------------------------------
export function scanFiles ({ remote, disk, partition, path }) {
return this.scanFilesInDiskBackup(remote, disk, partition, path)
}
scanFiles.permission = 'admin'
scanFiles.params = {
remote: { type: 'string' },
disk: { type: 'string' },
partition: { type: 'string', optional: true },
path: { type: 'string' }
}
// -------------------------------------------------------------------
function handleFetchFiles (req, res, { remote, disk, partition, paths, format: archiveFormat }) {
this.fetchFilesInDiskBackup(remote, disk, partition, paths).then(files => {
res.setHeader('content-disposition', 'attachment')
res.setHeader('content-type', 'application/octet-stream')
const nFiles = paths.length
// Send lone file directly
if (nFiles === 1) {
files[0].pipe(res)
return
}
const archive = archiver(archiveFormat)
archive.on('error', error => {
console.error(error)
res.end(format.error(0, error))
})
forEach(files, file => {
archive.append(file, { name: basename(file.path) })
})
archive.finalize()
archive.pipe(res)
}).catch(error => {
console.error(error)
res.writeHead(500)
res.end(format.error(0, error))
})
}
export async function fetchFiles ({ format = 'zip', ...params }) {
const fileName = params.paths.length > 1
? `restore_${new Date().toJSON()}.${format}`
: basename(params.paths[0])
return this.registerHttpRequest(handleFetchFiles, { ...params, format }, {
suffix: encodeURI(`/${fileName}`)
}).then(url => ({ $getFrom: url }))
}
fetchFiles.permission = 'admin'
fetchFiles.params = {
remote: { type: 'string' },
disk: { type: 'string' },
format: { type: 'string', optional: true },
partition: { type: 'string', optional: true },
paths: {
type: 'array',
items: { type: 'string' },
minLength: 1
}
}

View File

@@ -5,10 +5,11 @@ $forEach = require 'lodash/forEach'
endsWith = require 'lodash/endsWith'
startsWith = require 'lodash/startsWith'
{coroutine: $coroutine} = require 'bluebird'
{format} = require 'json-rpc-peer'
{
extractProperty,
parseXml,
promisify
mapToArray,
parseXml
} = require '../utils'
#=====================================================================
@@ -261,6 +262,42 @@ stats.resolve = {
exports.stats = stats;
#---------------------------------------------------------------------
handleInstallSupplementalPack = $coroutine (req, res, { hostId }) ->
xapi = @getXapi(hostId)
# Timeout seems to be broken in Node 4.
# See https://github.com/nodejs/node/issues/3319
req.setTimeout(43200000) # 12 hours
req.length = req.headers['content-length']
try
yield xapi.installSupplementalPack(req, { hostId })
res.end(format.response(0))
catch e
res.writeHead(500)
res.end(format.error(0, new Error(e.message)))
return
installSupplementalPack = $coroutine ({host}) ->
return {
$sendTo: yield @registerHttpRequest(handleInstallSupplementalPack, { hostId: host.id })
}
installSupplementalPack.description = 'installs supplemental pack from ISO file'
installSupplementalPack.params = {
host: { type: 'string' }
}
installSupplementalPack.resolve = {
host: ['host', 'host', 'admin']
}
exports.installSupplementalPack = installSupplementalPack;
#=====================================================================
Object.defineProperty(exports, '__esModule', {

View File

@@ -1,8 +1,11 @@
import { unauthorized } from 'xo-common/api-errors'
export function create (props) {
return this.createIpPool(props)
}
create.permission = 'admin'
create.description = 'Creates a new ipPool'
// -------------------------------------------------------------------
@@ -12,14 +15,24 @@ function delete_ ({ id }) {
export { delete_ as delete }
delete_.permission = 'admin'
delete_.description = 'Delete an ipPool'
// -------------------------------------------------------------------
export function getAll () {
return this.getAllIpPools()
export function getAll (params) {
const { user } = this
if (!user) {
throw unauthorized()
}
return this.getAllIpPools(user.permission === 'admin'
? params && params.userId
: user.id
)
}
getAll.permission = 'admin'
getAll.description = 'List all ipPools'
// -------------------------------------------------------------------
@@ -28,3 +41,4 @@ export function set ({ id, ...props }) {
}
set.permission = 'admin'
set.description = 'Allow to modify an existing ipPool'

View File

@@ -18,7 +18,11 @@ get.params = {
}
export async function create ({job}) {
return (await this.createJob(this.session.get('user_id'), job)).id
if (!job.userId) {
job.userId = this.session.get('user_id')
}
return (await this.createJob(job)).id
}
create.permission = 'admin'
@@ -27,7 +31,9 @@ create.params = {
job: {
type: 'object',
properties: {
userId: {type: 'string', optional: true},
name: {type: 'string', optional: true},
timeout: {type: 'number', optional: true},
type: {type: 'string'},
key: {type: 'string'},
method: {type: 'string'},
@@ -60,6 +66,7 @@ set.params = {
properties: {
id: {type: 'string'},
name: {type: 'string', optional: true},
timeout: {type: 'number', optional: true},
type: {type: 'string'},
key: {type: 'string'},
method: {type: 'string'},

View File

@@ -1,3 +1,9 @@
import { mapToArray } from '../utils'
export function getBondModes () {
return ['balance-slb', 'active-backup', 'lacp']
}
export async function create ({ pool, name, description, pif, mtu = 1500, vlan = 0 }) {
return this.getXapi(pool).createNetwork({
name,
@@ -24,6 +30,40 @@ create.permission = 'admin'
// =================================================================
export async function createBonded ({ pool, name, description, pifs, mtu = 1500, mac, bondMode }) {
return this.getXapi(pool).createBondedNetwork({
name,
description,
pifIds: mapToArray(pifs, pif =>
this.getObject(pif, 'PIF')._xapiId
),
mtu: +mtu,
mac,
bondMode
})
}
createBonded.params = {
pool: { type: 'string' },
name: { type: 'string' },
description: { type: 'string', optional: true },
pifs: {
type: 'array',
items: {
type: 'string'
}
},
mtu: { type: ['integer', 'string'], optional: true },
// RegExp since schema-inspector does not provide a param check based on an enumeration
bondMode: { type: 'string', pattern: new RegExp(`^(${getBondModes().join('|')})$`) }
}
createBonded.resolve = {
pool: ['pool', 'pool', 'administrate']
}
createBonded.permission = 'admin'
createBonded.description = 'Create a bonded network. bondMode can be balance-slb, active-backup or lacp'
// ===================================================================
export async function set ({

View File

@@ -1,5 +1,15 @@
// TODO: too low level, move into host.
import { IPV4_CONFIG_MODES, IPV6_CONFIG_MODES } from '../xapi'
export function getIpv4ConfigurationModes () {
return IPV4_CONFIG_MODES
}
export function getIpv6ConfigurationModes () {
return IPV6_CONFIG_MODES
}
// ===================================================================
// Delete
@@ -66,3 +76,18 @@ reconfigureIp.params = {
reconfigureIp.resolve = {
pif: ['id', 'PIF', 'administrate']
}
// ===================================================================
export async function editPif ({ pif, vlan }) {
await this.getXapi(pif).editPif(pif._xapiId, { vlan })
}
editPif.params = {
id: { type: 'string' },
vlan: { type: ['integer', 'string'] }
}
editPif.resolve = {
pif: ['id', 'PIF', 'administrate']
}

View File

@@ -102,3 +102,24 @@ purgeConfiguration.params = {
}
purgeConfiguration.permission = 'admin'
// ---------------------------------------------------------------------
export async function test ({ id, data }) {
await this.testPlugin(id, data)
}
test.description = 'Test a plugin with its current configuration'
test.params = {
id: {
type: 'string'
},
data: {
optional: true
}
}
test.permission = 'admin'
// ---------------------------------------------------------------------

View File

@@ -1,4 +1,4 @@
import {GenericError} from '../api-errors'
import { format } from 'json-rpc-peer'
// ===================================================================
@@ -35,21 +35,21 @@ set.resolve = {
// -------------------------------------------------------------------
export async function setDefaultSr ({pool, sr}) {
await this.getXapi(pool).setDefaultSr(sr._xapiId)
export async function setDefaultSr ({ sr }) {
await this.hasPermissions(this.user.id, [ [ sr.$pool, 'administrate' ] ])
await this.getXapi(sr).setDefaultSr(sr._xapiId)
}
setDefaultSr.permission = '' // signed in
setDefaultSr.params = {
pool: {
type: 'string'
},
sr: {
type: 'string'
}
}
setDefaultSr.resolve = {
pool: ['pool', 'pool', 'administrate'],
sr: ['sr', 'SR']
}
// -------------------------------------------------------------------
@@ -76,16 +76,18 @@ export async function installAllPatches ({ pool }) {
await this.getXapi(pool).installAllPoolPatchesOnAllHosts()
}
installPatch.params = {
installAllPatches.params = {
pool: {
type: 'string'
}
}
installPatch.resolve = {
installAllPatches.resolve = {
pool: ['pool', 'pool', 'administrate']
}
installAllPatches.description = 'Install automatically all patches for every hosts of a pool'
// -------------------------------------------------------------------
async function handlePatchUpload (req, res, {pool}) {
@@ -121,12 +123,7 @@ export {uploadPatch as patch}
// -------------------------------------------------------------------
export async function mergeInto ({ source, target, force }) {
try {
await this.mergeXenPools(source._xapiId, target._xapiId, force)
} catch (e) {
// FIXME: should we expose plain XAPI error messages?
throw new GenericError(e.message)
}
await this.mergeXenPools(source._xapiId, target._xapiId, force)
}
mergeInto.params = {
@@ -145,7 +142,7 @@ mergeInto.resolve = {
export async function getLicenseState ({pool}) {
return this.getXapi(pool).call(
'pool.get_license_state',
pool._xapiId.$ref,
pool._xapiId.$ref
)
}
@@ -158,3 +155,38 @@ getLicenseState.params = {
getLicenseState.resolve = {
pool: ['pool', 'pool', 'administrate']
}
// -------------------------------------------------------------------
async function handleInstallSupplementalPack (req, res, { poolId }) {
const xapi = this.getXapi(poolId)
// Timeout seems to be broken in Node 4.
// See https://github.com/nodejs/node/issues/3319
req.setTimeout(43200000) // 12 hours
req.length = req.headers['content-length']
try {
await xapi.installSupplementalPackOnAllHosts(req)
res.end(format.response(0))
} catch (e) {
res.writeHead(500)
res.end(format.error(0, new Error(e.message)))
}
}
export async function installSupplementalPack ({ pool }) {
return {
$sendTo: await this.registerHttpRequest(handleInstallSupplementalPack, { poolId: pool.id })
}
}
installSupplementalPack.description = 'installs supplemental pack from ISO file on all hosts'
installSupplementalPack.params = {
pool: { type: 'string' }
}
installSupplementalPack.resolve = {
pool: ['pool', 'pool', 'admin']
}

View File

@@ -1,6 +1,6 @@
import {
Unauthorized
} from '../api-errors'
unauthorized
} from 'xo-common/api-errors'
// ===================================================================
@@ -51,11 +51,12 @@ delete_.params = {
// -------------------------------------------------------------------
export function set ({ id, name, subjects, objects, limits }) {
export function set ({ id, name, subjects, objects, ipPools, limits }) {
return this.updateResourceSet(id, {
limits,
name,
objects,
ipPools,
subjects
})
}
@@ -84,6 +85,13 @@ set.params = {
},
optional: true
},
ipPools: {
type: 'array',
items: {
type: 'string'
},
optional: true
},
limits: {
type: 'object',
optional: true
@@ -109,12 +117,14 @@ get.params = {
export async function getAll () {
const { user } = this
if (!user) {
throw new Unauthorized()
throw unauthorized()
}
return this.getAllResourceSets(user.id)
}
getAll.description = 'Get the list of all existing resource set'
// -------------------------------------------------------------------
export function addObject ({ id, object }) {
@@ -227,3 +237,4 @@ export function recomputeAllLimits () {
}
recomputeAllLimits.permission = 'admin'
recomputeAllLimits.description = 'Recompute manually the current resource set usage'

View File

@@ -1,3 +1,5 @@
export async function getAll () {
return /* await */ this.getRoles()
}
getAll.description = 'Returns the list of all existing roles'

View File

@@ -4,13 +4,14 @@ import {
} from '../utils'
export async function add ({
label,
host,
username,
password,
readOnly,
autoConnect = true
}) {
const server = await this.registerXenServer({host, username, password, readOnly})
const server = await this.registerXenServer({label, host, username, password, readOnly})
if (autoConnect) {
// Connect asynchronously, ignore any errors.
@@ -25,6 +26,10 @@ add.description = 'register a new Xen server'
add.permission = 'admin'
add.params = {
label: {
optional: true,
type: 'string'
},
host: {
type: 'string'
},
@@ -70,8 +75,8 @@ getAll.permission = 'admin'
// -------------------------------------------------------------------
export async function set ({id, host, username, password, readOnly}) {
await this.updateXenServer(id, {host, username, password, readOnly})
export async function set ({id, label, host, username, password, readOnly}) {
await this.updateXenServer(id, {label, host, username, password, readOnly})
}
set.description = 'changes the properties of a Xen server'
@@ -82,6 +87,10 @@ set.params = {
id: {
type: 'string'
},
label: {
type: 'string',
optional: true
},
host: {
type: 'string',
optional: true

View File

@@ -1,18 +1,14 @@
import {deprecate} from 'util'
import { getUserPublicProperties } from '../utils'
import {InvalidCredential, AlreadyAuthenticated} from '../api-errors'
import {invalidCredentials} from 'xo-common/api-errors'
// ===================================================================
export async function signIn (credentials) {
if (this.session.has('user_id')) {
throw new AlreadyAuthenticated()
}
const user = await this.authenticateUser(credentials)
if (!user) {
throw new InvalidCredential()
throw invalidCredentials()
}
this.session.set('user_id', user.id)

View File

@@ -34,7 +34,7 @@ set.resolve = {
// -------------------------------------------------------------------
export async function scan ({SR}) {
export async function scan ({ SR }) {
await this.getXapi(SR).call('SR.scan', SR._xapiRef)
}
@@ -50,7 +50,15 @@ scan.resolve = {
// TODO: find a way to call this "delete" and not destroy
export async function destroy ({ sr }) {
await this.getXapi(sr).destroySr(sr._xapiId)
const xapi = this.getXapi(sr)
if (sr.SR_type === 'xosan') {
const config = xapi.xo.getData(sr, 'xosan_config')
// we simply forget because the hosted disks are been destroyed with the VMs
await xapi.forgetSr(sr._xapiId)
await Promise.all(config.nodes.map(node => xapi.deleteVm(node.vm.id, true)))
return xapi.deleteNetwork(config.network)
}
await xapi.destroySr(sr._xapiId)
}
destroy.params = {
@@ -63,7 +71,7 @@ destroy.resolve = {
// -------------------------------------------------------------------
export async function forget ({SR}) {
export async function forget ({ SR }) {
await this.getXapi(SR).forgetSr(SR._xapiId)
}
@@ -77,7 +85,7 @@ forget.resolve = {
// -------------------------------------------------------------------
export async function connectAllPbds ({SR}) {
export async function connectAllPbds ({ SR }) {
await this.getXapi(SR).connectAllSrPbds(SR._xapiId)
}
@@ -91,7 +99,7 @@ connectAllPbds.resolve = {
// -------------------------------------------------------------------
export async function disconnectAllPbds ({SR}) {
export async function disconnectAllPbds ({ SR }) {
await this.getXapi(SR).disconnectAllSrPbds(SR._xapiId)
}
@@ -121,6 +129,7 @@ export async function createIso ({
deviceConfig.legacy_mode = 'true'
} else if (type === 'smb') {
path = path.replace(/\\/g, '/')
deviceConfig.type = 'cifs'
deviceConfig.username = user
deviceConfig.cifspassword = password
}
@@ -213,6 +222,51 @@ createNfs.resolve = {
host: ['host', 'host', 'administrate']
}
// -------------------------------------------------------------------
// HBA SR
// This functions creates an HBA SR
export async function createHba ({
host,
nameLabel,
nameDescription,
scsiId
}) {
const xapi = this.getXapi(host)
const deviceConfig = {
scsiId
}
const srRef = await xapi.call(
'SR.create',
host._xapiRef,
deviceConfig,
'0',
nameLabel,
nameDescription,
'lvmoohba', // SR LVM over HBA
'user', // recommended by Citrix
true,
{}
)
const sr = await xapi.call('SR.get_record', srRef)
return sr.uuid
}
createHba.params = {
host: { type: 'string' },
nameLabel: { type: 'string' },
nameDescription: { type: 'string' },
scsiId: { type: 'string' }
}
createHba.resolve = {
host: ['host', 'host', 'administrate']
}
// -------------------------------------------------------------------
// Local LVM SR
@@ -312,6 +366,55 @@ probeNfs.resolve = {
host: ['host', 'host', 'administrate']
}
// -------------------------------------------------------------------
// This function helps to detect all HBA devices on the host
export async function probeHba ({
host
}) {
const xapi = this.getXapi(host)
let xml
try {
await xapi.call(
'SR.probe',
host._xapiRef,
'type',
{}
)
throw new Error('the call above should have thrown an error')
} catch (error) {
if (error.code !== 'SR_BACKEND_FAILURE_107') {
throw error
}
xml = parseXml(error.params[2])
}
const hbaDevices = []
forEach(ensureArray(xml.Devlist.BlockDevice), hbaDevice => {
hbaDevices.push({
hba: hbaDevice.hba.trim(),
path: hbaDevice.path.trim(),
scsciId: hbaDevice.SCSIid.trim(),
size: hbaDevice.size.trim(),
vendor: hbaDevice.vendor.trim()
})
})
return hbaDevices
}
probeHba.params = {
host: { type: 'string' }
}
probeHba.resolve = {
host: ['host', 'host', 'administrate']
}
// -------------------------------------------------------------------
// ISCSI SR
@@ -571,7 +674,7 @@ export async function probeIscsiExists ({
const srs = []
forEach(ensureArray(xml['SRlist'].SR), sr => {
// get the UUID of SR connected to this LUN
srs.push({uuid: sr.UUID.trim()})
srs.push({ uuid: sr.UUID.trim() })
})
return srs
@@ -613,7 +716,7 @@ export async function probeNfsExists ({
forEach(ensureArray(xml['SRlist'].SR), sr => {
// get the UUID of SR connected to this LUN
srs.push({uuid: sr.UUID.trim()})
srs.push({ uuid: sr.UUID.trim() })
})
return srs

View File

@@ -2,7 +2,7 @@ import forEach from 'lodash/forEach'
import getKeys from 'lodash/keys'
import moment from 'moment-timezone'
import { NoSuchObject } from '../api-errors'
import { noSuchObject } from 'xo-common/api-errors'
import { version as xoServerVersion } from '../../package.json'
// ===================================================================
@@ -50,7 +50,7 @@ export function methodSignature ({method: name}) {
const method = this.apiMethods[name]
if (!method) {
throw new NoSuchObject()
throw noSuchObject()
}
// Return an array for compatibility with XML-RPC.

View File

@@ -1,10 +1,10 @@
import {InvalidParameters} from '../api-errors'
import {invalidParameters} from 'xo-common/api-errors'
import { getUserPublicProperties, mapToArray } from '../utils'
// ===================================================================
export async function create ({email, password, permission}) {
return (await this.createUser(email, {password, permission})).id
return (await this.createUser({email, password, permission})).id
}
create.description = 'creates a new user'
@@ -22,7 +22,7 @@ create.params = {
// Deletes an existing user.
async function delete_ ({id}) {
if (id === this.session.get('user_id')) {
throw new InvalidParameters('a user cannot delete itself')
throw invalidParameters('a user cannot delete itself')
}
await this.deleteUser(id)
@@ -61,10 +61,10 @@ export async function set ({id, email, password, permission, preferences}) {
const isAdmin = this.user && this.user.permission === 'admin'
if (isAdmin) {
if (permission && id === this.session.get('user_id')) {
throw new InvalidParameters('a user cannot change its own permission')
throw invalidParameters('a user cannot change its own permission')
}
} else if (email || password || permission) {
throw new InvalidParameters('this properties can only changed by an administrator')
throw invalidParameters('this properties can only changed by an administrator')
}
await this.updateUser(id, {email, password, permission, preferences})

View File

@@ -3,9 +3,9 @@
{coroutine: $coroutine} = require 'bluebird'
{format} = require 'json-rpc-peer'
{InvalidParameters} = require '../api-errors'
{invalidParameters} = require 'xo-common/api-errors'
{isArray: $isArray, parseSize} = require '../utils'
{JsonRpcError} = require '../api-errors'
{JsonRpcError} = require 'json-rpc-peer'
#=====================================================================
@@ -38,7 +38,7 @@ set = $coroutine (params) ->
size = parseSize(params.size)
if size < vdi.size
throw new InvalidParameters(
throw invalidParameters(
"cannot set new size (#{size}) below the current size (#{vdi.size})"
)
yield xapi.resizeVdi(ref, size)

View File

@@ -1,5 +1,3 @@
import forEach from 'lodash/forEach'
import {
diffItems,
noop,
@@ -10,12 +8,11 @@ import {
// TODO: move into vm and rename to removeInterface
async function delete_ ({vif}) {
const { id } = vif
const dealloc = address => {
this.deallocIpAddress(address, id)::pCatch(noop)
}
forEach(vif.allowedIpv4Addresses, dealloc)
forEach(vif.allowedIpv6Addresses, dealloc)
this.allocIpAddresses(
vif.id,
null,
vif.allowedIpv4Addresses.concat(vif.allowedIpv6Addresses)
)::pCatch(noop)
await this.getXapi(vif).deleteVif(vif._xapiId)
}
@@ -34,7 +31,7 @@ delete_.resolve = {
// TODO: move into vm and rename to disconnectInterface
export async function disconnect ({vif}) {
// TODO: check if VIF is attached before
await this.getXapi(vif).call('VIF.unplug_force', vif._xapiRef)
await this.getXapi(vif).disconnectVif(vif._xapiId)
}
disconnect.params = {
@@ -49,7 +46,7 @@ disconnect.resolve = {
// TODO: move into vm and rename to connectInterface
export async function connect ({vif}) {
// TODO: check if VIF is attached before
await this.getXapi(vif).call('VIF.plug', vif._xapiRef)
await this.getXapi(vif).connectVif(vif._xapiId)
}
connect.params = {
@@ -62,18 +59,54 @@ connect.resolve = {
// -------------------------------------------------------------------
export function set ({ vif, allowedIpv4Addresses, allowedIpv6Addresses }) {
const { id } = vif
const handle = ([ newAddresses, oldAddresses ]) => {
forEach(newAddresses, address => {
this.allocIpAddress(address, id)::pCatch(noop)
})
forEach(oldAddresses, address => {
this.deallocIpAddress(address, id)::pCatch(noop)
})
export async function set ({
vif,
network,
mac,
allowedIpv4Addresses,
allowedIpv6Addresses,
attached
}) {
const oldIpAddresses = vif.allowedIpv4Addresses.concat(vif.allowedIpv6Addresses)
const newIpAddresses = []
{
const { push } = newIpAddresses
push.apply(newIpAddresses, allowedIpv4Addresses || vif.allowedIpv4Addresses)
push.apply(newIpAddresses, allowedIpv6Addresses || vif.allowedIpv6Addresses)
}
handle(diffItems(allowedIpv4Addresses, vif.allowedIpv4Addresses))
handle(diffItems(allowedIpv6Addresses, vif.allowedIpv6Addresses))
if (network || mac) {
const xapi = this.getXapi(vif)
const vm = xapi.getObject(vif.$VM)
mac == null && (mac = vif.MAC)
network = xapi.getObject((network && network.id) || vif.$network)
attached == null && (attached = vif.attached)
await this.allocIpAddresses(vif.id, null, oldIpAddresses)
await xapi.deleteVif(vif._xapiId)
// create new VIF with new parameters
const newVif = await xapi.createVif(vm.$id, network.$id, {
mac,
currently_attached: attached,
ipv4_allowed: newIpAddresses
})
await this.allocIpAddresses(newVif.$id, newIpAddresses)
return
}
const [ addAddresses, removeAddresses ] = diffItems(
newIpAddresses,
oldIpAddresses
)
await this.allocIpAddresses(
vif.id,
addAddresses,
removeAddresses
)
return this.getXapi(vif).editVif(vif._xapiId, {
ipv4Allowed: allowedIpv4Addresses,
@@ -82,6 +115,9 @@ export function set ({ vif, allowedIpv4Addresses, allowedIpv6Addresses }) {
}
set.params = {
id: { type: 'string' },
network: { type: 'string', optional: true },
mac: { type: 'string', optional: true },
allowedIpv4Addresses: {
type: 'array',
items: {
@@ -95,9 +131,11 @@ set.params = {
type: 'string'
},
optional: true
}
},
attached: { type: 'boolean', optional: true }
}
set.resolve = {
vif: ['id', 'VIF', 'operate']
vif: ['id', 'VIF', 'operate'],
network: ['network', 'network', 'operate']
}

View File

@@ -3,24 +3,27 @@ $debug = (require 'debug') 'xo:api:vm'
$filter = require 'lodash/filter'
$findIndex = require 'lodash/findIndex'
$findWhere = require 'lodash/find'
concat = require 'lodash/concat'
endsWith = require 'lodash/endsWith'
escapeStringRegexp = require 'escape-string-regexp'
eventToPromise = require 'event-to-promise'
merge = require 'lodash/merge'
sortBy = require 'lodash/sortBy'
startsWith = require 'lodash/startsWith'
{coroutine: $coroutine} = require 'bluebird'
{format} = require 'json-rpc-peer'
{
GenericError,
InvalidParameters,
Unauthorized
} = require('../api-errors')
forbiddenOperation,
invalidParameters,
unauthorized
} = require('xo-common/api-errors')
{
forEach,
formatXml: $js2xml,
isArray: $isArray,
map,
mapFilter,
mapToArray,
noop,
parseSize,
@@ -48,7 +51,7 @@ checkPermissionOnSrs = (vm, permission = 'operate') -> (
)
return @hasPermissions(@session.get('user_id'), permissions).then((success) => (
throw new Unauthorized() unless success
throw unauthorized() unless success
))
)
@@ -61,17 +64,10 @@ extract = (obj, prop) ->
# TODO: Implement ACLs
create = $coroutine (params) ->
checkLimits = limits = null
{ user } = this
resourceSet = extract(params, 'resourceSet')
if resourceSet
yield this.checkResourceSetConstraints(resourceSet, user.id, objectIds)
checkLimits = $coroutine (limits2) =>
yield this.allocateLimitsInResourceSet(limits, resourceSet)
yield this.allocateLimitsInResourceSet(limits2, resourceSet)
else unless user.permission is 'admin'
throw new Unauthorized()
if not resourceSet and user.permission isnt 'admin'
throw unauthorized()
template = extract(params, 'template')
params.template = template._xapiId
@@ -141,24 +137,55 @@ create = $coroutine (params) ->
return {
mac: vif.mac
network: network._xapiId
ipv4_allowed: vif.allowedIpv4Addresses
ipv6_allowed: vif.allowedIpv6Addresses
}
)
installation = extract(params, 'installation')
params.installRepository = installation && installation.repository
checkLimits = null
if resourceSet
yield this.checkResourceSetConstraints(resourceSet, user.id, objectIds)
checkLimits = $coroutine (limits2) =>
yield this.allocateLimitsInResourceSet(limits, resourceSet)
yield this.allocateLimitsInResourceSet(limits2, resourceSet)
xapiVm = yield xapi.createVm(template._xapiId, params, checkLimits)
vm = xapi.xo.addObject(xapiVm)
if resourceSet
yield Promise.all([
@addAcl(user.id, vm.id, 'admin')
if params.share
$resourceSet = yield @getResourceSet(resourceSet)
Promise.all(map($resourceSet.subjects, (subjectId) => @addAcl(subjectId, vm.id, 'admin')))
else
@addAcl(user.id, vm.id, 'admin')
xapi.xo.setData(xapiVm.$id, 'resourceSet', resourceSet)
])
for vifId in vm.VIFs
vif = @getObject(vifId, 'VIF')
yield this.allocIpAddresses(vifId, concat(vif.allowedIpv4Addresses, vif.allowedIpv6Addresses)).catch(() =>
xapi.deleteVif(vif._xapiId)
)
if params.bootAfterCreate
pCatch.call(xapi.startVm(vm._xapiId), noop)
return vm.id
create.params = {
affinityHost: { type: 'string', optional: true }
bootAfterCreate: {
type: 'boolean'
optional: true
}
cloudConfig: {
type: 'string'
optional: true
@@ -195,6 +222,12 @@ create.params = {
# PV Args
pv_args: { type: 'string', optional: true }
share: {
type: 'boolean',
optional: true
}
# TODO: add the install repository!
# VBD.insert/eject
# Also for the console!
@@ -204,6 +237,7 @@ create.params = {
# Virtual interfaces to create for the new VM.
VIFs: {
optional: true
type: 'array'
items: {
type: 'object'
@@ -215,6 +249,18 @@ create.params = {
optional: true # Auto-generated per default.
type: 'string'
}
allowedIpv4Addresses: {
optional: true
type: 'array'
items: { type: 'string' }
}
allowedIpv6Addresses: {
optional: true
type: 'array'
items: { type: 'string' }
}
}
}
}
@@ -257,19 +303,43 @@ create.params = {
}
create.resolve = {
template: ['template', 'VM-template', 'administrate'],
template: ['template', 'VM-template', ''],
}
exports.create = create
#---------------------------------------------------------------------
delete_ = ({vm, delete_disks: deleteDisks}) ->
delete_ = $coroutine ({vm, delete_disks: deleteDisks = false }) ->
cpus = vm.CPUs.number
memory = vm.memory.size
xapi = @getXapi(vm)
@getAllAcls().then((acls) =>
Promise.all(mapFilter(acls, (acl) =>
if (acl.object == vm.id)
return pCatch.call(
@removeAcl(acl.subject, acl.object, acl.action),
noop
)
))
)
# Update IP pools
yield Promise.all(map(vm.VIFs, (vifId) =>
vif = xapi.getObject(vifId)
return pCatch.call(
this.allocIpAddresses(
vifId,
null,
concat(vif.ipv4_allowed, vif.ipv6_allowed)
),
noop
)
))
# Update resource sets
resourceSet = xapi.xo.getData(vm._xapiId, 'resourceSet')
if resourceSet?
disk = 0
@@ -286,10 +356,16 @@ delete_ = ({vm, delete_disks: deleteDisks}) ->
return
)
pCatch.call(@releaseLimitsInResourceSet(
@computeVmResourcesUsage(vm),
resourceSet
), noop)
resourceSetUsage = @computeVmResourcesUsage(vm)
ipPoolsUsage = yield @computeVmIpPoolsUsage(vm)
pCatch.call(
@releaseLimitsInResourceSet(
merge(resourceSetUsage, ipPoolsUsage),
resourceSet
),
noop
)
return xapi.deleteVm(vm._xapiId, deleteDisks)
@@ -372,7 +448,7 @@ migrate = $coroutine ({
])
unless yield @hasPermissions(@session.get('user_id'), permissions)
throw new Unauthorized()
throw unauthorized()
yield @getXapi(vm).migrateVm(vm._xapiId, @getXapi(host), host._xapiId, {
migrationNetworkId: migrationNetwork?._xapiId
@@ -413,14 +489,18 @@ set = (params) ->
VM = extract(params, 'VM')
xapi = @getXapi(VM)
return xapi.editVm(VM._xapiId, params, (limits, vm) =>
return xapi.editVm(VM._xapiId, params, $coroutine (limits, vm) =>
resourceSet = xapi.xo.getData(vm, 'resourceSet')
if (resourceSet)
return @allocateLimitsInResourceSet(limits, resourceSet)
try
return yield @allocateLimitsInResourceSet(limits, resourceSet)
catch error
# if the resource set no longer exist, behave as if the VM is free
throw error unless noSuchObject.is(error)
if (limits.cpuWeight && this.user.permission != 'admin')
throw new Unauthorized()
throw unauthorized()
)
set.params = {
@@ -462,10 +542,12 @@ set.params = {
cpuWeight: { type: ['integer', 'null'], optional: true }
cpuCap: { type: ['integer', 'null'], optional: true }
affinityHost: { type: ['string', 'null'], optional: true }
}
set.resolve = {
VM: ['id', ['VM', 'VM-snapshot'], 'administrate']
VM: ['id', ['VM', 'VM-snapshot', 'VM-template'], 'administrate']
}
exports.set = set
@@ -566,7 +648,7 @@ convertToTemplate = $coroutine ({vm}) ->
unless yield @hasPermissions(@session.get('user_id'), [
[ vm.$pool, 'administrate' ]
])
throw new Unauthorized()
throw unauthorized()
yield @getXapi(vm).call 'VM.set_is_a_template', vm._xapiRef, true
@@ -761,10 +843,10 @@ exports.rollingBackup = rollingBackup
rollingDrCopy = ({vm, pool, sr, tag, depth}) ->
unless sr
unless pool
throw new InvalidParameters('either pool or sr param should be specified')
throw invalidParameters('either pool or sr param should be specified')
if vm.$pool is pool.id
throw new GenericError('Disaster Recovery attempts to copy on the same pool')
throw forbiddenOperation('Disaster Recovery attempts to copy on the same pool')
sr = @getObject(pool.default_SR, 'SR')
@@ -822,7 +904,7 @@ stop = $coroutine ({vm, force}) ->
yield xapi.call 'VM.clean_shutdown', vm._xapiRef
catch error
if error.code is 'VM_MISSING_PV_DRIVERS' or error.code is 'VM_LACKS_FEATURE_SHUTDOWN'
throw new InvalidParameters('clean shutdown requires PV drivers')
throw invalidParameters('clean shutdown requires PV drivers')
else
throw error
@@ -857,18 +939,11 @@ exports.suspend = suspend
#---------------------------------------------------------------------
resume = $coroutine ({vm, force}) ->
# FIXME: WTF this is?
if not force
force = true
yield @getXapi(vm).call 'VM.resume', vm._xapiRef, false, force
return true
resume = ({vm}) ->
return @getXapi(vm).resumeVm(vm._xapiId)
resume.params = {
id: { type: 'string' }
force: { type: 'boolean', optional: true }
}
resume.resolve = {
@@ -878,15 +953,12 @@ exports.resume = resume
#---------------------------------------------------------------------
# revert a snapshot to its parent VM
revert = $coroutine ({snapshot}) ->
# Attempts a revert from this snapshot to its parent VM
yield @getXapi(snapshot).call 'VM.revert', snapshot._xapiRef
return true
revert = ({snapshot, snapshotBefore}) ->
return @getXapi(snapshot).revertVm(snapshot._xapiId, snapshotBefore)
revert.params = {
id: { type: 'string' }
id: { type: 'string' },
snapshotBefore: { type: 'boolean', optional: true }
}
revert.resolve = {
@@ -956,20 +1028,23 @@ handleVmImport = $coroutine (req, res, { data, srId, type, xapi }) ->
res.end(format.response(0, vm.$id))
catch e
res.writeHead(500)
res.end(format.error(0, new GenericError(e.message)))
res.end(format.error(0, new Error(e.message)))
return
# TODO: "sr_id" can be passed in URL to target a specific SR
import_ = $coroutine ({ data, host, sr, type }) ->
if data and type is 'xva'
throw invalidParameters('unsupported field data for the file type xva')
if not sr
if not host
throw new InvalidParameters('you must provide either host or SR')
throw invalidParameters('you must provide either host or SR')
xapi = @getXapi(host)
sr = xapi.pool.$default_SR
if not sr
throw new InvalidParameters('there is not default SR in this pool')
throw invalidParameters('there is not default SR in this pool')
# FIXME: must have administrate permission on default SR.
else
@@ -1057,21 +1132,47 @@ exports.attachDisk = attachDisk
#---------------------------------------------------------------------
# TODO: implement resource sets
createInterface = $coroutine ({vm, network, position, mtu, mac}) ->
createInterface = $coroutine ({
vm,
network,
position,
mac,
allowedIpv4Addresses,
allowedIpv6Addresses
}) ->
vif = yield @getXapi(vm).createVif(vm._xapiId, network._xapiId, {
mac,
mtu,
position
position,
ipv4_allowed: allowedIpv4Addresses,
ipv6_allowed: allowedIpv6Addresses
})
{ push } = ipAddresses = []
push.apply(ipAddresses, allowedIpv4Addresses) if allowedIpv4Addresses
push.apply(ipAddresses, allowedIpv6Addresses) if allowedIpv6Addresses
pCatch.call(@allocIpAddresses(vif.$id, ipAddresses), noop) if ipAddresses.length
return vif.$id
createInterface.params = {
vm: { type: 'string' }
network: { type: 'string' }
position: { type: ['integer', 'string'], optional: true }
mtu: { type: ['integer', 'string'], optional: true }
mac: { type: 'string', optional: true }
allowedIpv4Addresses: {
type: 'array',
items: {
type: 'string'
},
optional: true
},
allowedIpv6Addresses: {
type: 'array',
items: {
type: 'string'
},
optional: true
}
}
createInterface.resolve = {
@@ -1150,7 +1251,7 @@ setBootOrder = $coroutine ({vm, order}) ->
yield xapi.call 'VM.set_HVM_boot_params', vm._xapiRef, order
return true
throw new InvalidParameters('You can only set the boot order on a HVM guest')
throw invalidParameters('You can only set the boot order on a HVM guest')
setBootOrder.params = {
vm: { type: 'string' },

View File

@@ -1,5 +1,50 @@
import { streamToBuffer } from '../utils'
// ===================================================================
// Run the XO appliance's internal cleanup routine (delegates to xo.clean()).
// Admin-only API method.
export function clean () {
  return this.clean()
}
clean.permission = 'admin'
// -------------------------------------------------------------------
// Expose the XO configuration as a downloadable file.
//
// Returns { $getFrom: <temporary URL> }: fetching that URL (which ends with
// /config.json) answers with the result of this.exportConfig(), served with
// an attachment content-disposition so browsers save it to disk.
// Admin-only API method.
export async function exportConfig () {
  return {
    $getFrom: await this.registerHttpRequest((req, res) => {
      res.writeHead(200, 'OK', {
        'content-disposition': 'attachment'
      })
      return this.exportConfig()
    },
    undefined,
    { suffix: '/config.json' })
  }
}
exportConfig.permission = 'admin'
// -------------------------------------------------------------------
// Return every object known to this XO instance (delegates to
// this.getObjects()). Empty permission string: available to any
// authenticated user.
export function getAllObjects () {
  return this.getObjects()
}
getAllObjects.permission = ''
getAllObjects.description = 'Returns all XO objects'
// -------------------------------------------------------------------
// Import a previously exported XO configuration.
//
// Returns { $sendTo: <temporary URL> }: POSTing a JSON configuration body to
// that URL parses it, applies it via this.importConfig() and answers with a
// confirmation message. Admin-only API method.
export async function importConfig () {
  return {
    $sendTo: await this.registerHttpRequest(async (req, res) => {
      await this.importConfig(JSON.parse(await streamToBuffer(req)))
      res.end('config successfully imported')
    })
  }
}
importConfig.permission = 'admin'

478
src/api/xosan.js Normal file
View File

@@ -0,0 +1,478 @@
import arp from 'arp-a'
import createLogger from 'debug'
import defer from 'golike-defer'
import execa from 'execa'
import fromPairs from 'lodash/fromPairs'
import fs from 'fs-promise'
import map from 'lodash/map'
import splitLines from 'split-lines'
import {
filter,
includes
} from 'lodash'
import {
noop,
pCatch,
pFromCallback,
splitFirst
} from '../utils'
const debug = createLogger('xo:xosan')
const SSH_KEY_FILE = 'id_rsa_xosan'
const NETWORK_PREFIX = '172.31.100.'
const XOSAN_VM_SYSTEM_DISK_SIZE = 10 * 1024 * 1024 * 1024
const XOSAN_DATA_DISK_USEAGE_RATIO = 0.99
const XOSAN_MAX_DISK_SIZE = 2093050 * 1024 * 1024 // a bit under 2To
const CURRENTLY_CREATING_SRS = {}
// Fetch and parse `gluster volume info xosan` for the given XOSAN SR.
//
// Returns null when the SR carries no `xosan_config` data; otherwise an
// object mapping each "Key: value" line of the gluster output, augmented
// with a `Bricks` array of { config, ip, vm } (vm resolved through the node
// list stored in xosan_config) and, best effort, the MAC address of the
// brick matching a local ARP entry.
export async function getVolumeInfo ({ sr }) {
  const xapi = this.getXapi(sr)
  const giantIPtoVMDict = {}
  const data = xapi.xo.getData(sr, 'xosan_config')
  if (!data || !data.nodes) {
    return null
  }
  const nodes = data.nodes
  nodes.forEach(conf => {
    giantIPtoVMDict[conf.vm.ip] = xapi.getObject(conf.vm.id)
  })
  // any node can answer for the whole volume: query the first one over SSH
  const oneHostAndVm = nodes[0]
  const resultCmd = await remoteSsh(xapi, {
    host: xapi.getObject(oneHostAndVm.host),
    address: oneHostAndVm.vm.ip
  }, 'gluster volume info xosan')
  const result = resultCmd['stdout']
  /*
  Volume Name: xosan
  Type: Disperse
  Volume ID: 1d4d0e57-8b6b-43f9-9d40-c48be1df7548
  Status: Started
  Snapshot Count: 0
  Number of Bricks: 1 x (2 + 1) = 3
  Transport-type: tcp
  Bricks:
  Brick1: 192.168.0.201:/bricks/brick1/xosan1
  Brick2: 192.168.0.202:/bricks/brick1/xosan1
  Brick3: 192.168.0.203:/bricks/brick1/xosan1
  Options Reconfigured:
  client.event-threads: 16
  server.event-threads: 16
  performance.client-io-threads: on
  nfs.disable: on
  performance.readdir-ahead: on
  transport.address-family: inet
  features.shard: on
  features.shard-block-size: 64MB
  network.remote-dio: enable
  cluster.eager-lock: enable
  performance.io-cache: off
  performance.read-ahead: off
  performance.quick-read: off
  performance.stat-prefetch: on
  performance.strict-write-ordering: off
  cluster.server-quorum-type: server
  cluster.quorum-type: auto
  */
  // split each line on the FIRST ':' only — brick values themselves contain
  // ':' (ip:/path)
  const info = fromPairs(
    splitLines(result.trim()).map(line =>
      splitFirst(line, ':').map(val => val.trim())
    )
  )
  const getNumber = item => +item.substr(5) // 'BrickN' -> N
  const brickKeys = filter(Object.keys(info), key => key.match(/^Brick[1-9]/)).sort((i1, i2) => getNumber(i1) - getNumber(i2))
  // expected brickKeys : [ 'Brick1', 'Brick2', 'Brick3' ]
  info['Bricks'] = brickKeys.map(key => {
    const ip = info[key].split(':')[0]
    return { config: info[key], ip: ip, vm: giantIPtoVMDict[ip] }
  })
  // NOTE(review): arp.table(cb) appears to yield a single entry here;
  // only the brick matching that entry gets a `mac` field — confirm intent
  const entry = await pFromCallback(cb => arp.table(cb))
  if (entry) {
    const brick = info['Bricks'].find(element => element.config.split(':')[0] === entry.ip)
    if (brick) {
      brick.mac = entry.mac
    }
  }
  return info
}
getVolumeInfo.description = 'info on gluster volume'
getVolumeInfo.permission = 'admin'
getVolumeInfo.params = {
  sr: {
    type: 'string'
  }
}
getVolumeInfo.resolve = {
  sr: ['sr', 'SR', 'administrate']
}
// Round `value` down to the nearest multiple of 2048 (disk sizes handed to
// XAPI are aligned on 2048-byte boundaries).
function floor2048 (value) {
  const ALIGNMENT = 2048
  return ALIGNMENT * Math.floor(value / ALIGNMENT)
}
// Clone `originalVm` onto the SR described by `params`, keeping the params
// paired with the resulting copy so later steps know which node it is for.
async function copyVm (xapi, originalVm, params) {
  const vm = await xapi.copyVm(originalVm, params.sr)
  return { vm, params }
}
// Bring one gluster VM up on the XOSAN network and wait until it has booted.
//
// Moves the VM's first VIF onto `xosanNetwork` if needed, renames the VM,
// pushes its network configuration through xenstore, optionally grows its
// data disk to fill the SR's free space, starts it, then waits until the
// assigned IP shows up in the guest metrics. Returns { address, host, vm }.
async function prepareGlusterVm (xapi, vmAndParam, xosanNetwork, increaseDataDisk = true) {
  let vm = vmAndParam.vm
  // refresh the object so that sizes are correct
  const params = vmAndParam.params
  const ip = params.xenstore_data['vm-data/ip']
  const sr = xapi.getObject(params.sr.$id)
  // wait for the SR object to expose its PBDs so the hosting host is known
  await xapi._waitObjectState(sr.$id, sr => Boolean(sr.$PBDs))
  const host = sr.$PBDs[0].$host
  const firstVif = vm.$VIFs[0]
  if (xosanNetwork.$id !== firstVif.$network.$id) {
    await xapi.call('VIF.move', firstVif.$ref, xosanNetwork.$ref)
  }
  await xapi.editVm(vm, {
    name_label: params.name_label,
    name_description: params.name_description
  })
  // the appliance reads its IP/MTU/VLAN from xenstore at boot time
  await xapi.call('VM.set_xenstore_data', vm.$ref, params.xenstore_data)
  if (increaseDataDisk) {
    const dataDisk = vm.$VBDs.map(vbd => vbd.$VDI).find(vdi => vdi && vdi.name_label === 'xosan_data')
    const srFreeSpace = sr.physical_size - sr.physical_utilisation
    // we use a percentage because it looks like the VDI overhead is proportional
    const newSize = floor2048((srFreeSpace + dataDisk.virtual_size) * XOSAN_DATA_DISK_USEAGE_RATIO)
    await xapi._resizeVdi(dataDisk, Math.min(newSize, XOSAN_MAX_DISK_SIZE))
  }
  await xapi.startVm(vm)
  debug('waiting for boot of ', ip)
  // wait until we find the assigned IP in the networks, we are just checking the boot is complete
  const vmIsUp = vm => Boolean(vm.$guest_metrics && includes(vm.$guest_metrics.networks, ip))
  vm = await xapi._waitObjectState(vm.$id, vmIsUp)
  debug('booted ', ip)
  return { address: ip, host, vm }
}
// Invoke a command of the xosan.py host plugin and decode its JSON answer.
async function callPlugin (xapi, host, command, params) {
  debug('calling plugin', host.address, command)
  const response = await xapi.call('host.call_plugin', host.$ref, 'xosan.py', command, params)
  return JSON.parse(response)
}
// Run `cmd` as root on the VM at `hostAndAddress.address`, tunnelled through
// the xosan.py plugin of `hostAndAddress.host`. Throws on a non-zero exit
// status; otherwise returns the plugin result (stdout/stderr/exit).
async function remoteSsh (xapi, hostAndAddress, cmd) {
  const { host, address } = hostAndAddress
  const result = await callPlugin(xapi, host, 'run_ssh', {
    destination: 'root@' + address,
    cmd
  })
  if (result.exit !== 0) {
    throw new Error('ssh error: ' + JSON.stringify(result))
  }
  debug(result)
  return result
}
// Statically configure a PIF on the XOSAN /24, using the .1 address of the
// dedicated prefix as gateway and no DNS.
async function setPifIp (xapi, pif, address) {
  const netmask = '255.255.255.0'
  const gateway = NETWORK_PREFIX + '1'
  await xapi.call('PIF.reconfigure_ip', pif.$ref, 'Static', address, netmask, gateway, '')
}
const createNetworkAndInsertHosts = defer.onFailure(async function ($onFailure, xapi, pif, vlan) {
let hostIpLastNumber = 1
const xosanNetwork = await xapi.createNetwork({
name: 'XOSAN network',
description: 'XOSAN network',
pifId: pif._xapiId,
mtu: 9000,
vlan: +vlan
})
$onFailure(() => xapi.deleteNetwork(xosanNetwork)::pCatch(noop))
await Promise.all(xosanNetwork.$PIFs.map(pif => setPifIp(xapi, pif, NETWORK_PREFIX + (hostIpLastNumber++))))
return xosanNetwork
})
// Return the pool-wide XOSAN SSH key pair ({ private, public }).
//
// The pair is cached on the pool object under 'xosan_ssh_key'. On a cache
// miss it is read from SSH_KEY_FILE / SSH_KEY_FILE.pub on disk; if that read
// fails (first run: no key file yet) a 4096-bit RSA pair is generated with
// ssh-keygen and the read is retried.
async function getOrCreateSshKey (xapi) {
  let sshKey = xapi.xo.getData(xapi.pool, 'xosan_ssh_key')
  if (!sshKey) {
    // read the key pair from disk and cache it on the pool object
    const readKeys = async () => {
      sshKey = {
        private: await fs.readFile(SSH_KEY_FILE, 'ascii'),
        public: await fs.readFile(SSH_KEY_FILE + '.pub', 'ascii')
      }
      xapi.xo.setData(xapi.pool, 'xosan_ssh_key', sshKey)
    }
    try {
      await readKeys()
    } catch (e) {
      // no readable key on disk yet: generate one, then retry the read
      await execa('ssh-keygen', ['-q', '-f', SSH_KEY_FILE, '-t', 'rsa', '-b', '4096', '-N', ''])
      await readKeys()
    }
  }
  return sshKey
}
// Form the gluster trusted pool, create the `xosan` volume for the chosen
// layout, apply the tuning options and start the volume. All commands are
// run over SSH from `firstIpAndHost`; `arbiter`, when given, is the extra
// metadata-only brick used by 2-node setups.
async function configureGluster (redundancy, ipAndHosts, xapi, firstIpAndHost, glusterType, arbiter = null) {
  // per-layout `volume create` prefix plus extra post-creation commands
  const configByType = {
    replica_arbiter: {
      creation: 'replica 3 arbiter 1',
      extra: []
    },
    replica: {
      creation: 'replica ' + redundancy + ' ',
      extra: ['gluster volume set xosan cluster.data-self-heal on']
    },
    disperse: {
      creation: 'disperse ' + ipAndHosts.length + ' redundancy ' + redundancy + ' ',
      extra: []
    }
  }
  const brickVms = arbiter ? ipAndHosts.concat(arbiter) : ipAndHosts
  // the first node probes every other one to build the trusted pool
  for (let i = 1; i < brickVms.length; i++) {
    await remoteSsh(xapi, firstIpAndHost, 'gluster peer probe ' + brickVms[i].address)
  }
  const brickList = brickVms.map(ipAndHost => (ipAndHost.address + ':/bricks/xosan/xosandir')).join(' ')
  const volumeCreation = 'gluster volume create xosan ' + configByType[glusterType].creation + ' ' + brickList
  debug('creating volume: ', volumeCreation)
  await remoteSsh(xapi, firstIpAndHost, volumeCreation)
  // volume tuning, applied sequentially and in this order
  const settings = [
    'network.remote-dio enable',
    'cluster.eager-lock enable',
    'performance.io-cache off',
    'performance.read-ahead off',
    'performance.quick-read off',
    'performance.strict-write-ordering off',
    'client.event-threads 8',
    'server.event-threads 8',
    'performance.io-thread-count 64',
    'performance.stat-prefetch on',
    'features.shard on',
    'features.shard-block-size 512MB'
  ]
  for (const setting of settings) {
    await remoteSsh(xapi, firstIpAndHost, 'gluster volume set xosan ' + setting)
  }
  for (const confChunk of configByType[glusterType].extra) {
    await remoteSsh(xapi, firstIpAndHost, confChunk)
  }
  await remoteSsh(xapi, firstIpAndHost, 'gluster volume start xosan')
}
// Create a XOSAN hyperconverged SR on the pool owning the given local SRs.
//
// Workflow: create the private network, import the gluster template VM,
// clone it once per SR (plus an extra arbiter VM for 2-node setups), boot
// and configure every node, create the gluster volume, then create the
// XAPI SR backed by it. Every intermediate resource is registered with
// $onFailure so it is cleaned up if a later step throws. Only one createSR
// may run per pool at a time (guarded by CURRENTLY_CREATING_SRS).
export const createSR = defer.onFailure(async function ($onFailure, { template, pif, vlan, srs, glusterType, redundancy }) {
  // requestResource is provided by the xoa-updater plugin
  if (!this.requestResource) {
    throw new Error('requestResource is not a function')
  }
  if (srs.length < 1) {
    return // TODO: throw an error
  }
  let vmIpLastNumber = 101
  const xapi = this.getXapi(srs[0])
  if (CURRENTLY_CREATING_SRS[xapi.pool.$id]) {
    throw new Error('createSR is already running for this pool')
  }
  CURRENTLY_CREATING_SRS[xapi.pool.$id] = true
  try {
    const xosanNetwork = await createNetworkAndInsertHosts(xapi, pif, vlan)
    $onFailure(() => xapi.deleteNetwork(xosanNetwork)::pCatch(noop))
    const sshKey = await getOrCreateSshKey(xapi)
    const srsObjects = map(srs, srId => xapi.getObject(srId))
    // one gluster VM per SR, each with a distinct IP starting at .101
    const vmParameters = map(srs, srId => {
      const sr = xapi.getObject(srId)
      const host = sr.$PBDs[0].$host
      return {
        sr,
        host,
        name_label: `XOSAN - ${sr.name_label} - ${host.name_label}`,
        name_description: 'Xosan VM storing data on volume ' + sr.name_label,
        // the values of the xenstore_data object *have* to be string, don't forget.
        xenstore_data: {
          'vm-data/hostname': 'XOSAN' + sr.name_label,
          'vm-data/sshkey': sshKey.public,
          'vm-data/ip': NETWORK_PREFIX + (vmIpLastNumber++),
          'vm-data/mtu': String(xosanNetwork.MTU),
          'vm-data/vlan': String(vlan)
        }
      }
    })
    // push the SSH key pair to every involved host so they can run remoteSsh
    await Promise.all(vmParameters.map(vmParam => callPlugin(xapi, vmParam.host, 'receive_ssh_keys', {
      private_key: sshKey.private,
      public_key: sshKey.public,
      force: 'true'
    })))
    // import the template once, then clone it for the remaining nodes
    const firstVM = await xapi.importVm(
      await this.requestResource('xosan', template.id, template.version),
      { srId: vmParameters[0].sr.$ref, type: 'xva' }
    )
    $onFailure(() => xapi.deleteVm(firstVM, true)::pCatch(noop))
    await xapi.editVm(firstVM, {
      autoPoweron: true
    })
    const copiedVms = await Promise.all(vmParameters.slice(1).map(param => copyVm(xapi, firstVM, param)))
    // TODO: Promise.all() is certainly not the right operation to execute all the given promises whether they fulfill or reject.
    $onFailure(() => Promise.all(copiedVms.map(vm => xapi.deleteVm(vm.vm, true)))::pCatch(noop))
    const vmsAndParams = [{
      vm: firstVM,
      params: vmParameters[0]
    }].concat(copiedVms)
    let arbiter = null
    // 2-node setups get a third, metadata-only brick to avoid split-brain
    if (srs.length === 2) {
      const sr = vmParameters[0].sr
      const arbiterConfig = {
        sr: sr,
        host: vmParameters[0].host,
        name_label: vmParameters[0].name_label + ' arbiter',
        name_description: 'Xosan VM storing data on volume ' + sr.name_label,
        xenstore_data: {
          'vm-data/hostname': 'XOSAN' + sr.name_label + '_arb',
          'vm-data/sshkey': sshKey.public,
          'vm-data/ip': NETWORK_PREFIX + (vmIpLastNumber++),
          'vm-data/mtu': String(xosanNetwork.MTU),
          'vm-data/vlan': String(vlan)
        }
      }
      const arbiterVm = await copyVm(xapi, firstVM, arbiterConfig)
      $onFailure(() => xapi.deleteVm(arbiterVm.vm, true)::pCatch(noop))
      // false: the arbiter stores no data, so its data disk is not grown
      arbiter = await prepareGlusterVm(xapi, arbiterVm, xosanNetwork, false)
    }
    const ipAndHosts = await Promise.all(map(vmsAndParams, vmAndParam => prepareGlusterVm(xapi, vmAndParam, xosanNetwork)))
    const firstIpAndHost = ipAndHosts[0]
    await configureGluster(redundancy, ipAndHosts, xapi, firstIpAndHost, glusterType, arbiter)
    debug('xosan gluster volume started')
    const config = { server: firstIpAndHost.address + ':/xosan', backupserver: ipAndHosts[1].address }
    const xosanSr = await xapi.call('SR.create', srsObjects[0].$PBDs[0].$host.$ref, config, 0, 'XOSAN', 'XOSAN', 'xosan', '', true, {})
    if (arbiter) {
      ipAndHosts.push(arbiter)
    }
    // we just forget because the cleanup actions will be executed before.
    $onFailure(() => xapi.forgetSr(xosanSr)::pCatch(noop))
    // persist the node list so getVolumeInfo can map bricks back to VMs
    await xapi.xo.setData(xosanSr, 'xosan_config', {
      nodes: ipAndHosts.map(param => ({
        host: param.host.$id,
        vm: { id: param.vm.$id, ip: param.address }
      })),
      network: xosanNetwork.$id
    })
  } finally {
    delete CURRENTLY_CREATING_SRS[xapi.pool.$id]
  }
})
createSR.description = 'create gluster VM'
createSR.permission = 'admin'
createSR.params = {
  srs: {
    type: 'array',
    items: {
      type: 'string'
    }
  },
  pif: {
    type: 'string'
  },
  vlan: {
    type: 'string'
  },
  glusterType: {
    type: 'string'
  },
  redundancy: {
    type: 'number'
  }
}
createSR.resolve = {
  srs: ['sr', 'SR', 'administrate'],
  pif: ['pif', 'PIF', 'administrate']
}
// Report whether a XOSAN SR creation is currently running on the given pool
// (see the CURRENTLY_CREATING_SRS guard in createSR). Always returns a
// boolean. Fixes the "curently" typo in the user-visible description.
export function checkSrIsBusy ({ poolId }) {
  return Boolean(CURRENTLY_CREATING_SRS[poolId])
}
checkSrIsBusy.description = 'checks if there is a xosan SR currently being created on the given pool id'
checkSrIsBusy.permission = 'admin'
checkSrIsBusy.params = { poolId: { type: 'string' } }
// Valid gluster layouts per node count (2..16 nodes). `capacity` is the
// number of brick-sized units of usable space the layout provides.
const POSSIBLE_CONFIGURATIONS = {
  2: [{ layout: 'replica_arbiter', redundancy: 3, capacity: 1 }],
  3: [
    { layout: 'disperse', redundancy: 1, capacity: 2 },
    { layout: 'replica', redundancy: 3, capacity: 1 }],
  4: [{ layout: 'replica', redundancy: 2, capacity: 1 }],
  5: [{ layout: 'disperse', redundancy: 1, capacity: 4 }],
  6: [
    { layout: 'disperse', redundancy: 2, capacity: 4 },
    { layout: 'replica', redundancy: 2, capacity: 3 },
    { layout: 'replica', redundancy: 3, capacity: 2 }],
  7: [{ layout: 'disperse', redundancy: 3, capacity: 4 }],
  8: [{ layout: 'replica', redundancy: 2, capacity: 4 }],
  9: [
    { layout: 'disperse', redundancy: 1, capacity: 8 },
    { layout: 'replica', redundancy: 3, capacity: 3 }],
  10: [
    { layout: 'disperse', redundancy: 2, capacity: 8 },
    { layout: 'replica', redundancy: 2, capacity: 5 }],
  11: [{ layout: 'disperse', redundancy: 3, capacity: 8 }],
  12: [
    { layout: 'disperse', redundancy: 4, capacity: 8 },
    { layout: 'replica', redundancy: 2, capacity: 6 }],
  13: [{ layout: 'disperse', redundancy: 5, capacity: 8 }],
  14: [
    { layout: 'disperse', redundancy: 6, capacity: 8 },
    { layout: 'replica', redundancy: 2, capacity: 7 }],
  15: [
    { layout: 'disperse', redundancy: 7, capacity: 8 },
    { layout: 'replica', redundancy: 3, capacity: 5 }],
  16: [{ layout: 'replica', redundancy: 2, capacity: 8 }]
}
// List the XOSAN layouts available for the given local LVM SRs, each
// annotated with the usable space it would provide.
//
// Returns null when no layout exists for that node count; the available
// space is derived from the smallest free space among the SRs, minus the
// system-disk overhead of one gluster VM.
export async function computeXosanPossibleOptions ({ lvmSrs }) {
  const count = lvmSrs.length
  const configurations = POSSIBLE_CONFIGURATIONS[count]
  if (!configurations) {
    return null
  }
  if (count > 0) {
    const xapi = this.getXapi(lvmSrs[0])
    // free space of each SR; the smallest one bounds the brick size
    const freeSpaces = map(lvmSrs, srId => {
      const sr = xapi.getObject(srId)
      return sr.physical_size - sr.physical_utilisation
    })
    const minSize = Math.min(...freeSpaces)
    const brickSize = (minSize - XOSAN_VM_SYSTEM_DISK_SIZE) * XOSAN_DATA_DISK_USEAGE_RATIO
    return configurations.map(conf => ({ ...conf, availableSpace: brickSize * conf.capacity }))
  }
}
computeXosanPossibleOptions.params = {
  lvmSrs: {
    type: 'array',
    items: {
      type: 'string'
    }
  }
}
// ---------------------------------------------------------------------
// Download the XOSAN supplemental pack identified by (id, version) from the
// resource service and install it on every host of the given pool.
// Fixes the copy-pasted description, which claimed the method registers a
// resource rather than installing a pack.
export async function downloadAndInstallXosanPack ({ id, version, pool }) {
  // requestResource is provided by the xoa-updater plugin
  if (!this.requestResource) {
    throw new Error('requestResource is not a function')
  }
  const xapi = this.getXapi(pool.id)
  const res = await this.requestResource('xosan', id, version)
  return xapi.installSupplementalPackOnAllHosts(res)
}
downloadAndInstallXosanPack.description = 'downloads and installs a supplemental pack on all hosts of the pool'
downloadAndInstallXosanPack.params = {
  id: { type: 'string' },
  version: { type: 'string' },
  pool: { type: 'string' }
}
downloadAndInstallXosanPack.resolve = {
  pool: ['pool', 'pool', 'administrate']
}
downloadAndInstallXosanPack.permission = 'admin'

View File

@@ -3,6 +3,7 @@ import difference from 'lodash/difference'
import filter from 'lodash/filter'
import getKey from 'lodash/keys'
import {createClient as createRedisClient} from 'redis'
import {v4 as generateUuid} from 'uuid'
import {
forEach,
@@ -35,13 +36,13 @@ export default class Redis extends Collection {
connection,
indexes = [],
prefix,
uri = 'tcp://localhost:6379'
uri
}) {
super()
this.indexes = indexes
this.prefix = prefix
this.redis = promisifyAll.call(connection || createRedisClient(uri))
this.redis = promisifyAll(connection || createRedisClient(uri))
}
_extract (ids) {
@@ -68,12 +69,12 @@ export default class Redis extends Collection {
// TODO: remove “replace” which is a temporary measure, implement
// “set()” instead.
const {indexes, prefix, redis, idPrefix = ''} = this
const {indexes, prefix, redis} = this
return Promise.all(mapToArray(models, async model => {
// Generate a new identifier if necessary.
if (model.id === undefined) {
model.id = idPrefix + String(await redis.incr(prefix + '_id'))
model.id = generateUuid()
}
const success = await redis.sadd(prefix + '_ids', model.id)
@@ -149,6 +150,10 @@ export default class Redis extends Collection {
}
_remove (ids) {
if (isEmpty(ids)) {
return
}
const {prefix, redis} = this
// TODO: handle indexes.

View File

@@ -1,65 +1,19 @@
import bind from 'lodash/bind'
import { getBoundPropertyDescriptor } from 'bind-property-descriptor'
import {
isArray,
isPromise,
isFunction,
noop,
pFinally
isFunction
} from './utils'
// ===================================================================
const {
defineProperties,
defineProperty,
getOwnPropertyDescriptor
} = Object
// ===================================================================
// See: https://github.com/jayphelps/core-decorators.js#autobind
//
// TODO: make it work for all class methods.
export const autobind = (target, key, {
configurable,
enumerable,
value: fn,
writable
}) => ({
configurable,
enumerable,
get () {
if (this === target) {
return fn
}
const bound = bind(fn, this)
defineProperty(this, key, {
configurable: true,
enumerable: false,
value: bound,
writable: true
})
return bound
},
set (newValue) {
// Cannot use assignment because it will call the setter on
// the prototype.
defineProperty(this, key, {
configurable: true,
enumerable: true,
value: newValue,
writable: true
})
}
})
// -------------------------------------------------------------------
// Debounce decorator for methods.
//
// See: https://github.com/wycats/javascript-decorators
@@ -70,7 +24,7 @@ export const debounce = duration => (target, name, descriptor) => {
// This symbol is used to store the related data directly on the
// current object.
const s = Symbol()
const s = Symbol(`debounced ${name} data`)
function debounced () {
const data = this[s] || (this[s] = {
@@ -98,119 +52,8 @@ export const debounce = duration => (target, name, descriptor) => {
// -------------------------------------------------------------------
const _push = Array.prototype.push
export const deferrable = (target, name, descriptor) => {
let fn
function newFn () {
const deferreds = []
const defer = fn => {
deferreds.push(fn)
}
defer.clear = () => {
deferreds.length = 0
}
const args = [ defer ]
_push.apply(args, arguments)
let executeDeferreds = () => {
let i = deferreds.length
while (i) {
deferreds[--i]()
}
}
try {
const result = fn.apply(this, args)
if (isPromise(result)) {
result::pFinally(executeDeferreds)
// Do not execute the deferreds in the finally block.
executeDeferreds = noop
}
return result
} finally {
executeDeferreds()
}
}
if (descriptor) {
fn = descriptor.value
descriptor.value = newFn
return descriptor
}
fn = target
return newFn
}
// Deferred functions are only executed on failures.
//
// i.e.: defer.clear() is automatically called in case of success.
deferrable.onFailure = (target, name, descriptor) => {
let fn
function newFn (defer) {
const result = fn.apply(this, arguments)
return isPromise(result)
? result.then(result => {
defer.clear()
return result
})
: (defer.clear(), result)
}
if (descriptor) {
fn = descriptor.value
descriptor.value = newFn
} else {
fn = target
target = newFn
}
return deferrable(target, name, descriptor)
}
// Deferred functions are only executed on success.
//
// i.e.: defer.clear() is automatically called in case of failure.
deferrable.onSuccess = (target, name, descriptor) => {
let fn
function newFn (defer) {
try {
const result = fn.apply(this, arguments)
return isPromise(result)
? result.then(null, error => {
defer.clear()
throw error
})
: result
} catch (error) {
defer.clear()
throw error
}
}
if (descriptor) {
fn = descriptor.value
descriptor.value = newFn
} else {
fn = target
target = newFn
}
return deferrable(target, name, descriptor)
}
// -------------------------------------------------------------------
const _ownKeys = (
typeof Reflect !== 'undefined' && Reflect.ownKeys ||
(typeof Reflect !== 'undefined' && Reflect.ownKeys) ||
(({
getOwnPropertyNames: names,
getOwnPropertySymbols: symbols
@@ -220,22 +63,6 @@ const _ownKeys = (
)(Object)
)
const _bindPropertyDescriptor = (descriptor, thisArg) => {
const { get, set, value } = descriptor
if (get) {
descriptor.get = bind(get, thisArg)
}
if (set) {
descriptor.set = bind(set, thisArg)
}
if (isFunction(value)) {
descriptor.value = bind(value, thisArg)
}
return descriptor
}
const _isIgnoredProperty = name => (
name[0] === '_' ||
name === 'constructor'
@@ -284,7 +111,7 @@ export const mixin = MixIns => Class => {
defineProperties(prototype, descriptors)
}
const Decorator = (...args) => {
function Decorator (...args) {
const instance = new Class(...args)
for (const MixIn of MixIns) {
@@ -300,8 +127,9 @@ export const mixin = MixIns => Class => {
throw new Error(`${name}#${prop} is already defined`)
}
descriptors[prop] = _bindPropertyDescriptor(
getOwnPropertyDescriptor(prototype, prop),
descriptors[prop] = getBoundPropertyDescriptor(
prototype,
prop,
mixinInstance
)
}

View File

@@ -1,48 +1,9 @@
/* eslint-env mocha */
/* eslint-env jest */
import expect from 'must'
import {debounce} from './decorators'
// ===================================================================
import {autobind, debounce, deferrable} from './decorators'
// ===================================================================
describe('autobind()', () => {
class Foo {
@autobind
getFoo () {
return this
}
}
it('returns a bound instance for a method', () => {
const foo = new Foo()
const { getFoo } = foo
expect(getFoo()).to.equal(foo)
})
it('returns the same bound instance each time', () => {
const foo = new Foo()
expect(foo.getFoo).to.equal(foo.getFoo)
})
it('works with multiple instances of the same class', () => {
const foo1 = new Foo()
const foo2 = new Foo()
const getFoo1 = foo1.getFoo
const getFoo2 = foo2.getFoo
expect(getFoo1()).to.equal(foo1)
expect(getFoo2()).to.equal(foo2)
})
})
// -------------------------------------------------------------------
describe('debounce()', () => {
let i
@@ -60,114 +21,19 @@ describe('debounce()', () => {
it('works', done => {
const foo = new Foo()
expect(i).to.equal(0)
expect(i).toBe(0)
foo.foo()
expect(i).to.equal(1)
expect(i).toBe(1)
foo.foo()
expect(i).to.equal(1)
expect(i).toBe(1)
setTimeout(() => {
foo.foo()
expect(i).to.equal(2)
expect(i).toBe(2)
done()
}, 2e1)
})
})
// -------------------------------------------------------------------
describe('deferrable()', () => {
it('works with normal termination', () => {
let i = 0
const fn = deferrable(defer => {
i += 2
defer(() => { i -= 2 })
i *= 2
defer(() => { i /= 2 })
return i
})
expect(fn()).to.equal(4)
expect(i).to.equal(0)
})
it('defer.clear() removes previous deferreds', () => {
let i = 0
const fn = deferrable(defer => {
i += 2
defer(() => { i -= 2 })
defer.clear()
i *= 2
defer(() => { i /= 2 })
return i
})
expect(fn()).to.equal(4)
expect(i).to.equal(2)
})
it('works with exception', () => {
let i = 0
const fn = deferrable(defer => {
i += 2
defer(() => { i -= 2 })
i *= 2
defer(() => { i /= 2 })
throw i
})
expect(() => fn()).to.throw(4)
expect(i).to.equal(0)
})
it('works with promise resolution', async () => {
let i = 0
const fn = deferrable(async defer => {
i += 2
defer(() => { i -= 2 })
i *= 2
defer(() => { i /= 2 })
// Wait a turn of the events loop.
await Promise.resolve()
return i
})
await expect(fn()).to.eventually.equal(4)
expect(i).to.equal(0)
})
it('works with promise rejection', async () => {
let i = 0
const fn = deferrable(async defer => {
// Wait a turn of the events loop.
await Promise.resolve()
i += 2
defer(() => { i -= 2 })
i *= 2
defer(() => { i /= 2 })
// Wait a turn of the events loop.
await Promise.resolve()
throw i
})
await expect(fn()).to.reject.to.equal(4)
expect(i).to.equal(0)
})
})

View File

@@ -20,14 +20,15 @@ import { boot16 as fat16 } from 'fatfs/structs'
const SECTOR_SIZE = 512
const TEN_MIB = 10 * 1024 * 1024
// Creates a 10MB buffer and initializes it as a FAT 16 volume.
export function init () {
const buf = new Buffer(10 * 1024 * 1024) // 10MB
buf.fill(0)
const buf = Buffer.alloc(TEN_MIB)
// https://github.com/natevw/fatfs/blob/master/structs.js
fat16.pack({
jmpBoot: new Buffer('eb3c90', 'hex'),
jmpBoot: Buffer.from('eb3c90', 'hex'),
OEMName: 'mkfs.fat',
BytsPerSec: SECTOR_SIZE,
SecPerClus: 4,

View File

@@ -1,27 +1,23 @@
import assign from 'lodash/assign'
import startsWith from 'lodash/startsWith'
import { parse as parseUrl } from 'url'
import isRedirect from 'is-redirect'
import { assign, isString, startsWith } from 'lodash'
import { cancellable } from 'promise-toolbox'
import { request as httpRequest } from 'http'
import { request as httpsRequest } from 'https'
import { stringify as formatQueryString } from 'querystring'
import {
isString,
streamToBuffer
} from './utils'
format as formatUrl,
parse as parseUrl,
resolve as resolveUrl
} from 'url'
import { streamToBuffer } from './utils'
// -------------------------------------------------------------------
export default (...args) => {
const raw = opts => {
let req
const pResponse = new Promise((resolve, reject) => {
const opts = {}
for (let i = 0, length = args.length; i < length; ++i) {
const arg = args[i]
assign(opts, isString(arg) ? parseUrl(arg) : arg)
}
const {
body,
headers: { ...headers } = {},
@@ -62,11 +58,16 @@ export default (...args) => {
}
}
req = (
protocol && startsWith(protocol.toLowerCase(), 'https')
? httpsRequest
: httpRequest
)({
const secure = protocol && startsWith(protocol.toLowerCase(), 'https')
let requestFn
if (secure) {
requestFn = httpsRequest
} else {
requestFn = httpRequest
delete rest.rejectUnauthorized
}
req = requestFn({
...rest,
headers
})
@@ -98,6 +99,11 @@ export default (...args) => {
}
const code = response.statusCode
const { location } = response.headers
if (isRedirect(code) && location) {
assign(opts, parseUrl(resolveUrl(formatUrl(opts), location)))
return raw(opts)
}
if (code < 200 || code >= 300) {
const error = new Error(response.statusMessage)
error.code = code
@@ -112,13 +118,27 @@ export default (...args) => {
return response
})
pResponse.cancel = () => {
req.emit('error', new Error('HTTP request canceled!'))
req.abort()
}
pResponse.readAll = () => pResponse.then(response => response.readAll())
pResponse.request = req
return pResponse
}
const httpRequestPlus = ($cancelToken, ...args) => {
const opts = {}
for (let i = 0, length = args.length; i < length; ++i) {
const arg = args[i]
assign(opts, isString(arg) ? parseUrl(arg) : arg)
}
const pResponse = raw(opts)
$cancelToken.promise.then(() => {
const { request } = pResponse
request.emit('error', new Error('HTTP request canceled!'))
request.abort()
})
pResponse.readAll = () => pResponse.then(response => response.readAll())
return pResponse
}
export default cancellable(httpRequestPlus)

View File

@@ -1,10 +1,8 @@
import createLogger from 'debug'
const debug = createLogger('xo:main')
import appConf from 'app-conf'
import bind from 'lodash/bind'
import blocked from 'blocked'
import createExpress from 'express'
import createLogger from 'debug'
import eventToPromise from 'event-to-promise'
import has from 'lodash/has'
import helmet from 'helmet'
@@ -18,10 +16,11 @@ import { createServer as createProxyServer } from 'http-proxy'
import { join as joinPath } from 'path'
import JsonRpcPeer from 'json-rpc-peer'
import { InvalidCredential } from './api-errors'
import { invalidCredentials } from 'xo-common/api-errors'
import {
readFile,
readdir
ensureDir,
readdir,
readFile
} from 'fs-promise'
import WebServer from 'http-server-plus'
@@ -48,6 +47,8 @@ import { Strategy as LocalStrategy } from 'passport-local'
// ===================================================================
const debug = createLogger('xo:main')
const warn = (...args) => {
console.warn('[Warn]', ...args)
}
@@ -130,6 +131,11 @@ async function setUpPassport (express, xo) {
}))
})
express.get('/signout', (req, res) => {
res.clearCookie('token')
res.redirect('/')
})
const SIGNIN_STRATEGY_RE = /^\/signin\/([^/]+)(\/callback)?(:?\?.*)?$/
express.use(async (req, res, next) => {
const { url } = req
@@ -179,7 +185,7 @@ async function setUpPassport (express, xo) {
next()
} else if (req.cookies.token) {
next()
} else if (/favicon|fontawesome|images|styles/.test(url)) {
} else if (/favicon|fontawesome|images|styles|\.(?:css|jpg|png)$/.test(url)) {
next()
} else {
req.flash('return-url', url)
@@ -216,13 +222,19 @@ async function registerPlugin (pluginPath, pluginName) {
const {
default: factory = plugin,
configurationSchema,
configurationPresets
configurationPresets,
testSchema
} = plugin
// The default export can be either a factory or directly a plugin
// instance.
const instance = isFunction(factory)
? factory({ xo: this })
? factory({
xo: this,
getDataDir: () => {
const dir = `${this._config.datadir}/${pluginName}`
return ensureDir(dir).then(() => dir)
}})
: factory
await this.registerPlugin(
@@ -230,6 +242,7 @@ async function registerPlugin (pluginPath, pluginName) {
instance,
configurationSchema,
configurationPresets,
testSchema,
version
)
}
@@ -400,12 +413,11 @@ const setUpStaticFiles = (express, opts) => {
const setUpApi = (webServer, xo, verboseLogsOnErrors) => {
const webSocketServer = new WebSocket.Server({
server: webServer,
path: '/api/'
noServer: true
})
xo.on('stop', () => pFromCallback(cb => webSocketServer.close(cb)))
webSocketServer.on('connection', socket => {
const onConnection = socket => {
const { remoteAddress } = socket.upgradeReq.socket
debug('+ WebSocket connection (%s)', remoteAddress)
@@ -448,6 +460,11 @@ const setUpApi = (webServer, xo, verboseLogsOnErrors) => {
socket.send(data, onSend)
}
})
}
webServer.on('upgrade', (req, socket, head) => {
if (req.url === '/api/') {
webSocketServer.handleUpgrade(req, socket, head, onConnection)
}
})
}
@@ -475,7 +492,7 @@ const setUpConsoleProxy = (webServer, xo) => {
const user = await xo.authenticateUser({ token })
if (!await xo.hasPermissions(user.id, [ [ id, 'operate' ] ])) {
throw new InvalidCredential()
throw invalidCredentials()
}
const { remoteAddress } = socket
@@ -493,7 +510,7 @@ const setUpConsoleProxy = (webServer, xo) => {
proxyConsole(connection, vmConsole, xapi.sessionId)
})
} catch (error) {
console.error(error && error.stack || error)
console.error((error && error.stack) || error)
}
})
}
@@ -608,16 +625,24 @@ export default async function main (args) {
await registerPlugins(xo)
}
// Gracefully shutdown on signals.
//
// TODO: implements a timeout? (or maybe it is the services launcher
// responsibility?)
const shutdown = signal => {
debug('%s caught, closing…', signal)
xo.stop()
}
forEach([ 'SIGINT', 'SIGTERM' ], signal => {
let alreadyCalled = false
// Gracefully shutdown on signals.
process.on('SIGINT', () => shutdown('SIGINT'))
process.on('SIGTERM', () => shutdown('SIGTERM'))
process.on(signal, () => {
if (alreadyCalled) {
warn('forced exit')
process.exit(1)
}
alreadyCalled = true
debug('%s caught, closing…', signal)
xo.stop()
})
})
await eventToPromise(xo, 'stopped')

View File

@@ -9,6 +9,7 @@ import mapValues from 'lodash/mapValues'
import size from 'lodash/size'
import some from 'lodash/some'
import { BaseError } from 'make-error'
import { timeout } from 'promise-toolbox'
import { crossProduct } from './math'
import {
@@ -32,8 +33,13 @@ export class UnsupportedVectorType extends JobExecutorError {
const match = (pattern, value) => {
if (isPlainObject(pattern)) {
if (pattern.__or && size(pattern) === 1) {
return some(pattern.__or, subpattern => match(subpattern, value))
if (size(pattern) === 1) {
if (pattern.__or) {
return some(pattern.__or, subpattern => match(subpattern, value))
}
if (pattern.__not) {
return !match(pattern.__not, value)
}
}
return isPlainObject(value) && every(pattern, (subpattern, key) => (
@@ -158,8 +164,12 @@ export default class JobExecutor {
params,
start: Date.now()
}
let promise = this.xo.callApiMethod(connection, job.method, assign({}, params))
if (job.timeout) {
promise = promise::timeout(job.timeout)
}
return this.xo.callApiMethod(connection, job.method, assign({}, params)).then(
return promise.then(
value => {
this._logger.notice(`Call ${job.method} (${runCallId}) is a success. (${job.id})`, {
event: 'jobCall.end',

View File

@@ -1,12 +1,10 @@
/* eslint-env mocha */
import leche from 'leche'
import { expect } from 'chai'
/* eslint-env jest */
import { forEach } from 'lodash'
import { resolveParamsVector } from './job-executor'
describe('resolveParamsVector', function () {
leche.withData({
forEach({
'cross product with three sets': [
// Expected result.
[ { id: 3, value: 'foo', remote: 'local' },
@@ -92,9 +90,11 @@ describe('resolveParamsVector', function () {
}
}
]
}, function (expectedResult, entry, context) {
it('Resolves params vector', function () {
expect(resolveParamsVector.call(context, entry)).to.deep.have.members(expectedResult)
}, ([ expectedResult, entry, context ], name) => {
describe(`with ${name}`, () => {
it('Resolves params vector', () => {
expect(resolveParamsVector.call(context, entry)).toEqual(expectedResult)
})
})
})
})

33
src/lvm.js Normal file
View File

@@ -0,0 +1,33 @@
import execa from 'execa'
import splitLines from 'split-lines'
import { createParser } from 'parse-pairs'
import { isArray, map } from 'lodash'
// ===================================================================
// Parser for the `KEY=value` pairs emitted by the LVM reporting
// commands: `--nameprefixes` (see below) prefixes every key, so the
// first 5 characters (presumably `LVM2_` — confirm against lvm(8)) are
// stripped and the rest lower-cased.
const parse = createParser({
  keyTransform: key => key.slice(5).toLowerCase()
})
// Builds a reporting helper around an LVM command (`lvs`, `pvs`, …).
//
// The returned function takes the field(s) to report plus any extra
// CLI arguments, runs the command with machine-friendly flags (no
// header, no unit suffix, name prefixes, bytes) and resolves with the
// parsed rows: plain objects when `fields` is an array, bare values
// when a single field name is given.
const makeFunction = command => (fields, ...args) => {
  const cliArgs = [
    '--noheading',
    '--nosuffix',
    '--nameprefixes',
    '--unbuffered',
    '--units',
    'b',
    '-o',
    String(fields),
    ...args
  ]

  // single-field variant: extract just that value from each parsed line
  const extractField = line => parse(line)[fields]

  return execa.stdout(command, cliArgs).then(stdout =>
    map(splitLines(stdout), isArray(fields) ? parse : extractField)
  )
}

export const lvs = makeFunction('lvs')
export const pvs = makeFunction('pvs')

View File

@@ -11,7 +11,7 @@ const _combine = (vectors, n, cb) => {
const m = vector.length
if (n === 1) {
for (let i = 0; i < m; ++i) {
cb([ vector[i] ])
cb([ vector[i] ]) // eslint-disable-line standard/no-callback-literal
}
return
}

View File

@@ -1,8 +1,6 @@
/* eslint-env mocha */
import { expect } from 'chai'
import leche from 'leche'
/* eslint-env jest */
import { forEach } from 'lodash'
import { thunkToArray } from './utils'
import {
crossProduct,
@@ -10,7 +8,7 @@ import {
} from './math'
describe('mergeObjects', function () {
leche.withData({
forEach({
'Two sets of one': [
{a: 1, b: 2}, {a: 1}, {b: 2}
],
@@ -32,9 +30,11 @@ describe('mergeObjects', function () {
'No set': [
{}
]
}, function (resultSet, ...sets) {
it('Assembles all given param sets in on set', function () {
expect(mergeObjects(sets)).to.eql(resultSet)
}, ([ resultSet, ...sets ], name) => {
describe(`with ${name}`, () => {
it('Assembles all given param sets in on set', function () {
expect(mergeObjects(sets)).toEqual(resultSet)
})
})
})
})
@@ -45,7 +45,7 @@ describe('crossProduct', function () {
// Gives the product of all args
const multiplyTest = args => args.reduce((prev, curr) => prev * curr, 1)
leche.withData({
forEach({
'2 sets of 2 items to multiply': [
[10, 14, 15, 21], [[2, 3], [5, 7]], multiplyTest
],
@@ -64,9 +64,11 @@ describe('crossProduct', function () {
'2 sets of 3 items to add': [
[9, 13, 15, 10, 14, 16, 12, 16, 18], [[2, 3, 5], [7, 11, 13]], addTest
]
}, function (product, items, cb) {
it('Crosses sets of values with a crossProduct callback', function () {
expect(thunkToArray(crossProduct(items, cb))).to.have.members(product)
}, ([ product, items, cb ], name) => {
describe(`with ${name}`, () => {
it('Crosses sets of values with a crossProduct callback', function () {
expect(thunkToArray(crossProduct(items, cb)).sort()).toEqual(product.sort())
})
})
})
})

View File

@@ -1,8 +1,12 @@
import isEmpty from 'lodash/isEmpty'
import Collection from '../collection/redis'
import Model from '../model'
import { forEach } from '../utils'
import { parseProp } from './utils'
// ===================================================================
export default class Group extends Model {}
@@ -14,20 +18,16 @@ export class Groups extends Collection {
return Group
}
get idPrefix () {
return 'group:'
}
create (name) {
return this.add(new Group({
name,
users: '[]'
}))
return this.add(new Group({ name }))
}
async save (group) {
// Serializes.
group.users = JSON.stringify(group.users)
let tmp
group.users = isEmpty(tmp = group.users)
? undefined
: JSON.stringify(tmp)
return /* await */ this.update(group)
}
@@ -37,13 +37,7 @@ export class Groups extends Collection {
// Deserializes.
forEach(groups, group => {
const {users} = group
try {
group.users = JSON.parse(users)
} catch (error) {
console.warn('cannot parse group.users:', users)
group.users = []
}
group.users = parseProp('group', group, 'users', [])
})
return groups

View File

@@ -11,12 +11,7 @@ export class Jobs extends Collection {
return Job
}
get idPrefix () {
return 'job:'
}
async create (userId, job) {
job.userId = userId
async create (job) {
// Serializes.
job.paramsVector = JSON.stringify(job.paramsVector)
return /* await */ this.add(new Job(job))

View File

@@ -13,10 +13,6 @@ export class PluginsMetadata extends Collection {
return PluginMetadata
}
get idPrefix () {
return 'plugin-metadata:'
}
async save ({ id, autoload, configuration }) {
return /* await */ this.update({
id,

View File

@@ -13,10 +13,6 @@ export class Remotes extends Collection {
return Remote
}
get idPrefix () {
return 'remote-'
}
create (name, url) {
return this.add(new Remote({
name,

View File

@@ -11,10 +11,6 @@ export class Schedules extends Collection {
return Schedule
}
get idPrefix () {
return 'schedule:'
}
create (userId, job, cron, enabled, name = undefined, timezone = undefined) {
return this.add(new Schedule({
userId,

View File

@@ -1,5 +1,8 @@
import Collection from '../collection/redis'
import Model from '../model'
import { forEach } from '../utils'
import { parseProp } from './utils'
// ===================================================================
@@ -12,11 +15,26 @@ export class Servers extends Collection {
return Server
}
async create ({host, username, password, readOnly}) {
async create ({label, host, username, password, readOnly}) {
if (await this.exists({host})) {
throw new Error('server already exists')
}
return /* await */ this.add({host, username, password, readOnly})
return /* await */ this.add({label, host, username, password, readOnly})
}
async get (properties) {
const servers = await super.get(properties)
// Deserializes
forEach(servers, server => {
if (server.error) {
server.error = parseProp('server', server, 'error', '')
} else {
delete server.error
}
})
return servers
}
}

View File

@@ -4,6 +4,8 @@ import Collection from '../collection/redis'
import Model from '../model'
import { forEach } from '../utils'
import { parseProp } from './utils'
// ===================================================================
export default class User extends Model {}
@@ -14,32 +16,19 @@ User.prototype.default = {
// -------------------------------------------------------------------
const parseProp = (obj, name) => {
const value = obj[name]
if (value == null) {
return
}
try {
return JSON.parse(value)
} catch (error) {
console.warn('cannot parse user[%s] (%s):', name, value, error)
}
}
export class Users extends Collection {
get Model () {
return User
}
async create (email, properties = {}) {
async create (properties) {
const { email } = properties
// Avoid duplicates.
if (await this.exists({email})) {
throw new Error(`the user ${email} already exists`)
}
// Adds the email to the user's properties.
properties.email = email
// Create the user object.
const user = new User(properties)
@@ -50,12 +39,12 @@ export class Users extends Collection {
async save (user) {
// Serializes.
let tmp
if (!isEmpty(tmp = user.groups)) {
user.groups = JSON.stringify(tmp)
}
if (!isEmpty(tmp = user.preferences)) {
user.preferences = JSON.stringify(tmp)
}
user.groups = isEmpty(tmp = user.groups)
? undefined
: JSON.stringify(tmp)
user.preferences = isEmpty(tmp = user.preferences)
? undefined
: JSON.stringify(tmp)
return /* await */ this.update(user)
}
@@ -65,11 +54,8 @@ export class Users extends Collection {
// Deserializes
forEach(users, user => {
let tmp
user.groups = ((tmp = parseProp(user, 'groups')) && tmp.length)
? tmp
: undefined
user.preferences = parseProp(user, 'preferences')
user.groups = parseProp('user', user, 'groups', [])
user.preferences = parseProp('user', user, 'preferences', {})
})
return users

17
src/models/utils.js Normal file
View File

@@ -0,0 +1,17 @@
import prettyFormat from 'pretty-format'
// Safely deserializes a JSON-encoded property of a model object.
//
// `type` is only used in the warning message (e.g. 'user', 'group'),
// `obj` is the model object, `name` the property to parse and
// `defaultValue` what to return when the property is absent, empty or
// not valid JSON.
export const parseProp = (type, obj, name, defaultValue) => {
  const raw = obj[name]

  // nullish or empty string: nothing to parse (the empty string is a
  // trivial and minor error, not worth a warning)
  if (raw == null || raw === '') {
    return defaultValue
  }

  try {
    return JSON.parse(raw)
  } catch (error) {
    console.warn('cannot parse %ss[%j].%s (%s):', type, obj.id, name, prettyFormat(raw), error)
    return defaultValue
  }
}

42
src/node_modules/constant-stream.js generated vendored Normal file
View File

@@ -0,0 +1,42 @@
import from2 from 'from2'
// Creates a readable stream which repeats `data` `n` times.
//
// `data` may be a Buffer or anything accepted by `Buffer.from()`; it
// must not be empty.
const constantStream = (data, n = 1) => {
  if (!Buffer.isBuffer(data)) {
    data = Buffer.from(data)
  }

  const { length } = data
  if (!length) {
    throw new Error('data should not be empty')
  }

  // from here on, `n` is the total number of bytes left to emit
  n *= length
  let currentLength = length

  return from2((size, next) => {
    if (n <= 0) {
      // everything has been emitted: end the stream
      return next(null, null)
    }

    if (n < size) {
      size = n
    }

    if (size < currentLength) {
      // emit a whole number of copies of `data` (at least one),
      // as close to `size` as possible
      const m = Math.floor(size / length) * length || length
      n -= m
      return next(null, data.slice(0, m))
    }

    // if more than twice the data length is requested, repeat the data
    if (size > currentLength * 2) {
      currentLength = Math.floor(size / length) * length
      data = Buffer.alloc(currentLength, data)
    }

    n -= currentLength
    return next(null, data)
  })
}
export { constantStream as default }

View File

@@ -56,7 +56,7 @@ export default function proxyConsole (ws, vmConsole, sessionId) {
.on('message', data => {
if (!closed) {
// Decode from base 64.
socket.write(new Buffer(data, 'base64'))
socket.write(Buffer.from(data, 'base64'))
}
})
.on('close', () => {

View File

@@ -112,48 +112,64 @@ export default class RemoteHandlerAbstract {
throw new Error('Not implemented')
}
async createReadStream (file, {
createReadStream (file, {
checksum = false,
ignoreMissingChecksum = false,
...options
} = {}) {
const streamP = this._createReadStream(file, options).then(async stream => {
await eventToPromise(stream, 'readable')
const streamP = this._createReadStream(file, options).then(stream => {
// detect early errors
let promise = eventToPromise(stream, 'readable')
if (stream.length === undefined) {
stream.length = await this.getSize(file)::pCatch(noop)
// try to add the length prop if missing and not a range stream
if (
stream.length === undefined &&
options.end === undefined &&
options.start === undefined
) {
promise = Promise.all([ promise, this.getSize(file).then(size => {
stream.length = size
}, noop) ])
}
return stream
return promise.then(() => stream)
})
if (!checksum) {
return streamP
}
try {
checksum = await this.readFile(`${file}.checksum`)
} catch (error) {
if (error.code === 'ENOENT' && ignoreMissingChecksum) {
return streamP
// avoid a unhandled rejection warning
streamP.catch(noop)
return this.readFile(`${file}.checksum`).then(
checksum => streamP.then(stream => {
const { length } = stream
stream = validChecksumOfReadStream(stream, String(checksum).trim())
stream.length = length
return stream
}),
error => {
if (ignoreMissingChecksum && error && error.code === 'ENOENT') {
return streamP
}
throw error
}
throw error
}
let stream = await streamP
const { length } = stream
stream = validChecksumOfReadStream(stream, checksum.toString())
stream.length = length
return stream
)
}
async _createReadStream (file, options) {
throw new Error('Not implemented')
}
async refreshChecksum (path) {
const stream = addChecksumToReadStream(await this.createReadStream(path))
stream.resume() // start reading the whole file
const checksum = await stream.checksum
await this.outputFile(`${path}.checksum`, checksum)
}
async createOutputStream (file, {
checksum = false,
...options

View File

@@ -86,5 +86,4 @@ export default class LocalHandler extends RemoteHandlerAbstract {
const stats = await fs.stat(this._getFilePath(file))
return stats.size
}
}

View File

@@ -12,7 +12,7 @@ export default class NfsHandler extends LocalHandler {
}
_getRealPath () {
return `/tmp/xo-server/mounts/${this._remote.id}`
return `/run/xo-server/mounts/${this._remote.id}`
}
async _loadRealMounts () {

View File

@@ -27,6 +27,10 @@ export default {
},
paramsVector: {
type: 'object'
},
timeout: {
type: 'number',
description: 'number of milliseconds after which the job is considered failed'
}
},
required: [

View File

@@ -20,7 +20,7 @@ export default {
},
unloadable: {
type: 'boolean',
default: 'true',
default: true,
description: 'whether or not this plugin can be unloaded'
},
configuration: {
@@ -30,6 +30,14 @@ export default {
configurationSchema: {
$ref: 'http://json-schema.org/draft-04/schema#',
description: 'configuration schema for this plugin (not present if not configurable)'
},
testable: {
type: 'boolean',
description: 'whether or not this plugin can be tested'
},
testSchema: {
$ref: 'http://json-schema.org/draft-04/schema#',
description: 'test schema for this plugin'
}
},
required: [

View File

@@ -0,0 +1,44 @@
import assert from 'assert'
// Reads a stream into a pre-allocated buffer.
//
// Fills `buffer` between `offset` (inclusive) and `end` (exclusive)
// and resolves with the number of bytes written. Rejects if the
// stream emits an error or provides more data than the target span
// can hold.
const streamToExistingBuffer = (
  stream,
  buffer,
  offset = 0,
  end = buffer.length
) => new Promise((resolve, reject) => {
  assert(offset >= 0)
  assert(end > offset)
  assert(end <= buffer.length)

  let cursor = offset

  const removeListeners = () => {
    stream.removeListener('data', onData)
    stream.removeListener('end', onEnd)
    stream.removeListener('error', onError)
  }

  const onError = error => {
    reject(error)
    removeListeners()
  }

  const onData = chunk => {
    const writeAt = cursor
    cursor += chunk.length
    if (cursor > end) {
      return onError(new Error('too much data'))
    }
    chunk.copy(buffer, writeAt)
  }

  const onEnd = () => {
    resolve(cursor - offset)
    removeListeners()
  }

  stream.on('data', onData)
  stream.on('end', onEnd)
  stream.on('error', onError)
})
export { streamToExistingBuffer as default }

View File

@@ -0,0 +1,20 @@
/* eslint-env jest */
import { createReadStream, readFile } from 'fs'
import { fromCallback } from 'promise-toolbox'
import streamToExistingBuffer from './stream-to-existing-buffer'
// Integration test: reads this very file through
// streamToExistingBuffer() and compares the result with a plain
// fs.readFile() of the same file.
describe('streamToExistingBuffer()', () => {
  it('read the content of a stream in a buffer', async () => {
    const stream = createReadStream(__filename)

    // expected content, read with the standard API for comparison
    const expected = await fromCallback(cb => readFile(__filename, 'utf-8', cb))

    // one extra leading byte to check the `offset` parameter is honored
    const buf = Buffer.allocUnsafe(expected.length + 1)
    buf[0] = 'A'.charCodeAt()
    await streamToExistingBuffer(stream, buf, 1)

    expect(String(buf)).toBe(`A${expected}`)
  })
})

View File

@@ -0,0 +1,27 @@
// Reads a whole stream and resolves with its content concatenated in
// a single, newly allocated Buffer. Rejects if the stream emits an
// error.
const streamToNewBuffer = stream => new Promise((resolve, reject) => {
  const chunks = []
  let total = 0

  const removeListeners = () => {
    stream.removeListener('data', onData)
    stream.removeListener('end', onEnd)
    stream.removeListener('error', onError)
  }

  const onData = chunk => {
    chunks.push(chunk)
    total += chunk.length
  }

  const onEnd = () => {
    resolve(Buffer.concat(chunks, total))
    removeListeners()
  }

  const onError = error => {
    reject(error)
    removeListeners()
  }

  stream.on('data', onData)
  stream.on('end', onEnd)
  stream.on('error', onError)
})
export { streamToNewBuffer as default }

View File

@@ -1,7 +1,6 @@
import base64url from 'base64url'
import eventToPromise from 'event-to-promise'
import forEach from 'lodash/forEach'
import getStream from 'get-stream'
import has from 'lodash/has'
import highland from 'highland'
import humanFormat from 'human-format'
@@ -12,17 +11,23 @@ import keys from 'lodash/keys'
import kindOf from 'kindof'
import multiKeyHashInt from 'multikey-hash'
import pick from 'lodash/pick'
import tmp from 'tmp'
import xml2js from 'xml2js'
import { resolve } from 'path'
// Moment timezone can be loaded only one time, it's a workaround to load
// the latest version because cron module uses an old version of moment which
// does not implement `guess` function for example.
import 'moment-timezone'
import through2 from 'through2'
import { CronJob } from 'cron'
import { Readable } from 'stream'
import { utcFormat, utcParse } from 'd3-time-format'
import {
all as pAll,
defer,
fromCallback,
promisify,
reflect as pReflect
} from 'promise-toolbox'
@@ -30,9 +35,6 @@ import {
createHash,
randomBytes
} from 'crypto'
import { Readable } from 'stream'
import through2 from 'through2'
import {utcFormat as d3TimeFormat} from 'd3-time-format'
// ===================================================================
@@ -54,7 +56,7 @@ export function bufferToStream (buf) {
return stream
}
export const streamToBuffer = getStream.buffer
export streamToBuffer from './stream-to-new-buffer'
// -------------------------------------------------------------------
@@ -207,7 +209,7 @@ export const getUserPublicProperties = user => pick(
// -------------------------------------------------------------------
export const getPseudoRandomBytes = n => {
const bytes = new Buffer(n)
const bytes = Buffer.allocUnsafe(n)
const odd = n & 1
for (let i = 0, m = n - odd; i < m; i += 2) {
@@ -226,7 +228,7 @@ export const generateUnsecureToken = (n = 32) => base64url(getPseudoRandomBytes(
// Generate a secure random Base64 string.
export const generateToken = (randomBytes => {
return (n = 32) => randomBytes(n).then(base64url)
})(randomBytes::promisify())
})(promisify(randomBytes))
// -------------------------------------------------------------------
@@ -391,7 +393,9 @@ export const popProperty = obj => {
// Format a date in ISO 8601 in a safe way to be used in filenames
// (even on Windows).
export const safeDateFormat = d3TimeFormat('%Y%m%dT%H%M%SZ')
export const safeDateFormat = utcFormat('%Y%m%dT%H%M%SZ')
export const safeDateParse = utcParse('%Y%m%dT%H%M%SZ')
// -------------------------------------------------------------------
@@ -450,7 +454,7 @@ export function map (
export const multiKeyHash = (...args) => new Promise(resolve => {
const hash = multiKeyHashInt(...args)
const buf = new Buffer(4)
const buf = Buffer.allocUnsafe(4)
buf.writeUInt32LE(hash, 0)
resolve(base64url(buf))
@@ -458,6 +462,11 @@ export const multiKeyHash = (...args) => new Promise(resolve => {
// -------------------------------------------------------------------
export const resolveSubpath = (root, path) =>
resolve(root, `./${resolve('/', path)}`)
// -------------------------------------------------------------------
export const streamToArray = (stream, {
filter,
mapper
@@ -489,7 +498,7 @@ export const scheduleFn = (cronTime, fn, timeZone) => {
try {
await fn()
} catch (error) {
console.error('[WARN] scheduled function:', error && error.stack || error)
console.error('[WARN] scheduled function:', (error && error.stack) || error)
} finally {
running = false
}
@@ -529,7 +538,7 @@ export const thunkToArray = thunk => {
// ```js
// promise.catch(throwFn('an error has occurred'))
//
// function foo (param = throwFn('param is required')) {}
// function foo (param = throwFn('param is required')()) {}
// ```
export const throwFn = error => () => {
throw (
@@ -541,5 +550,32 @@ export const throwFn = error => () => {
// -------------------------------------------------------------------
export const tmpDir = () => fromCallback(cb => tmp.dir(cb))
// -------------------------------------------------------------------
// Wrap a value in a function.
export const wrap = value => () => value
// -------------------------------------------------------------------
export const mapFilter = (collection, iteratee) => {
const result = []
forEach(collection, (...args) => {
const value = iteratee(...args)
if (value) {
result.push(value)
}
})
return result
}
// -------------------------------------------------------------------
export const splitFirst = (string, separator) => {
const i = string.indexOf(separator)
return i === -1 ? null : [
string.slice(0, i),
string.slice(i + separator.length)
]
}

View File

@@ -1,8 +1,4 @@
/* eslint-env mocha */
import expect from 'must'
// ===================================================================
/* eslint-env jest */
import {
camelToSnakeCase,
@@ -20,38 +16,34 @@ import {
describe('camelToSnakeCase()', function () {
it('converts a string from camelCase to snake_case', function () {
expect(camelToSnakeCase('fooBar')).to.equal('foo_bar')
expect(camelToSnakeCase('ipv4Allowed')).to.equal('ipv4_allowed')
expect(camelToSnakeCase('fooBar')).toBe('foo_bar')
expect(camelToSnakeCase('ipv4Allowed')).toBe('ipv4_allowed')
})
it('does not alter snake_case strings', function () {
expect(camelToSnakeCase('foo_bar')).to.equal('foo_bar')
expect(camelToSnakeCase('ipv4_allowed')).to.equal('ipv4_allowed')
expect(camelToSnakeCase('foo_bar')).toBe('foo_bar')
expect(camelToSnakeCase('ipv4_allowed')).toBe('ipv4_allowed')
})
it('does not alter upper case letters expect those from the camelCase', function () {
expect(camelToSnakeCase('fooBar_BAZ')).to.equal('foo_bar_BAZ')
expect(camelToSnakeCase('fooBar_BAZ')).toBe('foo_bar_BAZ')
})
})
// -------------------------------------------------------------------
describe('createRawObject()', () => {
it('returns an object', () => {
expect(createRawObject()).to.be.an.object()
})
it('returns an empty object', () => {
expect(createRawObject()).to.be.empty()
expect(createRawObject()).toEqual({})
})
it('creates a new object each time', () => {
expect(createRawObject()).to.not.equal(createRawObject())
expect(createRawObject()).not.toBe(createRawObject())
})
if (Object.getPrototypeOf) {
it('creates an object without a prototype', () => {
expect(Object.getPrototypeOf(createRawObject())).to.be.null()
expect(Object.getPrototypeOf(createRawObject())).toBe(null)
})
}
})
@@ -63,7 +55,7 @@ describe('diffItems', () => {
expect(diffItems(
['foo', 'bar'],
['baz', 'foo']
)).to.eql([
)).toEqual([
['bar'],
['baz']
])
@@ -76,17 +68,17 @@ describe('ensureArray()', function () {
it('wrap the value in an array', function () {
const value = 'foo'
expect(ensureArray(value)).to.eql([value])
expect(ensureArray(value)).toEqual([value])
})
it('returns an empty array for undefined', function () {
expect(ensureArray(undefined)).to.eql([])
expect(ensureArray(undefined)).toEqual([])
})
it('returns the object itself if is already an array', function () {
const array = ['foo', 'bar', 'baz']
expect(ensureArray(array)).to.equal(array)
expect(ensureArray(array)).toBe(array)
})
})
@@ -97,15 +89,15 @@ describe('extractProperty()', function () {
const value = {}
const obj = { prop: value }
expect(extractProperty(obj, 'prop')).to.equal(value)
expect(extractProperty(obj, 'prop')).toBe(value)
})
it('removes the property from the object', function () {
const value = {}
const obj = { prop: value }
expect(extractProperty(obj, 'prop')).to.equal(value)
expect(obj).to.not.have.property('prop')
expect(extractProperty(obj, 'prop')).toBe(value)
expect(obj.prop).not.toBeDefined()
})
})
@@ -120,7 +112,7 @@ describe('formatXml()', function () {
{$: {baz: 'plip'}}
]
}
})).to.equal(`<foo>
})).toBe(`<foo>
<bar baz="plop"/>
<bar baz="plip"/>
</foo>`)
@@ -131,7 +123,7 @@ describe('formatXml()', function () {
describe('generateToken()', () => {
it('generates a string', async () => {
expect(await generateToken()).to.be.a.string()
expect(typeof await generateToken()).toBe('string')
})
})
@@ -139,21 +131,21 @@ describe('generateToken()', () => {
describe('parseSize()', function () {
it('parses a human size', function () {
expect(parseSize('1G')).to.equal(1e9)
expect(parseSize('1G')).toBe(1e9)
})
it('returns the parameter if already a number', function () {
expect(parseSize(1e6)).to.equal(1e6)
expect(parseSize(1e6)).toBe(1e6)
})
it('throws if the string cannot be parsed', function () {
expect(function () {
parseSize('foo')
}).to.throw()
}).toThrow()
})
it('supports the B unit as suffix', function () {
expect(parseSize('3MB')).to.equal(3e6)
expect(parseSize('3MB')).toBe(3e6)
})
})
@@ -161,6 +153,7 @@ describe('parseSize()', function () {
describe('pSettle()', () => {
it('works with arrays', async () => {
const rejection = 'fatality'
const [
status1,
status2,
@@ -168,27 +161,29 @@ describe('pSettle()', () => {
] = await pSettle([
Promise.resolve(42),
Math.PI,
Promise.reject('fatality')
Promise.reject(rejection)
])
expect(status1.isRejected()).to.equal(false)
expect(status2.isRejected()).to.equal(false)
expect(status3.isRejected()).to.equal(true)
expect(status1.isRejected()).toBe(false)
expect(status2.isRejected()).toBe(false)
expect(status3.isRejected()).toBe(true)
expect(status1.isFulfilled()).to.equal(true)
expect(status2.isFulfilled()).to.equal(true)
expect(status3.isFulfilled()).to.equal(false)
expect(status1.isFulfilled()).toBe(true)
expect(status2.isFulfilled()).toBe(true)
expect(status3.isFulfilled()).toBe(false)
expect(status1.value()).to.equal(42)
expect(status2.value()).to.equal(Math.PI)
expect(::status3.value).to.throw()
expect(status1.value()).toBe(42)
expect(status2.value()).toBe(Math.PI)
expect(::status3.value).toThrow()
expect(::status1.reason).to.throw()
expect(::status2.reason).to.throw()
expect(status3.reason()).to.equal('fatality')
expect(::status1.reason).toThrow()
expect(::status2.reason).toThrow()
expect(status3.reason()).toBe(rejection)
})
it('works with objects', async () => {
const rejection = 'fatality'
const {
a: status1,
b: status2,
@@ -196,23 +191,23 @@ describe('pSettle()', () => {
} = await pSettle({
a: Promise.resolve(42),
b: Math.PI,
c: Promise.reject('fatality')
c: Promise.reject(rejection)
})
expect(status1.isRejected()).to.equal(false)
expect(status2.isRejected()).to.equal(false)
expect(status3.isRejected()).to.equal(true)
expect(status1.isRejected()).toBe(false)
expect(status2.isRejected()).toBe(false)
expect(status3.isRejected()).toBe(true)
expect(status1.isFulfilled()).to.equal(true)
expect(status2.isFulfilled()).to.equal(true)
expect(status3.isFulfilled()).to.equal(false)
expect(status1.isFulfilled()).toBe(true)
expect(status2.isFulfilled()).toBe(true)
expect(status3.isFulfilled()).toBe(false)
expect(status1.value()).to.equal(42)
expect(status2.value()).to.equal(Math.PI)
expect(::status3.value).to.throw()
expect(status1.value()).toBe(42)
expect(status2.value()).toBe(Math.PI)
expect(::status3.value).toThrow()
expect(::status1.reason).to.throw()
expect(::status2.reason).to.throw()
expect(status3.reason()).to.equal('fatality')
expect(::status1.reason).toThrow()
expect(::status2.reason).toThrow()
expect(status3.reason()).toBe(rejection)
})
})

View File

@@ -1,4 +1,10 @@
import fu from 'struct-fu'
// TODO: remove once completely merged in vhd.js
import assert from 'assert'
import constantStream from 'constant-stream'
import eventToPromise from 'event-to-promise'
import fu from '@nraynaud/struct-fu'
import isEqual from 'lodash/isEqual'
import {
noop,
@@ -39,6 +45,10 @@ const HARD_DISK_TYPE_DIFFERENCING = 4 // Delta backup.
const BLOCK_UNUSED = 0xFFFFFFFF
const BIT_MASK = 0x80
// unused block as buffer containing a uint32BE
const BUF_BLOCK_UNUSED = Buffer.allocUnsafe(VHD_ENTRY_SIZE)
BUF_BLOCK_UNUSED.writeUInt32BE(BLOCK_UNUSED, 0)
// ===================================================================
const fuFooter = fu.struct([
@@ -91,7 +101,7 @@ const fuHeader = fu.struct([
fu.uint8('parentUuid', 16),
fu.uint32('parentTimestamp'),
fu.uint32('reserved1'),
fu.char('parentUnicodeName', 512),
fu.char16be('parentUnicodeName', 512),
fu.struct('parentLocatorEntry', [
fu.uint32('platformCode'),
fu.uint32('platformDataSpace'),
@@ -144,40 +154,28 @@ const unpackField = (field, buf) => {
}
// ===================================================================
// Returns the checksum of a raw footer.
// The raw footer is altered with the new sum.
function checksumFooter (rawFooter) {
const checksumField = fuFooter.fields.checksum
// Returns the checksum of a raw struct.
// The raw struct (footer or header) is altered with the new sum.
function checksumStruct (rawStruct, struct) {
const checksumField = struct.fields.checksum
let sum = 0
// Reset current sum.
packField(checksumField, 0, rawFooter)
packField(checksumField, 0, rawStruct)
for (let i = 0; i < VHD_FOOTER_SIZE; i++) {
sum = (sum + rawFooter[i]) & 0xFFFFFFFF
for (let i = 0, n = struct.size; i < n; i++) {
sum = (sum + rawStruct[i]) & 0xFFFFFFFF
}
sum = 0xFFFFFFFF - sum
// Write new sum.
packField(checksumField, sum, rawFooter)
packField(checksumField, sum, rawStruct)
return sum
}
function getParentLocatorSize (parentLocatorEntry) {
const { platformDataSpace } = parentLocatorEntry
if (platformDataSpace < VHD_SECTOR_SIZE) {
return sectorsToBytes(platformDataSpace)
}
return (platformDataSpace % VHD_SECTOR_SIZE === 0)
? platformDataSpace
: 0
}
// ===================================================================
class Vhd {
@@ -190,6 +188,17 @@ class Vhd {
// Read functions.
// =================================================================
_readStream (start, n) {
return this._handler.createReadStream(this._path, {
start,
end: start + n - 1 // end is inclusive
})
}
_read (start, n) {
return this._readStream(start, n).then(streamToBuffer)
}
// Returns the first address after metadata. (In bytes)
getEndOfHeaders () {
const { header } = this
@@ -207,10 +216,10 @@ class Vhd {
const entry = header.parentLocatorEntry[i]
if (entry.platformCode !== VHD_PLATFORM_CODE_NONE) {
const dataOffset = uint32ToUint64(entry.platformDataOffset)
// Max(end, locator end)
end = Math.max(end, dataOffset + getParentLocatorSize(entry))
end = Math.max(end,
uint32ToUint64(entry.platformDataOffset) +
sectorsToBytes(entry.platformDataSpace)
)
}
}
@@ -221,17 +230,15 @@ class Vhd {
// Returns the first sector after data.
getEndOfData () {
let end = Math.floor(this.getEndOfHeaders() / VHD_SECTOR_SIZE)
let end = Math.ceil(this.getEndOfHeaders() / VHD_SECTOR_SIZE)
const fullBlockSize = this.sectorsOfBitmap + this.sectorsPerBlock
const { maxTableEntries } = this.header
for (let i = 0; i < maxTableEntries; i++) {
let blockAddr = this.readAllocationTableEntry(i)
const blockAddr = this._getBatEntry(i)
if (blockAddr !== BLOCK_UNUSED) {
// Compute next block address.
blockAddr += this.sectorsPerBlock + this.sectorsOfBitmap
end = Math.max(end, blockAddr)
end = Math.max(end, blockAddr + fullBlockSize)
}
}
@@ -240,24 +247,12 @@ class Vhd {
return sectorsToBytes(end)
}
// Returns the start position of the vhd footer.
// The real footer, not the copy at the beginning of the vhd file.
async getFooterStart () {
const stats = await this._handler.getSize(this._path)
return stats.size - VHD_FOOTER_SIZE
}
// Get the beginning (footer + header) of a vhd file.
async readHeaderAndFooter () {
const buf = await streamToBuffer(
await this._handler.createReadStream(this._path, {
start: 0,
end: VHD_FOOTER_SIZE + VHD_HEADER_SIZE - 1
})
)
const buf = await this._read(0, VHD_FOOTER_SIZE + VHD_HEADER_SIZE)
const sum = unpackField(fuFooter.fields.checksum, buf)
const sumToTest = checksumFooter(buf)
const sumToTest = checksumStruct(buf, fuFooter)
// Checksum child & parent.
if (sumToTest !== sum) {
@@ -297,119 +292,176 @@ class Vhd {
sectorsRoundUpNoZero(header.maxTableEntries * VHD_ENTRY_SIZE)
)
this.blockTable = await streamToBuffer(
await this._handler.createReadStream(this._path, {
start: offset,
end: offset + size - 1
})
)
this.blockTable = await this._read(offset, size)
}
// Returns the address block at the entry location of one table.
readAllocationTableEntry (entry) {
return this.blockTable.readUInt32BE(entry * VHD_ENTRY_SIZE)
// return the first sector (bitmap) of a block
_getBatEntry (block) {
return this.blockTable.readUInt32BE(block * VHD_ENTRY_SIZE)
}
// Returns the data content of a block. (Not the bitmap !)
async readBlockData (blockAddr) {
const { blockSize } = this.header
const handler = this._handler
const path = this._path
const blockDataAddr = sectorsToBytes(blockAddr + this.sectorsOfBitmap)
const footerStart = await this.getFooterStart()
const isPadded = footerStart < (blockDataAddr + blockSize)
// Size ot the current block in the vhd file.
const size = isPadded ? (footerStart - blockDataAddr) : sectorsToBytes(this.sectorsPerBlock)
debug(`Read block data at: ${blockDataAddr}. (size=${size})`)
const buf = await streamToBuffer(
await handler.createReadStream(path, {
start: blockDataAddr,
end: blockDataAddr + size - 1
})
)
// Padded by zero !
if (isPadded) {
return Buffer.concat([buf, new Buffer(blockSize - size).fill(0)])
_readBlock (blockId, onlyBitmap = false) {
const blockAddr = this._getBatEntry(blockId)
if (blockAddr === BLOCK_UNUSED) {
throw new Error(`no such block ${blockId}`)
}
return buf
return this._read(
sectorsToBytes(blockAddr),
onlyBitmap ? this.bitmapSize : this.fullBlockSize
).then(buf => onlyBitmap
? { bitmap: buf }
: {
bitmap: buf.slice(0, this.bitmapSize),
data: buf.slice(this.bitmapSize)
}
)
}
// Returns a buffer that contains the bitmap of a block.
// get the identifiers and first sectors of the first and last block
// in the file
//
// TODO: merge with readBlockData().
async readBlockBitmap (blockAddr) {
const { bitmapSize } = this
const offset = sectorsToBytes(blockAddr)
// return undefined if none
_getFirstAndLastBlocks () {
const n = this.header.maxTableEntries
const bat = this.blockTable
let i = 0
let j = 0
let first, firstSector, last, lastSector
debug(`Read bitmap at: ${offset}. (size=${bitmapSize})`)
// get first allocated block for initialization
while ((firstSector = bat.readUInt32BE(j)) === BLOCK_UNUSED) {
i += 1
j += VHD_ENTRY_SIZE
return streamToBuffer(
await this._handler.createReadStream(this._path, {
start: offset,
end: offset + bitmapSize - 1
})
)
if (i === n) {
return
}
}
lastSector = firstSector
first = last = i
while (i < n) {
const sector = bat.readUInt32BE(j)
if (sector !== BLOCK_UNUSED) {
if (sector < firstSector) {
first = i
firstSector = sector
} else if (sector > lastSector) {
last = i
lastSector = sector
}
}
i += 1
j += VHD_ENTRY_SIZE
}
return { first, firstSector, last, lastSector }
}
// =================================================================
// Write functions.
// =================================================================
// Write a buffer at a given position in a vhd file.
async _write (buffer, offset) {
// Write a buffer/stream at a given position in a vhd file.
_write (data, offset) {
debug(`_write offset=${offset} size=${Buffer.isBuffer(data) ? data.length : '???'}`)
// TODO: could probably be merged in remote handlers.
return this._handler.createOutputStream(this._path, {
start: offset,
flags: 'r+'
}).then(stream => new Promise((resolve, reject) => {
stream.on('error', reject)
stream.write(buffer, () => {
stream.end()
resolve()
})
}))
flags: 'r+',
start: offset
}).then(
Buffer.isBuffer(data)
? stream => new Promise((resolve, reject) => {
stream.on('error', reject)
stream.end(data, resolve)
})
: stream => eventToPromise(data.pipe(stream), 'finish')
)
}
// Write an entry in the allocation table.
writeAllocationTableEntry (entry, value) {
this.blockTable.writeUInt32BE(value, entry * VHD_ENTRY_SIZE)
async ensureBatSize (size) {
const { header } = this
const prevMaxTableEntries = header.maxTableEntries
if (prevMaxTableEntries >= size) {
return
}
const tableOffset = uint32ToUint64(header.tableOffset)
const { first, firstSector, lastSector } = this._getFirstAndLastBlocks()
// extend BAT
const maxTableEntries = header.maxTableEntries = size
const batSize = maxTableEntries * VHD_ENTRY_SIZE
const prevBat = this.blockTable
const bat = this.blockTable = Buffer.allocUnsafe(batSize)
prevBat.copy(bat)
bat.fill(BUF_BLOCK_UNUSED, prevBat.length)
debug(`ensureBatSize: extend in memory BAT ${prevMaxTableEntries} -> ${maxTableEntries}`)
const extendBat = () => {
debug(`ensureBatSize: extend in file BAT ${prevMaxTableEntries} -> ${maxTableEntries}`)
return this._write(
constantStream(BUF_BLOCK_UNUSED, maxTableEntries - prevMaxTableEntries),
tableOffset + prevBat.length
)
}
if (tableOffset + batSize < sectorsToBytes(firstSector)) {
return Promise.all([
extendBat(),
this.writeHeader()
])
}
const { fullBlockSize } = this
const newFirstSector = lastSector + fullBlockSize / VHD_SECTOR_SIZE
debug(`ensureBatSize: move first block ${firstSector} -> ${newFirstSector}`)
return Promise.all([
// copy the first block at the end
this._readStream(sectorsToBytes(firstSector), fullBlockSize).then(stream =>
this._write(stream, sectorsToBytes(newFirstSector))
).then(extendBat),
this._setBatEntry(first, newFirstSector),
this.writeHeader(),
this.writeFooter()
])
}
// set the first sector (bitmap) of a block
_setBatEntry (block, blockSector) {
const i = block * VHD_ENTRY_SIZE
const { blockTable } = this
blockTable.writeUInt32BE(blockSector, i)
return this._write(
blockTable.slice(i, i + VHD_ENTRY_SIZE),
uint32ToUint64(this.header.tableOffset) + i
)
}
// Make a new empty block at vhd end.
// Update block allocation table in context and in file.
async createBlock (blockId) {
// End of file !
let offset = this.getEndOfData()
const blockAddr = Math.ceil(this.getEndOfData() / VHD_SECTOR_SIZE)
// Padded on bound sector.
if (offset % VHD_SECTOR_SIZE) {
offset += (VHD_SECTOR_SIZE - (offset % VHD_SECTOR_SIZE))
}
debug(`create block ${blockId} at ${blockAddr}`)
const blockAddr = Math.floor(offset / VHD_SECTOR_SIZE)
await Promise.all([
// Write an empty block and addr in vhd file.
this._write(
constantStream([ 0 ], this.fullBlockSize),
sectorsToBytes(blockAddr)
),
const {
blockTable,
fullBlockSize
} = this
debug(`Create block at ${blockAddr}. (size=${fullBlockSize}, offset=${offset})`)
// New entry in block allocation table.
this.writeAllocationTableEntry(blockId, blockAddr)
const tableOffset = uint32ToUint64(this.header.tableOffset)
const entry = blockId * VHD_ENTRY_SIZE
// Write an empty block and addr in vhd file.
await this._write(new Buffer(fullBlockSize).fill(0), offset)
await this._write(blockTable.slice(entry, entry + VHD_ENTRY_SIZE), tableOffset + entry)
this._setBatEntry(blockId, blockAddr)
])
return blockAddr
}
@@ -428,17 +480,16 @@ class Vhd {
await this._write(bitmap, sectorsToBytes(blockAddr))
}
async writeBlockSectors (block, beginSectorId, n) {
let blockAddr = this.readAllocationTableEntry(block.id)
async writeBlockSectors (block, beginSectorId, endSectorId) {
let blockAddr = this._getBatEntry(block.id)
if (blockAddr === BLOCK_UNUSED) {
blockAddr = await this.createBlock(block.id)
}
const endSectorId = beginSectorId + n
const offset = blockAddr + this.sectorsOfBitmap + beginSectorId
debug(`Write block data at: ${offset}. (counter=${n}, blockId=${block.id}, blockSector=${beginSectorId})`)
debug(`writeBlockSectors at ${offset} block=${block.id}, sectors=${beginSectorId}...${endSectorId}`)
await this._write(
block.data.slice(
@@ -448,7 +499,7 @@ class Vhd {
sectorsToBytes(offset)
)
const bitmap = await this.readBlockBitmap(this.bitmapSize, blockAddr)
const { bitmap } = await this._readBlock(block.id, true)
for (let i = beginSectorId; i < endSectorId; ++i) {
mapSetBit(bitmap, i)
@@ -458,61 +509,69 @@ class Vhd {
}
// Merge block id (of vhd child) into vhd parent.
async coalesceBlock (child, blockAddr, blockId) {
async coalesceBlock (child, blockId) {
// Get block data and bitmap of block id.
const blockData = await child.readBlockData(blockAddr)
const blockBitmap = await child.readBlockBitmap(blockAddr)
const { bitmap, data } = await child._readBlock(blockId)
debug(`Coalesce block ${blockId} at ${blockAddr}.`)
debug(`coalesceBlock block=${blockId}`)
// For each sector of block data...
const { sectorsPerBlock } = child
for (let i = 0; i < sectorsPerBlock; i++) {
// If no changes on one sector, skip.
if (!mapTestBit(blockBitmap, i)) {
if (!mapTestBit(bitmap, i)) {
continue
}
let sectors = 0
let endSector = i + 1
// Count changed sectors.
for (; sectors + i < sectorsPerBlock; sectors++) {
if (!mapTestBit(blockBitmap, sectors + i)) {
break
}
while (endSector < sectorsPerBlock && mapTestBit(bitmap, endSector)) {
++endSector
}
// Write n sectors into parent.
debug(`Coalesce block: write. (offset=${i}, sectors=${sectors})`)
debug(`coalesceBlock: write sectors=${i}...${endSector}`)
await this.writeBlockSectors(
{ id: blockId, data: blockData },
{ id: blockId, data },
i,
sectors
endSector
)
i += sectors
i = endSector
}
}
// Write a context footer. (At the end and beggining of a vhd file.)
// Write a context footer. (At the end and beginning of a vhd file.)
async writeFooter () {
const { footer } = this
const offset = this.getEndOfData()
const rawFooter = fuFooter.pack(footer)
footer.checksum = checksumFooter(rawFooter)
footer.checksum = checksumStruct(rawFooter, fuFooter)
debug(`Write footer at: ${offset} (checksum=${footer.checksum}). (data=${rawFooter.toString('hex')})`)
await this._write(rawFooter, 0)
await this._write(rawFooter, offset)
}
writeHeader () {
const { header } = this
const rawHeader = fuHeader.pack(header)
header.checksum = checksumStruct(rawHeader, fuHeader)
const offset = VHD_FOOTER_SIZE
debug(`Write header at: ${offset} (checksum=${header.checksum}). (data=${rawHeader.toString('hex')})`)
return this._write(rawHeader, offset)
}
}
// Merge vhd child into vhd parent.
//
// Child must be a delta backup !
// Parent must be a full backup !
//
// TODO: update the identifier of the parent VHD.
export default async function vhdMerge (
parentHandler, parentPath,
childHandler, childPath
@@ -526,6 +585,8 @@ export default async function vhdMerge (
childVhd.readHeaderAndFooter()
])
assert(childVhd.header.blockSize === parentVhd.header.blockSize)
// Child must be a delta.
if (childVhd.footer.diskType !== HARD_DISK_TYPE_DIFFERENCING) {
throw new Error('Unable to merge, child is not a delta backup.')
@@ -550,17 +611,66 @@ export default async function vhdMerge (
childVhd.readBlockTable()
])
for (let blockId = 0; blockId < childVhd.header.maxTableEntries; blockId++) {
const blockAddr = childVhd.readAllocationTableEntry(blockId)
await parentVhd.ensureBatSize(childVhd.header.maxTableEntries)
if (blockAddr !== BLOCK_UNUSED) {
await parentVhd.coalesceBlock(
childVhd,
blockAddr,
blockId
)
for (let blockId = 0; blockId < childVhd.header.maxTableEntries; blockId++) {
if (childVhd._getBatEntry(blockId) !== BLOCK_UNUSED) {
await parentVhd.coalesceBlock(childVhd, blockId)
}
}
const cFooter = childVhd.footer
const pFooter = parentVhd.footer
pFooter.currentSize = { ...cFooter.currentSize }
pFooter.diskGeometry = { ...cFooter.diskGeometry }
pFooter.originalSize = { ...cFooter.originalSize }
pFooter.timestamp = cFooter.timestamp
// necessary to update values and to recreate the footer after block
// creation
await parentVhd.writeFooter()
}
// returns true if the child was actually modified
export async function chainVhd (
parentHandler, parentPath,
childHandler, childPath
) {
const parentVhd = new Vhd(parentHandler, parentPath)
const childVhd = new Vhd(childHandler, childPath)
await Promise.all([
parentVhd.readHeaderAndFooter(),
childVhd.readHeaderAndFooter()
])
const { header } = childVhd
const parentName = parentPath.split('/').pop()
const parentUuid = parentVhd.footer.uuid
if (
header.parentUnicodeName !== parentName ||
!isEqual(header.parentUuid, parentUuid)
) {
header.parentUuid = parentUuid
header.parentUnicodeName = parentName
await childVhd.writeHeader()
return true
}
// The checksum was broken between xo-server v5.2.4 and v5.2.5
//
// Replace by a correct checksum if necessary.
//
// TODO: remove when enough time as passed (6 months).
{
const rawHeader = fuHeader.pack(header)
const checksum = checksumStruct(rawHeader, fuHeader)
if (checksum !== header.checksum) {
await childVhd._write(rawHeader, VHD_FOOTER_SIZE)
return true
}
}
return false
}

View File

@@ -1,8 +1,14 @@
import {
includes,
pickBy
} from 'lodash'
import {
ensureArray,
extractProperty,
forEach,
isArray,
isEmpty,
mapToArray,
parseXml
} from './utils'
@@ -139,6 +145,7 @@ const TRANSFORMS = {
? (isRunning ? 'Running' : 'Halted')
: 'Unknown',
startTime: toTimestamp(otherConfig.boot_time),
supplementalPacks: pickBy(obj.software_version, (value, key) => includes(key, ':')),
agentStartTime: toTimestamp(otherConfig.agent_start_time),
tags: obj.tags,
version: obj.software_version.product_version,
@@ -184,22 +191,33 @@ const TRANSFORMS = {
return false
}
const { PV_drivers_version: { major, minor } } = guestMetrics
if (major === undefined || minor === undefined) {
return false
}
const { major, minor } = guestMetrics.PV_drivers_version
const [ hostMajor, hostMinor ] = (obj.$resident_on || obj.$pool.$master)
.software_version
.product_version
.split('.')
return guestMetrics.PV_drivers_up_to_date
return major >= hostMajor && minor >= hostMinor
? 'up to date'
: 'out of date'
})()
let resourceSet = otherConfig['xo:resource_set']
if (resourceSet) {
try {
resourceSet = JSON.parse(resourceSet)
} catch (_) {
resourceSet = undefined
}
}
const vm = {
// type is redefined after for controllers/, templates &
// snapshots.
type: 'VM',
addresses: guestMetrics && guestMetrics.networks || null,
addresses: (guestMetrics && guestMetrics.networks) || null,
affinityHost: link(obj, 'affinity'),
auto_poweron: Boolean(otherConfig.auto_poweron),
boot: obj.HVM_boot_params,
CPUs: {
@@ -267,11 +285,13 @@ const TRANSFORMS = {
return memory
})(),
installTime: metrics && toTimestamp(metrics.install_time),
name_description: obj.name_description,
name_label: obj.name_label,
other: otherConfig,
os_version: guestMetrics && guestMetrics.os_version || null,
os_version: (guestMetrics && guestMetrics.os_version) || null,
power_state: obj.power_state,
resourceSet,
snapshots: link(obj, 'snapshots'),
startTime: metrics && toTimestamp(metrics.start_time),
tags: obj.tags,
@@ -363,10 +383,13 @@ const TRANSFORMS = {
name_description: obj.name_description,
name_label: obj.name_label,
size: +obj.physical_size,
shared: Boolean(obj.shared),
SR_type: obj.type,
tags: obj.tags,
usage: +obj.virtual_allocation,
VDIs: link(obj, 'VDIs'),
other_config: obj.other_config,
sm_config: obj.sm_config,
$container: (
obj.shared || !obj.$PBDs[0]
@@ -383,26 +406,32 @@ const TRANSFORMS = {
return {
type: 'PBD',
attached: obj.currently_attached,
attached: Boolean(obj.currently_attached),
host: link(obj, 'host'),
SR: link(obj, 'SR')
SR: link(obj, 'SR'),
device_config: obj.device_config
}
},
// -----------------------------------------------------------------
pif (obj) {
const metrics = obj.$metrics
return {
type: 'PIF',
attached: Boolean(obj.currently_attached),
isBondMaster: !isEmpty(obj.bond_master_of),
device: obj.device,
deviceName: metrics && metrics.device_name,
dns: obj.DNS,
disallowUnplug: Boolean(obj.disallow_unplug),
gateway: obj.gateway,
ip: obj.IP,
mac: obj.MAC,
management: Boolean(obj.management), // TODO: find a better name.
carrier: Boolean(metrics && metrics.carrier),
mode: obj.ip_configuration_mode,
mtu: +obj.MTU,
netmask: obj.netmask,
@@ -488,6 +517,7 @@ const TRANSFORMS = {
MTU: +obj.MTU,
name_description: obj.name_description,
name_label: obj.name_label,
other_config: obj.other_config,
tags: obj.tags,
PIFs: link(obj, 'PIFs'),
VIFs: link(obj, 'VIFs')

View File

@@ -1,12 +1,22 @@
/* eslint-disable camelcase */
import every from 'lodash/every'
import deferrable from 'golike-defer'
import fatfs from 'fatfs'
import find from 'lodash/find'
import includes from 'lodash/includes'
import tarStream from 'tar-stream'
import vmdkToVhd from 'xo-vmdk-to-vhd'
import { defer } from 'promise-toolbox'
import { cancellable, defer } from 'promise-toolbox'
import { PassThrough } from 'stream'
import { forbiddenOperation } from 'xo-common/api-errors'
import {
every,
find,
filter,
flatten,
includes,
isEmpty,
omit,
startsWith,
uniq
} from 'lodash'
import {
wrapError as wrapXapiError,
Xapi as XapiBase
@@ -17,10 +27,7 @@ import {
import httpRequest from '../http-request'
import fatfsBuffer, { init as fatfsBufferInit } from '../fatfs-buffer'
import {
deferrable,
mixin
} from '../decorators'
import { mixin } from '../decorators'
import {
bufferToStream,
camelToSnakeCase,
@@ -38,9 +45,6 @@ import {
promisifyAll,
pSettle
} from '../utils'
import {
ForbiddenOperation
} from '../api-errors'
import mixins from './mixins'
import OTHER_CONFIG_TEMPLATE from './other-config-template'
@@ -53,6 +57,7 @@ import {
getNamespaceForType,
isVmHvm,
isVmRunning,
NULL_REF,
optional,
prepareXapiParam,
put
@@ -73,6 +78,9 @@ require('lodash/assign')(module.exports, require('./utils'))
export const VDI_FORMAT_VHD = 'vhd'
export const VDI_FORMAT_RAW = 'raw'
export const IPV4_CONFIG_MODES = ['None', 'DHCP', 'Static']
export const IPV6_CONFIG_MODES = ['None', 'DHCP', 'Static', 'Autoconf']
// ===================================================================
@mixin(mapToArray(mixins))
@@ -507,7 +515,7 @@ export default class Xapi extends XapiBase {
)
} finally {
if (snapshot) {
await this._deleteVm(snapshot, true)
await this._deleteVm(snapshot)
}
}
}
@@ -624,7 +632,7 @@ export default class Xapi extends XapiBase {
actions_after_crash,
actions_after_reboot,
actions_after_shutdown,
affinity: affinity == null ? 'OpaqueRef:NULL' : affinity,
affinity: affinity == null ? NULL_REF : affinity,
HVM_boot_params,
HVM_boot_policy,
is_a_template: asBoolean(is_a_template),
@@ -667,7 +675,7 @@ export default class Xapi extends XapiBase {
}))
}
async _deleteVm (vm, deleteDisks) {
async _deleteVm (vm, deleteDisks = true) {
debug(`Deleting VM ${vm.name_label}`)
// It is necessary for suspended VMs to be shut down
@@ -704,14 +712,14 @@ export default class Xapi extends XapiBase {
}))
}
await Promise.all(mapToArray(vm.$snapshots, snapshot => {
return this.deleteVm(snapshot.$id, true)::pCatch(noop)
}))
await Promise.all(mapToArray(vm.$snapshots, snapshot =>
this.deleteVm(snapshot.$id)::pCatch(noop)
))
await this.call('VM.destroy', vm.$ref)
}
async deleteVm (vmId, deleteDisks = false) {
async deleteVm (vmId, deleteDisks) {
return /* await */ this._deleteVm(
this.getObject(vmId),
deleteDisks
@@ -749,19 +757,21 @@ export default class Xapi extends XapiBase {
const taskRef = await this._createTask('VM Export', vm.name_label)
if (snapshotRef) {
this._watchTask(taskRef)::pFinally(() => {
this.deleteVm(snapshotRef, true)::pCatch(noop)
this.deleteVm(snapshotRef)::pCatch(noop)
})
}
return httpRequest({
hostname: host.address,
path: onlyMetadata ? '/export_metadata/' : '/export/',
protocol: 'https',
query: {
ref: snapshotRef || vm.$ref,
session_id: this.sessionId,
task_id: taskRef,
use_compression: compress ? 'true' : 'false'
}
},
rejectUnauthorized: false
})
}
@@ -774,7 +784,7 @@ export default class Xapi extends XapiBase {
disableBaseTags = false
} = {}) {
const vm = await this.snapshotVm(vmId)
$onFailure(() => this._deleteVm(vm, true))
$onFailure(() => this._deleteVm(vm))
if (snapshotNameLabel) {
this._setObjectProperties(vm, {
nameLabel: snapshotNameLabel
@@ -800,25 +810,38 @@ export default class Xapi extends XapiBase {
const vdis = {}
const vbds = {}
forEach(vm.$VBDs, vbd => {
const vdiId = vbd.VDI
if (!vdiId || vbd.type !== 'Disk') {
let vdi
if (
vbd.type !== 'Disk' ||
!(vdi = vbd.$VDI)
) {
// Ignore this VBD.
return
}
// If the VDI name start with `[NOBAK]`, do not export it.
if (startsWith(vdi.name_label, '[NOBAK]')) {
// FIXME: find a way to not create the VDI snapshot in the
// first time.
//
// The snapshot must not exist otherwise it could break the
// next export.
this._deleteVdi(vdi)::pCatch(noop)
return
}
vbds[vbd.$ref] = vbd
if (vdiId in vdis) {
const vdiRef = vdi.$ref
if (vdiRef in vdis) {
// This VDI has already been managed.
return
}
const vdi = vbd.$VDI
// Look for a snapshot of this vdi in the base VM.
const baseVdi = baseVdis[vdi.snapshot_of]
vdis[vdiId] = baseVdi && !disableBaseTags
vdis[vdiRef] = baseVdi && !disableBaseTags
? {
...vdi,
other_config: {
@@ -831,7 +854,7 @@ export default class Xapi extends XapiBase {
...vdi,
$SR$uuid: vdi.$SR.uuid
}
const stream = streams[`${vdiId}.vhd`] = this._exportVdi(vdi, baseVdi, VDI_FORMAT_VHD)
const stream = streams[`${vdiRef}.vhd`] = this._exportVdi(vdi, baseVdi, VDI_FORMAT_VHD)
$onFailure(() => stream.cancel())
})
@@ -856,7 +879,10 @@ export default class Xapi extends XapiBase {
[TAG_BASE_DELTA]: baseVm.uuid
}
}
: vm
: {
...vm,
other_config: omit(vm.other_config, TAG_BASE_DELTA)
}
}, 'streams', {
value: await streams::pAll()
})
@@ -905,7 +931,7 @@ export default class Xapi extends XapiBase {
is_a_template: false
})
)
$onFailure(() => this._deleteVm(vm, true))
$onFailure(() => this._deleteVm(vm))
await Promise.all([
this._setObjectProperties(vm, {
@@ -995,7 +1021,7 @@ export default class Xapi extends XapiBase {
// Create VIFs.
Promise.all(mapToArray(delta.vifs, vif => {
const network =
this.getObject(vif.$network$uuid, null) ||
(vif.$network$uuid && this.getObject(vif.$network$uuid, null)) ||
networksOnPoolMasterByDevice[vif.device] ||
defaultNetwork
@@ -1010,7 +1036,7 @@ export default class Xapi extends XapiBase {
])
if (deleteBase && baseVm) {
this._deleteVm(baseVm, true)::pCatch(noop)
this._deleteVm(baseVm)::pCatch(noop)
}
await Promise.all([
@@ -1081,6 +1107,94 @@ export default class Xapi extends XapiBase {
return loop()
}
async _createSuppPackVdi (stream, sr) {
const vdi = await this.createVdi(stream.length, {
sr: sr.$ref,
name_label: '[XO] Supplemental pack ISO',
name_description: 'small temporary VDI to store a supplemental pack ISO'
})
await this.importVdiContent(vdi.$id, stream, { format: VDI_FORMAT_RAW })
return vdi
}
@deferrable
async installSupplementalPack ($defer, stream, { hostId }) {
if (!stream.length) {
throw new Error('stream must have a length')
}
let sr = this.pool.$default_SR
if (!sr || sr.physical_size - sr.physical_utilisation < stream.length) {
sr = find(
mapToArray(this.getObject(hostId, 'host').$PBDs, '$SR'),
sr => sr && sr.content_type === 'user' && sr.physical_size - sr.physical_utilisation >= stream.length
)
if (!sr) {
throw new Error('no SR available to store installation file')
}
}
const vdi = await this._createSuppPackVdi(stream, sr)
$defer(() => this._deleteVdi(vdi))
await this.call('host.call_plugin', this.getObject(hostId).$ref, 'install-supp-pack', 'install', { vdi: vdi.uuid })
}
@deferrable
async installSupplementalPackOnAllHosts ($defer, stream) {
if (!stream.length) {
throw new Error('stream must have a length')
}
const isSrAvailable = sr =>
sr && sr.content_type === 'user' && sr.physical_size - sr.physical_utilisation >= stream.length
const hosts = filter(this.objects.all, { $type: 'host' })
// Try to find a shared SR
const sr = find(
filter(this.objects.all, { $type: 'sr', shared: true }),
isSrAvailable
)
// Shared SR available: create only 1 VDI for all the installations
if (sr) {
const vdi = await this._createSuppPackVdi(stream, sr)
$defer(() => this._deleteVdi(vdi))
// Install pack sequentially to prevent concurrent access to the unique VDI
for (const host of hosts) {
await this.call('host.call_plugin', host.$ref, 'install-supp-pack', 'install', { vdi: vdi.uuid })
}
return
}
// No shared SR available: find an available local SR on each host
return Promise.all(mapToArray(hosts, deferrable(async ($defer, host) => {
// pipe stream synchronously to several PassThroughs to be able to pipe them asynchronously later
const pt = stream.pipe(new PassThrough())
pt.length = stream.length
const sr = find(
mapToArray(host.$PBDs, '$SR'),
isSrAvailable
)
if (!sr) {
throw new Error('no SR available to store installation file')
}
const vdi = await this._createSuppPackVdi(pt, sr)
$defer(() => this._deleteVdi(vdi))
await this.call('host.call_plugin', host.$ref, 'install-supp-pack', 'install', { vdi: vdi.uuid })
})))
}
async _importVm (stream, sr, onlyMetadata = false, onVmCreation = undefined) {
const taskRef = await this._createTask('VM import')
const query = {
@@ -1112,7 +1226,9 @@ export default class Xapi extends XapiBase {
put(stream, {
hostname: host.address,
path,
query
protocol: 'https',
query,
rejectUnauthorized: false
})
])
@@ -1149,7 +1265,7 @@ export default class Xapi extends XapiBase {
VCPUs_max: nCpus
})
)
$onFailure(() => this._deleteVm(vm, true))
$onFailure(() => this._deleteVm(vm))
// Disable start and change the VM name label during import.
await Promise.all([
this.addForbiddenOperationToVm(vm.$id, 'start', 'OVA import in progress...'),
@@ -1339,7 +1455,7 @@ export default class Xapi extends XapiBase {
await this._startVm(this.getObject(vmId))
} catch (e) {
if (e.code === 'OPERATION_BLOCKED') {
throw new ForbiddenOperation('Start', e.params[1])
throw forbiddenOperation('Start', e.params[1])
}
throw e
@@ -1707,7 +1823,8 @@ export default class Xapi extends XapiBase {
return snap
}
async _exportVdi (vdi, base, format = VDI_FORMAT_VHD) {
@cancellable
async _exportVdi ($cancelToken, vdi, base, format = VDI_FORMAT_VHD) {
const host = vdi.$SR.$PBDs[0].$host
const taskRef = await this._createTask('VDI Export', vdi.name_label)
@@ -1727,16 +1844,13 @@ export default class Xapi extends XapiBase {
}`)
const task = this._watchTask(taskRef)
return httpRequest({
return httpRequest($cancelToken, {
hostname: host.address,
path: '/export_raw_vdi/',
query
protocol: 'https',
query,
rejectUnauthorized: false
}).then(response => {
response.cancel = (cancel => () => {
return new Promise(resolve => {
resolve(cancel())
}).then(() => task::pCatch(noop))
})(response.cancel)
response.task = task
return response
@@ -1778,9 +1892,10 @@ export default class Xapi extends XapiBase {
task,
put(stream, {
hostname: pbd.$host.address,
method: 'put',
path: '/import_raw_vdi/',
query
protocol: 'https',
query,
rejectUnauthorized: false
}, task)
])
}
@@ -1799,15 +1914,14 @@ export default class Xapi extends XapiBase {
async _createVif (vm, network, {
mac = '',
mtu = network.MTU,
position = undefined,
currently_attached = true,
device = position != null ? String(position) : undefined,
ipv4_allowed = undefined,
ipv6_allowed = undefined,
locking_mode = undefined,
MAC = mac,
MTU = mtu,
other_config = {},
qos_algorithm_params = {},
qos_algorithm_type = ''
@@ -1824,7 +1938,7 @@ export default class Xapi extends XapiBase {
ipv6_allowed,
locking_mode,
MAC,
MTU: asInteger(MTU),
MTU: asInteger(network.MTU),
network: network.$ref,
other_config,
qos_algorithm_params,
@@ -1832,18 +1946,13 @@ export default class Xapi extends XapiBase {
VM: vm.$ref
}))
if (isVmRunning(vm)) {
if (currently_attached && isVmRunning(vm)) {
await this.call('VIF.plug', vifRef)
}
return vifRef
}
// TODO: check whether the VIF was unplugged before.
async _deleteVif (vif) {
await this.call('VIF.destroy', vif.$ref)
}
async createVif (vmId, networkId, opts = undefined) {
return /* await */ this._getOrWaitObject(
await this._createVif(
@@ -1854,10 +1963,6 @@ export default class Xapi extends XapiBase {
)
}
async deleteVif (vifId) {
await this._deleteVif(this.getObject(vifId))
}
async createNetwork ({
name,
description = 'Created with Xen Orchestra',
@@ -1878,10 +1983,69 @@ export default class Xapi extends XapiBase {
return this._getOrWaitObject(networkRef)
}
async editPif (
pifId,
{ vlan }
) {
const pif = this.getObject(pifId)
const physPif = find(this.objects.all, obj => (
obj.$type === 'pif' &&
(obj.physical || !isEmpty(obj.bond_master_of)) &&
obj.$pool === pif.$pool &&
obj.device === pif.device
))
if (!physPif) {
throw new Error('PIF not found')
}
const pifs = this.getObject(pif.network).$PIFs
const wasAttached = {}
forEach(pifs, pif => {
wasAttached[pif.host] = pif.currently_attached
})
const vlans = uniq(mapToArray(pifs, pif => pif.VLAN_master_of))
await Promise.all(
mapToArray(vlans, vlan => vlan !== NULL_REF && this.call('VLAN.destroy', vlan))
)
const newPifs = await this.call('pool.create_VLAN_from_PIF', physPif.$ref, pif.network, asInteger(vlan))
await Promise.all(
mapToArray(newPifs, pifRef =>
!wasAttached[this.getObject(pifRef).host] && this.call('PIF.unplug', pifRef)::pCatch(noop)
)
)
}
async createBondedNetwork ({
bondMode,
mac,
pifIds,
...params
}) {
const network = await this.createNetwork(params)
// TODO: test and confirm:
// Bond.create is called here with PIFs from one host but XAPI should then replicate the
// bond on each host in the same pool with the corresponding PIFs (ie same interface names?).
await this.call('Bond.create', network.$ref, map(pifIds, pifId => this.getObject(pifId).$ref), mac, bondMode)
return network
}
async deleteNetwork (networkId) {
const network = this.getObject(networkId)
const pifs = network.$PIFs
const vlans = uniq(mapToArray(pifs, pif => pif.VLAN_master_of))
await Promise.all(
mapToArray(network.$PIFs, (pif) => this.call('VLAN.destroy', pif.$VLAN_master_of.$ref))
mapToArray(vlans, vlan => vlan !== NULL_REF && this.call('VLAN.destroy', vlan))
)
const bonds = uniq(flatten(mapToArray(pifs, pif => pif.bond_master_of)))
await Promise.all(
mapToArray(bonds, bond => this.call('Bond.destroy', bond))
)
await this.call('network.destroy', network.$ref)
@@ -1960,7 +2124,7 @@ export default class Xapi extends XapiBase {
const buffer = fatfsBufferInit()
const vdi = await this.createVdi(buffer.length, { name_label: 'XO CloudConfigDrive', name_description: undefined, sr: sr.$ref })
// Then, generate a FAT fs
const fs = fatfs.createFileSystem(fatfsBuffer(buffer))::promisifyAll()
const fs = promisifyAll(fatfs.createFileSystem(fatfsBuffer(buffer)))
// Create Cloud config folders
await fs.mkdir('openstack')
await fs.mkdir('openstack/latest')
@@ -1978,5 +2142,4 @@ export default class Xapi extends XapiBase {
}
// =================================================================
}

View File

View File

@@ -3,6 +3,28 @@ import { isEmpty } from '../../utils'
import { makeEditObject } from '../utils'
export default {
async _connectVif (vif) {
await this.call('VIF.plug', vif.$ref)
},
async connectVif (vifId) {
await this._connectVif(this.getObject(vifId))
},
async _deleteVif (vif) {
await this.call('VIF.destroy', vif.$ref)
},
async deleteVif (vifId) {
const vif = this.getObject(vifId)
if (vif.currently_attached) {
await this._disconnectVif(vif)
}
await this._deleteVif(vif)
},
async _disconnectVif (vif) {
await this.call('VIF.unplug_force', vif.$ref)
},
async disconnectVif (vifId) {
await this._disconnectVif(this.getObject(vifId))
},
editVif: makeEditObject({
ipv4Allowed: {
get: true,

View File

@@ -1,16 +1,17 @@
import filter from 'lodash/filter'
import includes from 'lodash/includes'
import some from 'lodash/some'
import sortBy from 'lodash/sortBy'
import unzip from 'julien-f-unzip'
import httpProxy from '../../http-proxy'
import httpRequest from '../../http-request'
import { debounce } from '../../decorators'
import { GenericError } from '../../api-errors'
import {
createRawObject,
ensureArray,
forEach,
mapFilter,
mapToArray,
parseXml
} from '../../utils'
@@ -30,7 +31,7 @@ export default {
)
if (statusCode !== 200) {
throw new GenericError('cannot fetch patches list from Citrix')
throw new Error('cannot fetch patches list from Citrix')
}
const data = parseXml(await readAll()).patchdata
@@ -150,6 +151,27 @@ export default {
)
},
async _ejectToolsIsos (hostRef) {
return Promise.all(mapFilter(
this.objects.all,
vm => {
if (vm.$type !== 'vm' || (hostRef && vm.resident_on !== hostRef)) {
return
}
const shouldEjectCd = some(vm.$VBDs, vbd => {
const vdi = vbd.$VDI
return vdi && vdi.is_tools_iso
})
if (shouldEjectCd) {
return this.ejectCdFromVm(vm.$id)
}
}
))
},
// -----------------------------------------------------------------
_isPoolPatchInstallableOnHost (patchUuid, host) {
@@ -183,10 +205,12 @@ export default {
put(stream, {
hostname: this.pool.$master.address,
path: '/pool_patch_upload',
protocol: 'https',
query: {
session_id: this.sessionId,
task_id: taskRef
}
},
rejectUnauthorized: false
}, task)
])
@@ -226,7 +250,8 @@ export default {
async _installPoolPatchOnHost (patchUuid, host) {
debug('installing patch %s', patchUuid)
const patch = await this._getOrUploadPoolPatch(patchUuid)
const [ patch ] = await Promise.all([ this._getOrUploadPoolPatch(patchUuid), this._ejectToolsIsos(host.$ref) ])
await this.call('pool_patch.apply', patch.$ref, host.$ref)
},
@@ -240,7 +265,7 @@ export default {
// -----------------------------------------------------------------
async installPoolPatchOnAllHosts (patchUuid) {
const patch = await this._getOrUploadPoolPatch(patchUuid)
const [ patch ] = await Promise.all([ this._getOrUploadPoolPatch(patchUuid), this._ejectToolsIsos() ])
await this.call('pool_patch.pool_apply', patch.$ref)
},
@@ -279,7 +304,11 @@ export default {
const patch = installable[i]
if (this._isPoolPatchInstallableOnHost(patch.uuid, host)) {
await this._installPoolPatchOnHostAndRequirements(patch, host, installableByUuid)
await this._installPoolPatchOnHostAndRequirements(patch, host, installableByUuid).catch(error => {
if (error.code !== 'PATCH_ALREADY_APPLIED') {
throw error
}
})
host = this.getObject(host.$id)
}
}

View File

@@ -1,10 +1,9 @@
import deferrable from 'golike-defer'
import find from 'lodash/find'
import gte from 'lodash/gte'
import isEmpty from 'lodash/isEmpty'
import lte from 'lodash/lte'
import {
deferrable
} from '../../decorators'
import {
forEach,
mapToArray,
@@ -16,7 +15,8 @@ import {
import {
isVmHvm,
isVmRunning,
makeEditObject
makeEditObject,
NULL_REF
} from '../utils'
export default {
@@ -26,8 +26,6 @@ export default {
name_label, // deprecated
nameLabel = name_label, // eslint-disable-line camelcase
bootAfterCreate = false,
clone = true,
installRepository = undefined,
vdis = undefined,
@@ -55,7 +53,7 @@ export default {
// Clones the template.
const vmRef = await this[clone ? '_cloneVm' : '_copyVm'](template, nameLabel)
$onFailure(() => this.deleteVm(vmRef, true)::pCatch(noop))
$onFailure(() => this.deleteVm(vmRef)::pCatch(noop))
// TODO: copy BIOS strings?
@@ -77,7 +75,7 @@ export default {
const isHvm = isVmHvm(vm)
if (isHvm) {
if (!vdis.length || installMethod === 'network') {
if (!isEmpty(vdis) || installMethod === 'network') {
const { HVM_boot_params: bootParams } = vm
let order = bootParams.order
if (order) {
@@ -107,6 +105,9 @@ export default {
}
}
let nVbds = vm.VBDs.length
let hasBootableDisk = !!find(vm.$VBDs, 'bootable')
// Inserts the CD if necessary.
if (installMethod === 'cd') {
// When the VM is started, if PV, the CD drive will become not
@@ -114,14 +115,13 @@ export default {
await this._insertCdIntoVm(installRepository, vm, {
bootable: true
})
}
hasBootableDisk = true
let nDisks = 0
++nVbds
}
// Modify existing (previous template) disks if necessary
existingVdis && await Promise.all(mapToArray(existingVdis, async ({ size, $SR: srId, ...properties }, userdevice) => {
++nDisks
const vbd = find(vm.$VBDs, { userdevice })
if (!vbd) {
return
@@ -145,10 +145,10 @@ export default {
// Creates the user defined VDIs.
//
// TODO: set vm.suspend_SR
if (vdis) {
if (!isEmpty(vdis)) {
const devices = await this.call('VM.get_allowed_VBD_devices', vm.$ref)
await Promise.all(mapToArray(vdis, (vdiDescription, i) => {
++nDisks
++nVbds
return this._createVdi(
vdiDescription.size, // FIXME: Should not be done in Xapi.
@@ -160,9 +160,8 @@ export default {
)
.then(ref => this._getOrWaitObject(ref))
.then(vdi => this._createVbd(vm, vdi, {
// Only the first VBD if installMethod is not cd is bootable.
bootable: installMethod !== 'cd' && !i,
// Either the CD or the 1st disk is bootable (only useful for PV VMs)
bootable: !(hasBootableDisk || i),
userdevice: devices[i]
}))
}))
@@ -172,24 +171,32 @@ export default {
await Promise.all(mapToArray(vm.$VIFs, vif => this._deleteVif(vif)))
// Creates the VIFs specified by the user.
let nVifs = 0
if (vifs) {
const devices = await this.call('VM.get_allowed_VIF_devices', vm.$ref)
await Promise.all(mapToArray(vifs, (vif, index) => this._createVif(
vm,
this.getObject(vif.network),
{
device: devices[index],
mac: vif.mac,
mtu: vif.mtu
}
)))
await Promise.all(mapToArray(vifs, (vif, index) => {
++nVifs
return this._createVif(
vm,
this.getObject(vif.network),
{
ipv4_allowed: vif.ipv4_allowed,
ipv6_allowed: vif.ipv6_allowed,
device: devices[index],
locking_mode: isEmpty(vif.ipv4_allowed) && isEmpty(vif.ipv6_allowed) ? 'network_default' : 'locked',
mac: vif.mac,
mtu: vif.mtu
}
)
}))
}
// TODO: Assign VGPUs.
if (cloudConfig != null) {
// Refresh the record.
vm = await this._waitObject(vm.$id, vm => vm.VBDs.length === nDisks)
vm = await this._waitObjectState(vm.$id, vm => vm.VBDs.length === nVbds)
// Find the SR of the first VDI.
let srRef
@@ -208,19 +215,32 @@ export default {
? 'createCoreOsCloudInitConfigDrive'
: 'createCloudInitConfigDrive'
await this[method](vm.$id, srRef, cloudConfig)
++nVbds
}
if (bootAfterCreate) {
this._startVm(vm)::pCatch(noop)
}
return this._waitObject(vm.$id)
// wait for the record with all the VBDs and VIFs
return this._waitObjectState(vm.$id, vm =>
vm.VBDs.length === nVbds &&
vm.VIFs.length === nVifs
)
},
// High level method to edit a VM.
//
// Params do not correspond directly to XAPI props.
_editVm: makeEditObject({
affinityHost: {
get: 'affinity',
set (value, vm) {
return this._setObjectProperty(
vm,
'affinity',
value ? this.getObject(value).$ref : NULL_REF
)
}
},
autoPoweron: {
set (value, vm) {
return Promise.all([
@@ -324,7 +344,20 @@ export default {
tags: true
}),
async editVm (id, props) {
return /* await */ this._editVm(this.getObject(id), props)
async editVm (id, props, checkLimits) {
return /* await */ this._editVm(this.getObject(id), props, checkLimits)
},
async revertVm (snapshotId, snapshotBefore = true) {
const snapshot = this.getObject(snapshotId)
if (snapshotBefore) {
await this._snapshotVm(snapshot.$snapshot_of)
}
return this.call('VM.revert', snapshot.$ref)
},
async resumeVm (vmId) {
// the force parameter is always true
return this.call('VM.resume', this.getObject(vmId).$ref, false, true)
}
}

View File

@@ -1,3 +1,5 @@
import { NULL_REF } from './utils'
const OTHER_CONFIG_TEMPLATE = {
actions_after_crash: 'restart',
actions_after_reboot: 'restart',
@@ -32,7 +34,7 @@ const OTHER_CONFIG_TEMPLATE = {
hpet: 'true',
viridian: 'true'
},
protection_policy: 'OpaqueRef:NULL',
protection_policy: NULL_REF,
PV_args: '',
PV_bootloader: '',
PV_bootloader_args: '',

View File

@@ -17,6 +17,7 @@ import {
isInteger,
isString,
map,
mapFilter,
mapToArray,
noop,
pFinally
@@ -141,17 +142,6 @@ export const isVmRunning = vm => VM_RUNNING_POWER_STATES[vm.power_state]
const _DEFAULT_ADD_TO_LIMITS = (next, current) => next - current
const _mapFilter = (collection, iteratee) => {
const result = []
forEach(collection, (...args) => {
const value = iteratee(...args)
if (value) {
result.push(value)
}
})
return result
}
export const makeEditObject = specs => {
const normalizeGet = (get, name) => {
if (get === true) {
@@ -317,14 +307,20 @@ export const makeEditObject = specs => {
const cbs = []
forEach(constraints, (constraint, constraintName) => {
// This constraint value is already defined: bypass the constraint.
if (values[constraintName] != null) {
return
}
// Before setting a property to a new value, if the constraint check fails (e.g. memoryMin > memoryMax):
// - if the user wants to set the constraint (ie constraintNewValue is defined):
// constraint <-- constraintNewValue THEN property <-- value (e.g. memoryMax <-- 2048 THEN memoryMin <-- 1024)
// - if the user DOES NOT want to set the constraint (ie constraintNewValue is NOT defined):
// constraint <-- value THEN property <-- value (e.g. memoryMax <-- 1024 THEN memoryMin <-- 1024)
// FIXME: Some values combinations will lead to setting the same property twice, which is not perfect but works for now.
const constraintCurrentValue = specs[constraintName].get(object)
const constraintNewValue = values[constraintName]
if (!constraint(specs[constraintName].get(object), value)) {
const cb = set(value, constraintName)
cbs.push(cb)
if (!constraint(constraintCurrentValue, value)) {
const cb = set(constraintNewValue == null ? value : constraintNewValue, constraintName)
if (cb) {
cbs.push(cb)
}
}
})
@@ -336,7 +332,7 @@ export const makeEditObject = specs => {
return cb
}
const cbs = _mapFilter(values, set)
const cbs = mapFilter(values, set)
if (checkLimits) {
await checkLimits(limits, object)
@@ -348,6 +344,10 @@ export const makeEditObject = specs => {
// ===================================================================
export const NULL_REF = 'OpaqueRef:NULL'
// ===================================================================
// HTTP put, use an ugly hack if the length is not known because XAPI
// does not support chunk encoding.
export const put = (stream, {

View File

View File

@@ -19,11 +19,29 @@ export default class {
constructor (xo) {
this._xo = xo
this._acls = new Acls({
const aclsDb = this._acls = new Acls({
connection: xo._redis,
prefix: 'xo:acl',
indexes: ['subject', 'object']
})
xo.on('start', () => {
xo.addConfigManager('acls',
() => aclsDb.get(),
acls => aclsDb.update(acls)
)
})
xo.on('clean', async () => {
const acls = await aclsDb.get()
const toRemove = []
forEach(acls, ({ subject, object, action, id }) => {
if (!subject || !object || !action) {
toRemove.push(id)
}
})
await aclsDb.remove(toRemove)
})
}
async _getAclsForUser (userId) {
@@ -39,10 +57,9 @@ export default class {
push.apply(acls, entries)
})(acls.push)
const collection = this._acls
await Promise.all(mapToArray(
subjects,
subject => collection.get({subject}).then(pushAcls)
subject => this.getAclsForSubject(subject).then(pushAcls)
))
return acls
@@ -67,6 +84,10 @@ export default class {
return this._acls.get()
}
async getAclsForSubject (subjectId) {
return this._acls.get({ subject: subjectId })
}
async getPermissionsForUser (userId) {
const [
acls,

View File

@@ -1,16 +1,12 @@
import createDebug from 'debug'
const debug = createDebug('xo:api')
import kindOf from 'kindof'
import ms from 'ms'
import schemaInspector from 'schema-inspector'
import * as methods from '../api'
import {
MethodNotFound,
InvalidParameters,
Unauthorized
} from '../api-errors'
MethodNotFound
} from 'json-rpc-peer'
import {
createRawObject,
forEach,
@@ -19,8 +15,12 @@ import {
serializeError
} from '../utils'
import * as errors from 'xo-common/api-errors'
// ===================================================================
const debug = createDebug('xo:api')
const PERMISSIONS = {
none: 0,
read: 1,
@@ -28,6 +28,26 @@ const PERMISSIONS = {
admin: 3
}
// TODO:
// - error when adding a server to a pool with incompatible version
// - error when halted VM migration failure is due to XS < 7
const XAPI_ERROR_TO_XO_ERROR = {
EHOSTUNREACH: errors.serverUnreachable,
HOST_OFFLINE: ([ host ], getId) => errors.hostOffline({ host: getId(host) }),
NO_HOSTS_AVAILABLE: errors.noHostsAvailable,
NOT_SUPPORTED_DURING_UPGRADE: errors.notSupportedDuringUpgrade,
OPERATION_BLOCKED: ([ ref, code ], getId) => errors.operationBlocked({ objectId: getId(ref), code }),
PATCH_PRECHECK_FAILED_ISO_MOUNTED: ([ patch ]) => errors.patchPrecheck({ errorType: 'isoMounted', patch }),
PIF_VLAN_EXISTS: ([ pif ], getId) => errors.objectAlreadyExists({ objectId: getId(pif), objectType: 'PIF' }),
SESSION_AUTHENTICATION_FAILED: errors.authenticationFailed,
VDI_IN_USE: ([ vdi, operation ], getId) => errors.vdiInUse({ vdi: getId(vdi), operation }),
VM_BAD_POWER_STATE: ([ vm, expected, actual ], getId) => errors.vmBadPowerState({ vm: getId(vm), expected, actual }),
VM_IS_TEMPLATE: errors.vmIsTemplate,
VM_LACKS_FEATURE: ([ vm ], getId) => errors.vmLacksFeature({ vm: getId(vm) }),
VM_LACKS_FEATURE_SHUTDOWN: ([ vm ], getId) => errors.vmLacksFeature({ vm: getId(vm), feature: 'shutdown' }),
VM_MISSING_PV_DRIVERS: ([ vm ], getId) => errors.vmMissingPvDrivers({ vm: getId(vm) })
}
const hasPermission = (user, permission) => (
PERMISSIONS[user.permission] >= PERMISSIONS[permission]
)
@@ -44,7 +64,7 @@ function checkParams (method, params) {
}, params)
if (!result.valid) {
throw new InvalidParameters(result.error)
throw errors.invalidParameters(result.error)
}
}
@@ -60,7 +80,7 @@ function checkPermission (method) {
const {user} = this
if (!user) {
throw new Unauthorized()
throw errors.unauthorized()
}
// The only requirement is login.
@@ -69,7 +89,7 @@ function checkPermission (method) {
}
if (!hasPermission(user, permission)) {
throw new Unauthorized()
throw errors.unauthorized()
}
}
@@ -81,7 +101,7 @@ function resolveParams (method, params) {
const {user} = this
if (!user) {
throw new Unauthorized()
throw errors.unauthorized()
}
const userId = user.id
@@ -117,7 +137,7 @@ function resolveParams (method, params) {
return params
}
throw new Unauthorized()
throw errors.unauthorized()
})
}
@@ -282,6 +302,17 @@ export default class Api {
)
}
const xoError = XAPI_ERROR_TO_XO_ERROR[error.code]
if (xoError) {
throw xoError(error.params, ref => {
try {
return this._xo.getObject(ref).id
} catch (e) {
return ref
}
})
}
throw error
}
}

View File

@@ -1,9 +1,8 @@
import Token, { Tokens } from '../models/token'
import {
NoSuchObject
} from '../api-errors'
import { noSuchObject } from 'xo-common/api-errors'
import {
createRawObject,
forEach,
generateToken,
pCatch,
noop
@@ -11,13 +10,10 @@ import {
// ===================================================================
class NoSuchAuthenticationToken extends NoSuchObject {
constructor (id) {
super(id, 'authentication token')
}
}
const noSuchAuthenticationToken = id =>
noSuchObject(id, 'authenticationToken')
// ===================================================================
const ONE_MONTH = 1e3 * 60 * 60 * 24 * 30
export default class {
constructor (xo) {
@@ -30,7 +26,7 @@ export default class {
this._providers = new Set()
// Creates persistent collections.
this._tokens = new Tokens({
const tokensDb = this._tokens = new Tokens({
connection: xo._redis,
prefix: 'xo:token',
indexes: ['user_id']
@@ -61,9 +57,26 @@ export default class {
try {
return (await xo.getAuthenticationToken(tokenId)).user_id
} catch (e) {
return
}
} catch (error) {}
})
xo.on('clean', async () => {
const tokens = await tokensDb.get()
const toRemove = []
const now = Date.now()
forEach(tokens, ({ expiration, id }) => {
if (!expiration || expiration < now) {
toRemove.push(id)
}
})
await tokensDb.remove(toRemove)
})
xo.on('start', () => {
xo.addConfigManager('authTokens',
() => tokensDb.get(),
tokens => tokensDb.update(tokens)
)
})
}
@@ -141,7 +154,7 @@ export default class {
const token = new Token({
id: await generateToken(),
user_id: userId,
expiration: Date.now() + 1e3 * 60 * 60 * 24 * 30 // 1 month validity.
expiration: Date.now() + ONE_MONTH
})
await this._tokens.add(token)
@@ -152,14 +165,14 @@ export default class {
async deleteAuthenticationToken (id) {
if (!await this._tokens.remove(id)) {
throw new NoSuchAuthenticationToken(id)
throw noSuchAuthenticationToken(id)
}
}
async getAuthenticationToken (id) {
let token = await this._tokens.first(id)
if (!token) {
throw new NoSuchAuthenticationToken(id)
throw noSuchAuthenticationToken(id)
}
token = token.properties
@@ -169,7 +182,7 @@ export default class {
)) {
this._tokens.remove(id)::pCatch(noop)
throw new NoSuchAuthenticationToken(id)
throw noSuchAuthenticationToken(id)
}
return token

View File

@@ -1,29 +1,44 @@
import endsWith from 'lodash/endsWith'
import deferrable from 'golike-defer'
import escapeStringRegexp from 'escape-string-regexp'
import eventToPromise from 'event-to-promise'
import filter from 'lodash/filter'
import find from 'lodash/find'
import findIndex from 'lodash/findIndex'
import sortBy from 'lodash/sortBy'
import startsWith from 'lodash/startsWith'
import execa from 'execa'
import splitLines from 'split-lines'
import { createParser as createPairsParser } from 'parse-pairs'
import { createReadStream, readdir, stat } from 'fs'
import { satisfies as versionSatisfies } from 'semver'
import { utcFormat } from 'd3-time-format'
import {
basename,
dirname
} from 'path'
import { satisfies as versionSatisfies } from 'semver'
import vhdMerge from '../vhd-merge'
import xapiObjectToXo from '../xapi-object-to-xo'
import {
deferrable
} from '../decorators'
endsWith,
filter,
find,
findIndex,
includes,
once,
sortBy,
startsWith,
trim
} from 'lodash'
import vhdMerge, { chainVhd } from '../vhd-merge'
import xapiObjectToXo from '../xapi-object-to-xo'
import { lvs, pvs } from '../lvm'
import {
forEach,
mapFilter,
mapToArray,
noop,
pCatch,
pFinally,
pFromCallback,
pSettle,
safeDateFormat
resolveSubpath,
safeDateFormat,
safeDateParse,
tmpDir
} from '../utils'
import {
VDI_FORMAT_VHD
@@ -34,6 +49,8 @@ import {
const DELTA_BACKUP_EXT = '.json'
const DELTA_BACKUP_EXT_LENGTH = DELTA_BACKUP_EXT.length
const shortDate = utcFormat('%Y-%m-%d')
// Test if a file is a vdi backup. (full or delta)
const isVdiBackup = name => /^\d+T\d+Z_(?:full|delta)\.vhd$/.test(name)
@@ -41,6 +58,41 @@ const isVdiBackup = name => /^\d+T\d+Z_(?:full|delta)\.vhd$/.test(name)
const isDeltaVdiBackup = name => /^\d+T\d+Z_delta\.vhd$/.test(name)
const isFullVdiBackup = name => /^\d+T\d+Z_full\.vhd$/.test(name)
const toTimestamp = date => date && Math.round(date.getTime() / 1000)
const parseVmBackupPath = name => {
const base = basename(name)
let baseMatches
baseMatches = /^([^_]+)_([^_]+)_(.+)\.xva$/.exec(base)
if (baseMatches) {
return {
datetime: toTimestamp(safeDateParse(baseMatches[1])),
id: name,
name: baseMatches[3],
tag: baseMatches[2],
type: 'xva'
}
}
let dirMatches
if (
(baseMatches = /^([^_]+)_(.+)\.json$/.exec(base)) &&
(dirMatches = /^vm_delta_([^_]+)_(.+)$/.exec(basename(dirname(name))))
) {
return {
datetime: toTimestamp(safeDateParse(baseMatches[1])),
id: name,
name: baseMatches[2],
tag: dirMatches[1],
type: 'delta',
uuid: dirMatches[2]
}
}
throw new Error('invalid VM backup filename')
}
// Get the timestamp of a vdi backup. (full or delta)
const getVdiTimestamp = name => {
const arr = /^(\d+T\d+Z)_(?:full|delta)\.vhd$/.exec(name)
@@ -50,21 +102,200 @@ const getVdiTimestamp = name => {
const getDeltaBackupNameWithoutExt = name => name.slice(0, -DELTA_BACKUP_EXT_LENGTH)
const isDeltaBackup = name => endsWith(name, DELTA_BACKUP_EXT)
// Checksums have been corrupted between 5.2.6 and 5.2.7.
//
// For a short period of time, bad checksums will be regenerated
// instead of rejected.
//
// TODO: restore when enough time has passed (a week/a month).
async function checkFileIntegrity (handler, name) {
let stream
await handler.refreshChecksum(name)
// let stream
//
// try {
// stream = await handler.createReadStream(name, { checksum: true })
// } catch (error) {
// if (error.code === 'ENOENT') {
// return
// }
//
// throw error
// }
//
// stream.resume()
// await eventToPromise(stream, 'finish')
}
try {
stream = await handler.createReadStream(name, { checksum: true })
} catch (error) {
if (error.code === 'ENOENT') {
return
}
// -------------------------------------------------------------------
throw error
const listPartitions = (() => {
const IGNORED = {}
forEach([
// https://github.com/jhermsmeier/node-mbr/blob/master/lib/partition.js#L38
0x05, 0x0F, 0x85, 0x15, 0x91, 0x9B, 0x5E, 0x5F, 0xCF, 0xD5, 0xC5,
0x82 // swap
], type => {
IGNORED[type] = true
})
const TYPES = {
0x7: 'NTFS',
0x83: 'linux',
0xc: 'FAT'
}
stream.resume()
await eventToPromise(stream, 'finish')
const parseLine = createPairsParser({
keyTransform: key => key === 'UUID'
? 'id'
: key.toLowerCase(),
valueTransform: (value, key) => key === 'start' || key === 'size'
? +value
: key === 'type'
? TYPES[+value] || value
: value
})
return device => execa.stdout('partx', [
'--bytes',
'--output=NR,START,SIZE,NAME,UUID,TYPE',
'--pairs',
device.path
]).then(stdout => mapFilter(splitLines(stdout), line => {
const partition = parseLine(line)
const { type } = partition
if (type != null && !IGNORED[+type]) {
return partition
}
}))
})()
// handle LVM logical volumes automatically
const listPartitions2 = device => listPartitions(device).then(partitions => {
const partitions2 = []
const promises = []
forEach(partitions, partition => {
if (+partition.type === 0x8e) {
promises.push(mountLvmPv(device, partition).then(device => {
const promise = listLvmLvs(device).then(lvs => {
forEach(lvs, lv => {
partitions2.push({
name: lv.lv_name,
size: +lv.lv_size,
id: `${partition.id}/${lv.vg_name}/${lv.lv_name}`
})
})
})
promise::pFinally(device.unmount)
return promise
}))
} else {
partitions2.push(partition)
}
})
return Promise.all(promises).then(() => partitions2)
})
const mountPartition = (device, partitionId) => Promise.all([
partitionId != null && listPartitions(device),
tmpDir()
]).then(([ partitions, path ]) => {
const options = [
'loop',
'ro'
]
if (partitions) {
const partition = find(partitions, { id: partitionId })
const { start } = partition
if (start != null) {
options.push(`offset=${start * 512}`)
}
}
const mount = options => execa('mount', [
`--options=${options.join(',')}`,
`--source=${device.path}`,
`--target=${path}`
])
// `noload` option is used for ext3/ext4, if it fails it might
// `be another fs, try without
return mount([ ...options, 'noload' ]).catch(() =>
mount(options)
).then(() => ({
path,
unmount: once(() => execa('umount', [ '--lazy', path ]))
}), error => {
console.log(error)
throw error
})
})
// handle LVM logical volumes automatically
const mountPartition2 = (device, partitionId) => {
if (
partitionId == null ||
!includes(partitionId, '/')
) {
return mountPartition(device, partitionId)
}
const [ pvId, vgName, lvName ] = partitionId.split('/')
return listPartitions(device).then(partitions =>
find(partitions, { id: pvId })
).then(pvId => mountLvmPv(device, pvId)).then(device1 =>
execa('vgchange', [ '-ay', vgName ]).then(() =>
lvs([ 'lv_name', 'lv_path' ], vgName).then(lvs =>
find(lvs, { lv_name: lvName }).lv_path
)
).then(path =>
mountPartition({ path }).then(device2 => ({
...device2,
unmount: () => device2.unmount().then(device1.unmount)
}))
).catch(error => device1.unmount().then(() => {
throw error
}))
)
}
// -------------------------------------------------------------------
const listLvmLvs = device => pvs([
'lv_name',
'lv_path',
'lv_size',
'vg_name'
], device.path).then(pvs => filter(pvs, 'lv_name'))
const mountLvmPv = (device, partition) => {
const args = []
if (partition) {
args.push('-o', partition.start * 512)
}
args.push(
'--show',
'-f',
device.path
)
return execa.stdout('losetup', args).then(stdout => {
const path = trim(stdout)
return {
path,
unmount: once(() => Promise.all([
execa('losetup', [ '-d', path ]),
pvs('vg_name', path).then(vgNames => execa('vgchange', [
'-an',
...vgNames
]))
]))
}
})
}
// ===================================================================
@@ -72,6 +303,15 @@ async function checkFileIntegrity (handler, name) {
export default class {
constructor (xo) {
this._xo = xo
// clean any LVM volumes that might have not been properly
// unmounted
xo.on('start', () => Promise.all([
execa('losetup', [ '-D' ]),
execa('vgchange', [ '-an' ])
]).then(() =>
execa('pvscan', [ '--cache' ])
))
}
async listRemoteBackups (remoteId) {
@@ -101,12 +341,52 @@ export default class {
return backups
}
async listVmBackups (remoteId) {
const handler = await this._xo.getRemoteHandler(remoteId)
const backups = []
await Promise.all(mapToArray(await handler.list(), entry => {
if (endsWith(entry, '.xva')) {
backups.push(parseVmBackupPath(entry))
} else if (startsWith(entry, 'vm_delta_')) {
return handler.list(entry).then(children => Promise.all(mapToArray(children, child => {
if (endsWith(child, '.json')) {
const path = `${entry}/${child}`
const record = parseVmBackupPath(path)
backups.push(record)
return handler.readFile(path).then(data => {
record.disks = mapToArray(JSON.parse(data).vdis, vdi => ({
id: `${entry}/${vdi.xoPath}`,
name: vdi.name_label,
uuid: vdi.uuid
}))
}).catch(noop)
}
})))
}
}))
return backups
}
async importVmBackup (remoteId, file, sr) {
const handler = await this._xo.getRemoteHandler(remoteId)
const stream = await handler.createReadStream(file)
const xapi = this._xo.getXapi(sr)
const vm = await xapi.importVm(stream, { srId: sr._xapiId })
const { datetime } = parseVmBackupPath(file)
await Promise.all([
xapi.addTag(vm.$id, 'restored from backup'),
xapi.editVm(vm.$id, {
name_label: `${vm.name_label} (${shortDate(datetime * 1e3)})`
})
])
return xapiObjectToXo(vm).id
}
@@ -141,7 +421,7 @@ export default class {
stream => stream.cancel()
))
return srcXapi.deleteVm(delta.vm.uuid, true)
return srcXapi.deleteVm(delta.vm.uuid)
})
const promise = targetXapi.importDeltaVm(
@@ -155,7 +435,7 @@ export default class {
// Once done, (asynchronously) remove the (now obsolete) local
// base.
if (localBaseUuid) {
promise.then(() => srcXapi.deleteVm(localBaseUuid, true))::pCatch(noop)
promise.then(() => srcXapi.deleteVm(localBaseUuid))::pCatch(noop)
}
// (Asynchronously) Identify snapshot as future base.
@@ -291,6 +571,18 @@ export default class {
return backups.slice(i)
}
// fix the parent UUID and filename in delta files after download from xapi or backup compression
async _chainDeltaVdiBackups ({handler, dir}) {
const backups = await this._listVdiBackups(handler, dir)
for (let i = 1; i < backups.length; i++) {
const childPath = dir + '/' + backups[i]
const modified = await chainVhd(handler, dir + '/' + backups[i - 1], handler, childPath)
if (modified) {
await handler.refreshChecksum(childPath)
}
}
}
async _mergeDeltaVdiBackups ({handler, dir, depth}) {
const backups = await this._listVdiBackups(handler, dir)
let i = backups.length - depth
@@ -432,16 +724,7 @@ export default class {
@deferrable.onFailure
async rollingDeltaVmBackup ($onFailure, {vm, remoteId, tag, depth}) {
const remote = await this._xo.getRemote(remoteId)
if (!remote) {
throw new Error(`No such Remote ${remoteId}`)
}
if (!remote.enabled) {
throw new Error(`Remote ${remoteId} is disabled`)
}
const handler = await this._xo.getRemoteHandler(remote)
const handler = await this._xo.getRemoteHandler(remoteId)
const xapi = this._xo.getXapi(vm)
vm = xapi.getObject(vm._xapiId)
@@ -452,7 +735,7 @@ export default class {
base => base.snapshot_time
)
const baseVm = bases.pop()
forEach(bases, base => { xapi.deleteVm(base.$id, true)::pCatch(noop) })
forEach(bases, base => { xapi.deleteVm(base.$id)::pCatch(noop) })
// Check backup dirs.
const dir = `vm_delta_${tag}_${vm.uuid}`
@@ -487,7 +770,7 @@ export default class {
stream => stream.cancel()
))
await xapi.deleteVm(delta.vm.$id, true)
await xapi.deleteVm(delta.vm.uuid)
})
// Save vdis.
@@ -515,15 +798,15 @@ export default class {
)
const fulFilledVdiBackups = []
let success = true
let error
// One or many vdi backups have failed.
for (const vdiBackup of vdiBackups) {
if (vdiBackup.isFulfilled()) {
fulFilledVdiBackups.push(vdiBackup)
} else {
console.error(`Rejected backup: ${vdiBackup.reason()}`)
success = false
error = vdiBackup.reason()
console.error('Rejected backup:', error)
}
}
@@ -535,8 +818,8 @@ export default class {
)
})
if (!success) {
throw new Error('Rolling delta vm backup failed.')
if (error) {
throw error
}
const date = safeDateFormat(new Date())
@@ -553,7 +836,9 @@ export default class {
mapToArray(vdiBackups, vdiBackup => {
const backupName = vdiBackup.value()
const backupDirectory = backupName.slice(0, backupName.lastIndexOf('/'))
return this._mergeDeltaVdiBackups({ handler, dir: `${dir}/${backupDirectory}`, depth })
const backupDir = `${dir}/${backupDirectory}`
return this._mergeDeltaVdiBackups({ handler, dir: backupDir, depth })
.then(() => { this._chainDeltaVdiBackups({ handler, dir: backupDir }) })
})
)
@@ -561,7 +846,7 @@ export default class {
await this._removeOldDeltaVmBackups(xapi, { vm, handler, dir, depth })
if (baseVm) {
xapi.deleteVm(baseVm.$id, true)::pCatch(noop)
xapi.deleteVm(baseVm.$id)::pCatch(noop)
}
// Returns relative path.
@@ -569,10 +854,13 @@ export default class {
}
async importDeltaVmBackup ({sr, remoteId, filePath}) {
filePath = `${filePath}${DELTA_BACKUP_EXT}`
const { datetime } = parseVmBackupPath(filePath)
const handler = await this._xo.getRemoteHandler(remoteId)
const xapi = this._xo.getXapi(sr)
const delta = JSON.parse(await handler.readFile(`${filePath}${DELTA_BACKUP_EXT}`))
const delta = JSON.parse(await handler.readFile(filePath))
let vm
const { version } = delta
@@ -599,9 +887,12 @@ export default class {
)
)
delta.vm.name_label += ` (${shortDate(datetime * 1e3)})`
delta.vm.tags.push('restored from backup')
vm = await xapi.importDeltaVm(delta, {
srId: sr._xapiId,
disableStartAfterImport: false
disableStartAfterImport: false,
srId: sr._xapiId
})
} else {
throw new Error(`Unsupported delta backup version: ${version}`)
@@ -613,16 +904,7 @@ export default class {
// -----------------------------------------------------------------
async backupVm ({vm, remoteId, file, compress, onlyMetadata}) {
const remote = await this._xo.getRemote(remoteId)
if (!remote) {
throw new Error(`No such Remote ${remoteId}`)
}
if (!remote.enabled) {
throw new Error(`Backup remote ${remoteId} is disabled`)
}
const handler = await this._xo.getRemoteHandler(remote)
const handler = await this._xo.getRemoteHandler(remoteId)
return this._backupVm(vm, handler, file, {compress, onlyMetadata})
}
@@ -640,16 +922,7 @@ export default class {
}
async rollingBackupVm ({vm, remoteId, tag, depth, compress, onlyMetadata}) {
const remote = await this._xo.getRemote(remoteId)
if (!remote) {
throw new Error(`No such Remote ${remoteId}`)
}
if (!remote.enabled) {
throw new Error(`Backup remote ${remoteId} is disabled`)
}
const handler = await this._xo.getRemoteHandler(remote)
const handler = await this._xo.getRemoteHandler(remoteId)
const files = await handler.list()
@@ -676,7 +949,7 @@ export default class {
const promises = []
for (let surplus = snapshots.length - (depth - 1); surplus > 0; surplus--) {
const oldSnap = snapshots.shift()
promises.push(xapi.deleteVm(oldSnap.uuid, true))
promises.push(xapi.deleteVm(oldSnap.uuid))
}
await Promise.all(promises)
}
@@ -709,7 +982,118 @@ export default class {
const n = 1 - depth
await Promise.all(mapToArray(n ? olderCopies.slice(0, n) : olderCopies, vm =>
// Do not consider a failure to delete an old copy as a fatal error.
targetXapi.deleteVm(vm.$id, true)::pCatch(noop)
targetXapi.deleteVm(vm.$id)::pCatch(noop)
))
}
// -----------------------------------------------------------------
_mountVhd (remoteId, vhdPath) {
return Promise.all([
this._xo.getRemoteHandler(remoteId),
tmpDir()
]).then(([ handler, mountDir ]) => {
if (!handler._getRealPath) {
throw new Error(`this remote is not supported`)
}
const remotePath = handler._getRealPath()
vhdPath = resolveSubpath(remotePath, vhdPath)
return Promise.resolve().then(() => {
// TODO: remove when no longer necessary.
//
// Currently, the filenames of the VHD changes over time
// (delta → full), but the JSON is not updated, therefore the
// VHD path may need to be fixed.
return endsWith(vhdPath, '_delta.vhd')
? pFromCallback(cb => stat(vhdPath, cb)).then(
() => vhdPath,
error => {
if (error && error.code === 'ENOENT') {
return `${vhdPath.slice(0, -10)}_full.vhd`
}
}
)
: vhdPath
}).then(vhdPath => execa('vhdimount', [ vhdPath, mountDir ])).then(() =>
pFromCallback(cb => readdir(mountDir, cb)).then(entries => {
let max = 0
forEach(entries, entry => {
const matches = /^vhdi(\d+)/.exec(entry)
if (matches) {
const value = +matches[1]
if (value > max) {
max = value
}
}
})
if (!max) {
throw new Error('no disks found')
}
return {
path: `${mountDir}/vhdi${max}`,
unmount: once(() => execa('fusermount', [ '-uz', mountDir ]))
}
})
)
})
}
_mountPartition (remoteId, vhdPath, partitionId) {
return this._mountVhd(remoteId, vhdPath).then(device =>
mountPartition2(device, partitionId).then(partition => ({
...partition,
unmount: () => partition.unmount().then(device.unmount)
})).catch(error => device.unmount().then(() => {
throw error
}))
)
}
@deferrable
async scanDiskBackup ($defer, remoteId, vhdPath) {
const device = await this._mountVhd(remoteId, vhdPath)
$defer(device.unmount)
return {
partitions: await listPartitions2(device)
}
}
@deferrable
async scanFilesInDiskBackup ($defer, remoteId, vhdPath, partitionId, path) {
const partition = await this._mountPartition(remoteId, vhdPath, partitionId)
$defer(partition.unmount)
path = resolveSubpath(partition.path, path)
const entries = await pFromCallback(cb => readdir(path, cb))
const entriesMap = {}
await Promise.all(mapToArray(entries, async name => {
const stats = await pFromCallback(cb => stat(`${path}/${name}`, cb))::pCatch(noop)
if (stats) {
entriesMap[stats.isDirectory() ? `${name}/` : name] = {}
}
}))
return entriesMap
}
async fetchFilesInDiskBackup (remoteId, vhdPath, partitionId, paths) {
const partition = await this._mountPartition(remoteId, vhdPath, partitionId)
let i = 0
const onEnd = () => {
if (!--i) {
partition.unmount()
}
}
return mapToArray(paths, path => {
++i
return createReadStream(resolveSubpath(partition.path, path)).once('end', onEnd)
})
}
}

View File

@@ -0,0 +1,33 @@
import { map, noop } from '../utils'
import { all as pAll } from 'promise-toolbox'
export default class ConfigManagement {
constructor () {
this._managers = { __proto__: null }
}
addConfigManager (id, exporter, importer) {
const managers = this._managers
if (id in managers) {
throw new Error(`${id} is already taken`)
}
this._managers[id] = { exporter, importer }
}
exportConfig () {
return map(this._managers, ({ exporter }, key) => exporter())::pAll()
}
importConfig (config) {
const managers = this._managers
return map(config, (entry, key) => {
const manager = managers[key]
if (manager) {
return manager.importer(entry)
}
})::pAll().then(noop)
}
}

View File

@@ -1,39 +1,54 @@
import highland from 'highland'
import concat from 'lodash/concat'
import countBy from 'lodash/countBy'
import diff from 'lodash/difference'
import findIndex from 'lodash/findIndex'
import flatten from 'lodash/flatten'
import highland from 'highland'
import includes from 'lodash/includes'
import isObject from 'lodash/isObject'
import keys from 'lodash/keys'
import mapValues from 'lodash/mapValues'
import pick from 'lodash/pick'
import remove from 'lodash/remove'
import synchronized from 'decorator-synchronized'
import { noSuchObject } from 'xo-common/api-errors'
import { fromCallback } from 'promise-toolbox'
import { NoSuchObject } from '../api-errors'
import {
forEach,
generateUnsecureToken,
isEmpty,
lightSet,
mapToArray,
streamToArray,
throwFn
} from '../utils'
// ===================================================================
class NoSuchIpPool extends NoSuchObject {
constructor (id) {
super(id, 'ip pool')
}
}
const normalize = ({
addresses,
id = throwFn('id is a required field'),
name = '',
networks
networks,
resourceSets
}) => ({
addresses,
id,
name,
networks
networks,
resourceSets
})
const _isAddressInIpPool = (address, network, ipPool) => (
ipPool.addresses && (address in ipPool.addresses) &&
includes(ipPool.networks, isObject(network) ? network.id : network)
)
// ===================================================================
// Note: an address cannot be in two different pools sharing a
// network.
export default class IpPools {
constructor (xo) {
this._store = null
@@ -41,6 +56,11 @@ export default class IpPools {
xo.on('start', async () => {
this._store = await xo.getStore('ipPools')
xo.addConfigManager('ipPools',
() => this.getAllIpPools(),
ipPools => Promise.all(mapToArray(ipPools, ipPool => this._save(ipPool)))
)
})
}
@@ -61,55 +81,181 @@ export default class IpPools {
const store = this._store
if (await store.has(id)) {
await Promise.all(mapToArray(await this._xo.getAllResourceSets(), async set => {
await this._xo.removeLimitFromResourceSet(`ipPool:${id}`, set.id)
return this._xo.removeIpPoolFromResourceSet(id, set.id)
}))
await this._removeIpAddressesFromVifs(
mapValues((await this.getIpPool(id)).addresses, 'vifs')
)
return store.del(id)
}
throw new NoSuchIpPool(id)
throw noSuchObject(id, 'ipPool')
}
getAllIpPools () {
_getAllIpPools (filter) {
return streamToArray(this._store.createValueStream(), {
filter,
mapper: normalize
})
}
async getAllIpPools (userId) {
let filter
if (userId != null) {
const user = await this._xo.getUser(userId)
if (user.permission !== 'admin') {
const resourceSets = await this._xo.getAllResourceSets(userId)
const ipPools = lightSet(flatten(mapToArray(resourceSets, 'ipPools')))
filter = ({ id }) => ipPools.has(id)
}
}
return this._getAllIpPools(filter)
}
getIpPool (id) {
return this._store.get(id).then(normalize, error => {
throw error.notFound ? new NoSuchIpPool(id) : error
throw error.notFound ? noSuchObject(id, 'ipPool') : error
})
}
allocIpAddress (address, vifId) {
// FIXME: does not work correctly if the address is in multiple
// pools.
return this._getForAddress(address).then(ipPool => {
const data = ipPool.addresses[address]
const vifs = data.vifs || (data.vifs = [])
if (!includes(vifs, vifId)) {
vifs.push(vifId)
return this._save(ipPool)
}
})
async _getAddressIpPool (address, network) {
const ipPools = await this._getAllIpPools(ipPool => _isAddressInIpPool(address, network, ipPool))
return ipPools && ipPools[0]
}
deallocIpAddress (address, vifId) {
return this._getForAddress(address).then(ipPool => {
const data = ipPool.addresses[address]
const vifs = data.vifs || (data.vifs = [])
const i = findIndex(vifs, id => id === vifId)
if (i !== -1) {
vifs.splice(i, 1)
return this._save(ipPool)
// Returns a map that indicates how many IPs from each IP pool the VM uses
// e.g.: { 'ipPool:abc': 3, 'ipPool:xyz': 7 }
async computeVmIpPoolsUsage (vm) {
const vifs = vm.VIFs
const ipPools = []
for (const vifId of vifs) {
const { allowedIpv4Addresses, allowedIpv6Addresses, $network } = this._xo.getObject(vifId)
for (const address of concat(allowedIpv4Addresses, allowedIpv6Addresses)) {
const ipPool = await this._getAddressIpPool(address, $network)
ipPool && ipPools.push(ipPool.id)
}
}
return countBy(ipPools, ({ id }) => `ipPool:${id}`)
}
@synchronized
allocIpAddresses (vifId, addAddresses, removeAddresses) {
const updatedIpPools = {}
const limits = {}
const xoVif = this._xo.getObject(vifId)
const xapi = this._xo.getXapi(xoVif)
const vif = xapi.getObject(xoVif._xapiId)
const allocAndSave = (() => {
const resourseSetId = xapi.xo.getData(vif.VM, 'resourceSet')
return () => {
const saveIpPools = () => Promise.all(mapToArray(updatedIpPools, ipPool => this._save(ipPool)))
return resourseSetId
? this._xo.allocateLimitsInResourceSet(limits, resourseSetId).then(
saveIpPools
)
: saveIpPools()
}
})()
return fromCallback(cb => {
const network = vif.$network
const networkId = network.$id
const isVif = id => id === vifId
highland(this._store.createValueStream()).each(ipPool => {
const { addresses, networks } = updatedIpPools[ipPool.id] || ipPool
if (!(addresses && networks && includes(networks, networkId))) {
return false
}
let allocations = 0
let changed = false
forEach(removeAddresses, address => {
let vifs, i
if (
(vifs = addresses[address]) &&
(vifs = vifs.vifs) &&
(i = findIndex(vifs, isVif)) !== -1
) {
vifs.splice(i, 1)
--allocations
changed = true
}
})
forEach(addAddresses, address => {
const data = addresses[address]
if (!data) {
return
}
const vifs = data.vifs || (data.vifs = [])
if (!includes(vifs, vifId)) {
vifs.push(vifId)
++allocations
changed = true
}
})
if (changed) {
const { id } = ipPool
updatedIpPools[id] = ipPool
limits[`ipPool:${id}`] = (limits[`ipPool:${id}`] || 0) + allocations
}
}).toCallback(cb)
}).then(allocAndSave)
}
async _removeIpAddressesFromVifs (mapAddressVifs) {
const mapVifAddresses = {}
forEach(mapAddressVifs, (vifs, address) => {
forEach(vifs, vifId => {
if (mapVifAddresses[vifId]) {
mapVifAddresses[vifId].push(address)
} else {
mapVifAddresses[vifId] = [ address ]
}
})
})
const { getXapi } = this._xo
return Promise.all(mapToArray(mapVifAddresses, (addresses, vifId) => {
let vif
try {
// The IP may not have been correctly deallocated from the IP pool when the VIF was deleted
vif = this._xo.getObject(vifId)
} catch (error) {
return
}
const { allowedIpv4Addresses, allowedIpv6Addresses } = vif
remove(allowedIpv4Addresses, address => includes(addresses, address))
remove(allowedIpv6Addresses, address => includes(addresses, address))
this.allocIpAddresses(vifId, undefined, concat(allowedIpv4Addresses, allowedIpv6Addresses))
return getXapi(vif).editVif(vif._xapiId, {
ipv4Allowed: allowedIpv4Addresses,
ipv6Allowed: allowedIpv6Addresses
})
}))
}
async updateIpPool (id, {
addresses,
name,
networks
networks,
resourceSets
}) {
const ipPool = await this.getIpPool(id)
const previousAddresses = { ...ipPool.addresses }
name != null && (ipPool.name = name)
if (addresses) {
@@ -121,6 +267,11 @@ export default class IpPools {
addresses_[address] = props
}
})
// Remove the addresses that are no longer in the IP pool from the concerned VIFs
const deletedAddresses = diff(keys(previousAddresses), keys(addresses_))
await this._removeIpAddressesFromVifs(pick(previousAddresses, deletedAddresses))
if (isEmpty(addresses_)) {
delete ipPool.addresses
} else {
@@ -133,6 +284,11 @@ export default class IpPools {
ipPool.networks = networks
}
// TODO: Implement patching like for addresses.
if (resourceSets) {
ipPool.resourceSets = resourceSets
}
await this._save(ipPool)
}
@@ -144,15 +300,6 @@ export default class IpPools {
return id
}
_getForAddress (address) {
return fromCallback(cb => {
highland(this._store.createValueStream()).find(ipPool => {
const { addresses } = ipPool
return addresses && addresses[address]
}).pull(cb)
})
}
_save (ipPool) {
ipPool = normalize(ipPool)
return this._store.put(ipPool.id, ipPool)

View File

@@ -1,29 +1,29 @@
import assign from 'lodash/assign'
import JobExecutor from '../job-executor'
import { Jobs } from '../models/job'
import {
GenericError,
NoSuchObject
} from '../api-errors'
// ===================================================================
class NoSuchJob extends NoSuchObject {
constructor (id) {
super(id, 'job')
}
}
import { mapToArray } from '../utils'
import { noSuchObject } from 'xo-common/api-errors'
// ===================================================================
export default class {
constructor (xo) {
this._executor = new JobExecutor(xo)
this._jobs = new Jobs({
const jobsDb = this._jobs = new Jobs({
connection: xo._redis,
prefix: 'xo:job',
indexes: ['user_id', 'key']
})
xo.on('start', () => {
xo.addConfigManager('jobs',
() => jobsDb.get(),
jobs => Promise.all(mapToArray(jobs, job =>
jobsDb.save(job)
))
)
})
}
async getAllJobs () {
@@ -33,21 +33,21 @@ export default class {
async getJob (id) {
const job = await this._jobs.first(id)
if (!job) {
throw new NoSuchJob(id)
throw noSuchObject(id, 'job')
}
return job.properties
}
async createJob (userId, job) {
async createJob (job) {
// TODO: use plain objects
const job_ = await this._jobs.create(userId, job)
const job_ = await this._jobs.create(job)
return job_.properties
}
async updateJob ({id, type, name, key, method, paramsVector}) {
async updateJob ({id, userId, timeout, type, name, key, method, paramsVector}) {
const oldJob = await this.getJob(id)
assign(oldJob, {type, name, key, method, paramsVector})
assign(oldJob, {userId, timeout, type, name, key, method, paramsVector})
return /* await */ this._jobs.save(oldJob)
}
@@ -56,24 +56,10 @@ export default class {
}
async runJobSequence (idSequence) {
const notFound = []
for (const id of idSequence) {
let job
try {
job = await this.getJob(id)
} catch (error) {
if (error instanceof NoSuchJob) {
notFound.push(id)
} else {
throw error
}
}
if (job) {
await this._executor.exec(job)
}
}
if (notFound.length > 0) {
throw new GenericError(`The following jobs were not found: ${notFound.join()}`)
const jobs = await Promise.all(mapToArray(idSequence, id => this.getJob(id)))
for (const job of jobs) {
await this._executor.exec(job)
}
}
}

View File

@@ -2,9 +2,9 @@ import createJsonSchemaValidator from 'is-my-json-valid'
import { PluginsMetadata } from '../models/plugin-metadata'
import {
InvalidParameters,
NoSuchObject
} from '../api-errors'
invalidParameters,
noSuchObject
} from 'xo-common/api-errors'
import {
createRawObject,
isFunction,
@@ -13,14 +13,6 @@ import {
// ===================================================================
class NoSuchPlugin extends NoSuchObject {
constructor (id) {
super(id, 'plugin')
}
}
// ===================================================================
export default class {
constructor (xo) {
this._plugins = createRawObject()
@@ -29,12 +21,21 @@ export default class {
connection: xo._redis,
prefix: 'xo:plugin-metadata'
})
xo.on('start', () => {
xo.addConfigManager('plugins',
() => this._pluginsMetadata.get(),
plugins => Promise.all(mapToArray(plugins, plugin =>
this._pluginsMetadata.save(plugin)
))
)
})
}
_getRawPlugin (id) {
const plugin = this._plugins[id]
if (!plugin) {
throw new NoSuchPlugin(id)
throw noSuchObject(id, 'plugin')
}
return plugin
}
@@ -51,16 +52,19 @@ export default class {
instance,
configurationSchema,
configurationPresets,
testSchema,
version
) {
const id = name
const plugin = this._plugins[id] = {
configured: !configurationSchema,
configurationPresets,
configurationSchema,
configured: !configurationSchema,
id,
instance,
name,
testable: isFunction(instance.test),
testSchema,
unloadable: isFunction(instance.unload),
version
}
@@ -68,7 +72,6 @@ export default class {
const metadata = await this._getPluginMetadata(id)
let autoload = true
let configuration
if (metadata) {
({
autoload,
@@ -97,7 +100,7 @@ export default class {
}
})
.catch(error => {
console.error('register plugin %s: %s', name, error && error.stack || error)
console.error('register plugin %s: %s', name, (error && error.stack) || error)
})
}
@@ -107,6 +110,8 @@ export default class {
configurationSchema,
loaded,
name,
testable,
testSchema,
unloadable,
version
} = this._getRawPlugin(id)
@@ -124,7 +129,9 @@ export default class {
version,
configuration,
configurationPresets,
configurationSchema
configurationSchema,
testable,
testSchema
}
}
@@ -139,12 +146,12 @@ export default class {
const { configurationSchema } = plugin
if (!configurationSchema) {
throw new InvalidParameters('plugin not configurable')
throw invalidParameters('plugin not configurable')
}
// See: https://github.com/mafintosh/is-my-json-valid/issues/116
if (configuration == null) {
throw new InvalidParameters([{
throw invalidParameters([{
field: 'data',
message: 'is the wrong type'
}])
@@ -152,7 +159,7 @@ export default class {
const validate = createJsonSchemaValidator(configurationSchema)
if (!validate(configuration)) {
throw new InvalidParameters(validate.errors)
throw invalidParameters(validate.errors)
}
// Sets the plugin configuration.
@@ -191,11 +198,11 @@ export default class {
async loadPlugin (id) {
const plugin = this._getRawPlugin(id)
if (plugin.loaded) {
throw new InvalidParameters('plugin already loaded')
throw invalidParameters('plugin already loaded')
}
if (!plugin.configured) {
throw new InvalidParameters('plugin not configured')
throw invalidParameters('plugin not configured')
}
await plugin.instance.load()
@@ -205,11 +212,11 @@ export default class {
async unloadPlugin (id) {
const plugin = this._getRawPlugin(id)
if (!plugin.loaded) {
throw new InvalidParameters('plugin already unloaded')
throw invalidParameters('plugin already unloaded')
}
if (plugin.unloadable === false) {
throw new InvalidParameters('plugin cannot be unloaded')
throw invalidParameters('plugin cannot be unloaded')
}
await plugin.instance.unload()
@@ -219,4 +226,31 @@ export default class {
async purgePluginConfiguration (id) {
await this._pluginsMetadata.merge(id, { configuration: undefined })
}
async testPlugin (id, data) {
const plugin = this._getRawPlugin(id)
if (!plugin.testable) {
throw invalidParameters('plugin not testable')
}
if (!plugin.loaded) {
throw invalidParameters('plugin not loaded')
}
const { testSchema } = plugin
if (testSchema) {
if (data == null) {
throw invalidParameters([{
field: 'data',
message: 'is the wrong type'
}])
}
const validate = createJsonSchemaValidator(testSchema)
if (!validate(data)) {
throw invalidParameters(validate.errors)
}
}
await plugin.instance.test(data)
}
}

View File

@@ -1,26 +1,18 @@
import { noSuchObject } from 'xo-common/api-errors'
import RemoteHandlerLocal from '../remote-handlers/local'
import RemoteHandlerNfs from '../remote-handlers/nfs'
import RemoteHandlerSmb from '../remote-handlers/smb'
import {
forEach
forEach,
mapToArray
} from '../utils'
import {
NoSuchObject
} from '../api-errors'
import {
Remotes
} from '../models/remote'
// ===================================================================
class NoSuchRemote extends NoSuchObject {
constructor (id) {
super(id, 'remote')
}
}
// ===================================================================
export default class {
constructor (xo) {
this._remotes = new Remotes({
@@ -30,17 +22,29 @@ export default class {
})
xo.on('start', async () => {
xo.addConfigManager('remotes',
() => this._remotes.get(),
remotes => Promise.all(mapToArray(remotes, remote =>
this._remotes.save(remote)
))
)
await this.initRemotes()
await this.syncAllRemotes()
})
xo.on('stop', () => this.forgetAllRemotes())
}
async getRemoteHandler (remote) {
async getRemoteHandler (remote, ignoreDisabled) {
if (typeof remote === 'string') {
remote = await this.getRemote(remote)
}
const Handler = {
if (!(ignoreDisabled || remote.enabled)) {
throw new Error('remote is disabled')
}
const HANDLERS = {
file: RemoteHandlerLocal,
smb: RemoteHandlerSmb,
nfs: RemoteHandlerNfs
@@ -48,14 +52,16 @@ export default class {
// FIXME: should be done in xo-remote-parser.
const type = remote.url.split('://')[0]
if (!Handler[type]) {
const Handler = HANDLERS[type]
if (!Handler) {
throw new Error('Unhandled remote type')
}
return new Handler[type](remote)
return new Handler(remote)
}
async testRemote (remote) {
const handler = await this.getRemoteHandler(remote)
const handler = await this.getRemoteHandler(remote, true)
return handler.test()
}
@@ -66,7 +72,7 @@ export default class {
async _getRemote (id) {
const remote = await this._remotes.first(id)
if (!remote) {
throw new NoSuchRemote(id)
throw noSuchObject(id, 'remote')
}
return remote
@@ -84,7 +90,7 @@ export default class {
async updateRemote (id, {name, url, enabled, error}) {
const remote = await this._getRemote(id)
this._updateRemote(remote, {name, url, enabled, error})
const handler = await this.getRemoteHandler(remote.properties)
const handler = await this.getRemoteHandler(remote.properties, true)
const props = await handler.sync()
this._updateRemote(remote, props)
return (await this._remotes.save(remote)).properties
@@ -102,7 +108,7 @@ export default class {
}
async removeRemote (id) {
const handler = await this.getRemoteHandler(id)
const handler = await this.getRemoteHandler(id, true)
await handler.forget()
await this._remotes.remove(id)
}
@@ -120,7 +126,7 @@ export default class {
const remotes = await this.getAllRemotes()
for (let remote of remotes) {
try {
(await this.getRemoteHandler(remote)).forget()
(await this.getRemoteHandler(remote, true)).forget()
} catch (_) {}
}
}

View File

@@ -2,11 +2,12 @@ import every from 'lodash/every'
import keyBy from 'lodash/keyBy'
import remove from 'lodash/remove'
import some from 'lodash/some'
import synchronized from 'decorator-synchronized'
import {
NoSuchObject,
Unauthorized
} from '../api-errors'
noSuchObject,
unauthorized
} from 'xo-common/api-errors'
import {
forEach,
generateUnsecureToken,
@@ -19,10 +20,12 @@ import {
// ===================================================================
class NoSuchResourceSet extends NoSuchObject {
constructor (id) {
super(id, 'resource set')
}
const VM_RESOURCES = {
cpus: true,
disk: true,
disks: true,
memory: true,
vms: true
}
const computeVmResourcesUsage = vm => {
@@ -54,6 +57,7 @@ const computeVmResourcesUsage = vm => {
const normalize = set => ({
id: set.id,
ipPools: set.ipPools || [],
limits: set.limits
? map(set.limits, limit => isObject(limit)
? limit
@@ -76,6 +80,13 @@ export default class {
this._store = null
xo.on('start', async () => {
xo.addConfigManager('resourceSets',
() => this.getAllResourceSets(),
resourceSets => Promise.all(mapToArray(resourceSets, resourceSet =>
this._save(resourceSet)
))
)
this._store = await xo.getStore('resourceSets')
})
}
@@ -108,7 +119,7 @@ export default class {
// The set does not contains ALL objects.
!every(objectIds, lightSet(set.objects).has)
)) {
throw new Unauthorized()
throw unauthorized()
}
}
@@ -140,14 +151,15 @@ export default class {
return store.del(id)
}
throw new NoSuchResourceSet(id)
throw noSuchObject(id, 'resourceSet')
}
async updateResourceSet (id, {
name = undefined,
subjects = undefined,
objects = undefined,
limits = undefined
limits = undefined,
ipPools = undefined
}) {
const set = await this.getResourceSet(id)
if (name) {
@@ -178,6 +190,9 @@ export default class {
}
})
}
if (ipPools) {
set.ipPools = ipPools
}
await this._save(set)
}
@@ -203,7 +218,7 @@ export default class {
getResourceSet (id) {
return this._store.get(id).then(normalize, error => {
if (error.notFound) {
throw new NoSuchResourceSet(id)
throw noSuchObject(id, 'resourceSet')
}
throw error
@@ -218,7 +233,19 @@ export default class {
async removeObjectFromResourceSet (objectId, setId) {
const set = await this.getResourceSet(setId)
remove(set.objects)
remove(set.objects, id => id === objectId)
await this._save(set)
}
async addIpPoolToResourceSet (ipPoolId, setId) {
const set = await this.getResourceSet(setId)
set.ipPools.push(ipPoolId)
await this._save(set)
}
async removeIpPoolFromResourceSet (ipPoolId, setId) {
const set = await this.getResourceSet(setId)
remove(set.ipPools, id => id === ipPoolId)
await this._save(set)
}
@@ -230,7 +257,7 @@ export default class {
async removeSubjectToResourceSet (subjectId, setId) {
const set = await this.getResourceSet(setId)
remove(set.subjects, subjectId)
remove(set.subjects, id => id === subjectId)
await this._save(set)
}
@@ -246,6 +273,7 @@ export default class {
await this._save(set)
}
@synchronized
async allocateLimitsInResourceSet (limits, setId) {
const set = await this.getResourceSet(setId)
forEach(limits, (quantity, id) => {
@@ -261,6 +289,7 @@ export default class {
await this._save(set)
}
@synchronized
async releaseLimitsInResourceSet (limits, setId) {
const set = await this.getResourceSet(setId)
forEach(limits, (quantity, id) => {
@@ -280,7 +309,9 @@ export default class {
const sets = keyBy(await this.getAllResourceSets(), 'id')
forEach(sets, ({ limits }) => {
forEach(limits, (limit, id) => {
limit.available = limit.total
if (VM_RESOURCES[id]) { // only reset VMs related limits
limit.available = limit.total
}
})
})

View File

@@ -1,9 +1,10 @@
import { BaseError } from 'make-error'
import { NoSuchObject } from '../api-errors.js'
import { Schedules } from '../models/schedule'
import { noSuchObject } from 'xo-common/api-errors.js'
import { Schedules } from '../models/schedule'
import {
forEach,
mapToArray,
scheduleFn
} from '../utils'
@@ -19,15 +20,9 @@ export class ScheduleOverride extends SchedulerError {
}
}
export class NoSuchSchedule extends NoSuchObject {
constructor (scheduleOrId) {
super(scheduleOrId, 'schedule')
}
}
export class ScheduleNotEnabled extends SchedulerError {
constructor (scheduleOrId) {
super('Schedule ' + _resolveId(scheduleOrId)) + ' is not enabled'
super('Schedule ' + _resolveId(scheduleOrId) + ' is not enabled')
}
}
@@ -42,14 +37,23 @@ export class ScheduleAlreadyEnabled extends SchedulerError {
export default class {
constructor (xo) {
this.xo = xo
this._redisSchedules = new Schedules({
const schedules = this._redisSchedules = new Schedules({
connection: xo._redis,
prefix: 'xo:schedule',
indexes: ['user_id', 'job']
})
this._scheduleTable = undefined
xo.on('start', () => this._loadSchedules())
xo.on('start', () => {
xo.addConfigManager('schedules',
() => schedules.get(),
schedules_ => Promise.all(mapToArray(schedules_, schedule =>
schedules.save(schedule)
))
)
return this._loadSchedules()
})
xo.on('stop', () => this._disableAll())
}
@@ -86,7 +90,7 @@ export default class {
_disable (scheduleOrId) {
if (!this._exists(scheduleOrId)) {
throw new NoSuchSchedule(scheduleOrId)
throw noSuchObject(scheduleOrId, 'schedule')
}
if (!this._isEnabled(scheduleOrId)) {
throw new ScheduleNotEnabled(scheduleOrId)
@@ -125,7 +129,7 @@ export default class {
const schedule = await this._redisSchedules.first(id)
if (!schedule) {
throw new NoSuchSchedule(id)
throw noSuchObject(id, 'schedule')
}
return schedule
@@ -166,7 +170,7 @@ export default class {
const { properties } = schedule
if (!this._exists(properties)) {
throw new NoSuchSchedule(properties)
throw noSuchObject(properties, 'schedule')
}
if (this._isEnabled(properties)) {
@@ -182,7 +186,7 @@ export default class {
try {
this._disable(id)
} catch (exc) {
if (!exc instanceof SchedulerError) {
if (!(exc instanceof SchedulerError)) {
throw exc
}
} finally {

View File

@@ -54,7 +54,7 @@ const levelPromise = db => {
dbP[name] = db::value
} else {
dbP[`${name}Sync`] = db::value
dbP[name] = value::promisify(db)
dbP[name] = promisify(value, db)
}
})

View File

@@ -5,11 +5,11 @@ import {
needsRehash,
verify
} from 'hashy'
import {
InvalidCredential,
NoSuchObject
} from '../api-errors'
invalidCredentials,
noSuchObject
} from 'xo-common/api-errors'
import {
Groups
} from '../models/group'
@@ -27,18 +27,6 @@ import {
// ===================================================================
class NoSuchGroup extends NoSuchObject {
constructor (id) {
super(id, 'group')
}
}
class NoSuchUser extends NoSuchObject {
constructor (id) {
super(id, 'user')
}
}
const addToArraySet = (set, value) => set && !includes(set, value)
? set.concat(value)
: [ value ]
@@ -52,22 +40,40 @@ export default class {
const redis = xo._redis
this._groups = new Groups({
const groupsDb = this._groups = new Groups({
connection: redis,
prefix: 'xo:group'
})
const users = this._users = new Users({
const usersDb = this._users = new Users({
connection: redis,
prefix: 'xo:user',
indexes: ['email']
})
xo.on('start', async () => {
if (!await users.exists()) {
xo.addConfigManager('groups',
() => groupsDb.get(),
groups => Promise.all(mapToArray(groups, group => groupsDb.save(group)))
)
xo.addConfigManager('users',
() => usersDb.get(),
users => Promise.all(mapToArray(users, async user => {
const userId = user.id
const conflictUsers = await usersDb.get({ email: user.email })
if (!isEmpty(conflictUsers)) {
await Promise.all(mapToArray(conflictUsers, ({ id }) =>
(id !== userId) && this.deleteUser(user.id)
))
}
return usersDb.save(user)
}))
)
if (!await usersDb.exists()) {
const email = 'admin@admin.net'
const password = 'admin'
await this.createUser(email, {password, permission: 'admin'})
await this.createUser({email, password, permission: 'admin'})
console.log('[INFO] Default user created:', email, ' with password', password)
}
})
@@ -75,13 +81,17 @@ export default class {
// -----------------------------------------------------------------
async createUser (email, { password, ...properties }) {
async createUser ({ name, password, ...properties }) {
if (name) {
properties.email = name
}
if (password) {
properties.pw_hash = await hash(password)
}
// TODO: use plain objects
const user = await this._users.create(email, properties)
const user = await this._users.create(properties)
return user.properties
}
@@ -100,6 +110,13 @@ export default class {
})
::pCatch(noop) // Ignore any failures.
// Remove ACLs for this user.
this._xo.getAclsForSubject(id).then(acls => {
forEach(acls, acl => {
this._xo.removeAcl(id, acl.object, acl.action)::pCatch(noop)
})
})
// Remove the user from all its groups.
forEach(user.groups, groupId => {
this.getGroup(groupId)
@@ -152,7 +169,7 @@ export default class {
async _getUser (id) {
const user = await this._users.first(id)
if (!user) {
throw new NoSuchUser(id)
throw noSuchObject(id, 'user')
}
return user
@@ -185,7 +202,7 @@ export default class {
return null
}
throw new NoSuchUser(username)
throw noSuchObject(username, 'user')
}
// Get or create a user associated with an auth provider.
@@ -203,14 +220,15 @@ export default class {
throw new Error(`registering ${name} user is forbidden`)
}
return /* await */ this.createUser(name, {
return /* await */ this.createUser({
name,
_provider: provider
})
}
async changeUserPassword (userId, oldPassword, newPassword) {
if (!(await this.checkUserPassword(userId, oldPassword, false))) {
throw new InvalidCredential()
throw invalidCredentials()
}
await this.updateUser(userId, { password: newPassword })
@@ -238,7 +256,6 @@ export default class {
// TODO: use plain objects.
const group = (await this._groups.create(name)).properties
group.users = JSON.parse(group.users)
return group
}
@@ -247,6 +264,13 @@ export default class {
await this._groups.remove(id)
// Remove ACLs for this group.
this._xo.getAclsForSubject(id).then(acls => {
forEach(acls, acl => {
this._xo.removeAcl(id, acl.object, acl.action)::pCatch(noop)
})
})
// Remove the group from all its users.
forEach(group.users, userId => {
this.getUser(userId)
@@ -266,7 +290,7 @@ export default class {
async getGroup (id) {
const group = await this._groups.first(id)
if (!group) {
throw new NoSuchGroup(id)
throw noSuchObject(id, 'group')
}
return group.properties
@@ -322,7 +346,7 @@ export default class {
if (newUsersIds.has(id)) {
newUsersIds.delete(id)
} else {
oldUsers.push(id)
oldUsersIds.push(id)
}
})
newUsersIds = newUsersIds.toArray()

View File

@@ -1,10 +1,8 @@
import { noSuchObject } from 'xo-common/api-errors'
import Xapi from '../xapi'
import xapiObjectToXo from '../xapi-object-to-xo'
import XapiStats from '../xapi-stats'
import {
GenericError,
NoSuchObject
} from '../api-errors'
import {
camelToSnakeCase,
createRawObject,
@@ -13,7 +11,8 @@ import {
isString,
noop,
pCatch,
popProperty
popProperty,
serializeError
} from '../utils'
import {
Servers
@@ -21,18 +20,10 @@ import {
// ===================================================================
class NoSuchXenServer extends NoSuchObject {
constructor (id) {
super(id, 'xen server')
}
}
// ===================================================================
export default class {
constructor (xo) {
this._objectConflicts = createRawObject() // TODO: clean when a server is disconnected.
this._servers = new Servers({
const serversDb = this._servers = new Servers({
connection: xo._redis,
prefix: 'xo:server',
indexes: ['host']
@@ -43,8 +34,13 @@ export default class {
this._xo = xo
xo.on('start', async () => {
xo.addConfigManager('xenServers',
() => serversDb.get(),
servers => serversDb.update(servers)
)
// Connects to existing servers.
const servers = await this._servers.get()
const servers = await serversDb.get()
for (let server of servers) {
if (server.enabled) {
this.connectXenServer(server.id).catch(error => {
@@ -60,11 +56,12 @@ export default class {
// TODO: disconnect servers on stop.
}
async registerXenServer ({host, username, password, readOnly = false}) {
async registerXenServer ({label, host, username, password, readOnly = false}) {
// FIXME: We are storing passwords which is bad!
// Could we use tokens instead?
// TODO: use plain objects
const server = await this._servers.create({
label: label || undefined,
host,
username,
password,
@@ -79,17 +76,22 @@ export default class {
this.disconnectXenServer(id)::pCatch(noop)
if (!await this._servers.remove(id)) {
throw new NoSuchXenServer(id)
throw noSuchObject(id, 'xenServer')
}
}
async updateXenServer (id, {host, username, password, readOnly, enabled}) {
async updateXenServer (id, {label, host, username, password, readOnly, enabled, error}) {
const server = await this._getXenServer(id)
if (label !== undefined) server.set('label', label || undefined)
if (host) server.set('host', host)
if (username) server.set('username', username)
if (password) server.set('password', password)
if (error !== undefined) {
server.set('error', error ? JSON.stringify(error) : '')
}
if (enabled !== undefined) {
server.set('enabled', enabled ? 'true' : undefined)
}
@@ -110,7 +112,7 @@ export default class {
async _getXenServer (id) {
const server = await this._servers.first(id)
if (!server) {
throw new NoSuchXenServer(id)
throw noSuchObject(id, 'xenServer')
}
return server
@@ -283,23 +285,20 @@ export default class {
xapi.xo.install()
try {
await xapi.connect()
} catch (error) {
if (error.code === 'SESSION_AUTHENTICATION_FAILED') {
throw new GenericError('authentication failed')
await xapi.connect().then(
() => this.updateXenServer(id, { error: null }),
error => {
this.updateXenServer(id, { error: serializeError(error) })
throw error
}
if (error.code === 'EHOSTUNREACH') {
throw new GenericError('host unreachable')
}
throw error
}
)
}
async disconnectXenServer (id) {
const xapi = this._xapis[id]
if (!xapi) {
throw new NoSuchXenServer(id)
throw noSuchObject(id, 'xenServer')
}
delete this._xapis[id]

View File

@@ -3,6 +3,7 @@ import XoCollection from 'xo-collection'
import XoUniqueIndex from 'xo-collection/unique-index'
import {createClient as createRedisClient} from 'redis'
import {EventEmitter} from 'events'
import { noSuchObject } from 'xo-common/api-errors'
import mixins from './xo-mixins'
import Connection from './connection'
@@ -20,9 +21,6 @@ import {
mapToArray,
noop
} from './utils'
import {
NoSuchObject
} from './api-errors'
// ===================================================================
@@ -31,6 +29,9 @@ export default class Xo extends EventEmitter {
constructor (config) {
super()
// a lot of mixins adds listener for start/stop/… events
this.setMaxListeners(0)
this._config = config
this._objects = new XoCollection()
@@ -43,7 +44,33 @@ export default class Xo extends EventEmitter {
this._httpRequestWatchers = createRawObject()
// Connects to Redis.
this._redis = createRedisClient(config.redis && config.redis.uri)
{
const {
renameCommands: rename_commands,
socket: path,
uri: url
} = config.redis || {}
this._redis = createRedisClient({ path, rename_commands, url })
}
}
// -----------------------------------------------------------------
async clean () {
const handleCleanError = error => {
console.error(
'[WARN] clean error:',
(error && error.stack) || error
)
}
await Promise.all(mapToArray(
this.listeners('clean'),
listener => new Promise(resolve => {
resolve(listener.call(this))
}).catch(handleCleanError)
))
}
// -----------------------------------------------------------------
@@ -58,7 +85,7 @@ export default class Xo extends EventEmitter {
const handleStartError = error => {
console.error(
'[WARN] start error:',
error && error.stack || error
(error && error.stack) || error
)
}
await Promise.all(mapToArray(
@@ -82,7 +109,7 @@ export default class Xo extends EventEmitter {
const handleStopError = error => {
console.error(
'[WARN] stop error:',
error && error.stack || error
(error && error.stack) || error
)
}
await Promise.all(mapToArray(
@@ -122,14 +149,14 @@ export default class Xo extends EventEmitter {
const obj = all[key] || byRef[key]
if (!obj) {
throw new NoSuchObject(key, type)
throw noSuchObject(key, type)
}
if (type != null && (
isString(type) && type !== obj.type ||
(isString(type) && type !== obj.type) ||
!includes(type, obj.type) // Array
)) {
throw new NoSuchObject(key, type)
throw noSuchObject(key, type)
}
return obj
@@ -192,7 +219,7 @@ export default class Xo extends EventEmitter {
const {fn, data} = watcher
new Promise(resolve => {
resolve(fn(req, res, data, next))
resolve(fn.call(this, req, res, data, next))
}).then(
result => {
if (result != null) {
@@ -255,7 +282,7 @@ export default class Xo extends EventEmitter {
// -----------------------------------------------------------------
// Plugins can use this method to expose methods directly on XO.
defineProperty (name, value) {
defineProperty (name, value, thisArg = null) {
if (name in this) {
throw new Error(`Xo#${name} is already defined`)
}
@@ -263,7 +290,7 @@ export default class Xo extends EventEmitter {
// For security, prevent from accessing `this`.
if (isFunction(value)) {
value = (value => function () {
return value.apply(null, arguments)
return value.apply(thisArg, arguments)
})(value)
}
@@ -279,6 +306,23 @@ export default class Xo extends EventEmitter {
return () => unset()
}
// Convenience method to define multiple properties at once.
defineProperties (props, thisArg) {
const unsets = []
const unset = () => forEach(unsets, unset => unset())
try {
forEach(props, (value, name) => {
unsets.push(this.defineProperty(name, value, thisArg))
})
} catch (error) {
unset()
throw error
}
return unset
}
// -----------------------------------------------------------------
// Watches objects changes.

View File

@@ -1,84 +0,0 @@
#!/usr/bin/env node
var join = require('path').join
var readdir = require('fs').readdirSync
var stat = require('fs').statSync
var writeFile = require('fs').writeFileSync
// ===================================================================
function bind (fn, thisArg) {
return function () {
return fn.apply(thisArg, arguments)
}
}
function camelCase (str) {
return str.toLowerCase().replace(/[^a-z0-9]+([a-z0-9])/g, function (_, str) {
return str.toUpperCase()
})
}
function removeSuffix (str, sfx) {
var strLength = str.length
var sfxLength = sfx.length
var pos = strLength - sfxLength
if (pos < 0 || str.indexOf(sfx, pos) !== pos) {
return false
}
return str.slice(0, pos)
}
// ===================================================================
function handleEntry (entry, dir) {
var stats = stat(join(dir, entry))
var base
if (stats.isDirectory()) {
base = entry
} else if (!(
stats.isFile() && (
(base = removeSuffix(entry, '.coffee')) ||
(base = removeSuffix(entry, '.js'))
)
)) {
return
}
var identifier = camelCase(base)
this(
'import ' + identifier + " from './" + base + "'",
'defaults.' + identifier + ' = ' + identifier,
'export * as ' + identifier + " from './" + base + "'",
''
)
}
function generateIndex (dir) {
var content = [
'//',
'// This file has been generated by /tools/generate-index',
'//',
'// It is automatically re-generated each time a build is started.',
'//',
'',
'const defaults = {}',
'export default defaults',
''
]
var write = bind(content.push, content)
readdir(dir).map(function (entry) {
if (entry === 'index.js') {
return
}
handleEntry.call(write, entry, dir)
})
writeFile(dir + '/index.js', content.join('\n'))
}
process.argv.slice(2).map(generateIndex)

6748
yarn.lock Normal file

File diff suppressed because it is too large Load Diff