Compare commits

..

415 Commits

Author SHA1 Message Date
Olivier Lambert
876c63fe80 4.13.1 2016-02-05 13:00:02 +01:00
Julien Fontanet
ba66af922f 4.13.0 2016-02-04 19:43:43 +01:00
Julien Fontanet
28b9bbe54f 4.13.0-0 2016-02-04 18:01:04 +01:00
Julien Fontanet
bf6bd7cbdc Merge pull request #230 from vatesfr/pierre-vm-migration-details
Fix intra-pool migration on different SRs
2016-02-04 17:30:14 +01:00
Pierre
ddcb2468a6 Minor fixes 2016-02-04 17:26:27 +01:00
Pierre
f048b58935 Fix intra-pool migration on different SRs 2016-02-04 17:17:09 +01:00
Julien Fontanet
09f6200c2e Merge pull request #209 from vatesfr/abhamonr-checksum-verification-delta-backup
Create and verify checksums for VDI delta backups
2016-02-04 16:12:54 +01:00
wescoeur
354692fb06 Add checksum verification for delta backup on restore/merge. (fix vatesfr/xo-web#617) 2016-02-04 15:22:14 +01:00
Julien Fontanet
2c5858c2e0 Merge pull request #228 from vatesfr/pierre-vm-migration-details
Fix default migration
2016-02-04 15:18:31 +01:00
Pierre
1f41fd0436 Better handle of undefined maps 2016-02-04 11:36:04 +01:00
Pierre
e0bbefdfae Fix default migration 2016-02-04 11:02:43 +01:00
Julien Fontanet
bc6fbb2797 Xo#registerPlugin(): log errors. 2016-02-04 10:32:36 +01:00
Julien Fontanet
b579cf8128 Merge pull request #227 from vatesfr/https-redirect
Can redirect to HTTPs (fix vatesfr/xo-web#626).
2016-02-04 09:55:12 +01:00
Julien Fontanet
a94ed014b7 sample config: add redirectToHttps. 2016-02-04 09:52:37 +01:00
Julien Fontanet
0db991b668 Can redirect to HTTPs. 2016-02-03 17:39:39 +01:00
Julien Fontanet
347ced6942 Merge pull request #214 from vatesfr/better-https
Better https (fix vatesfr/xo-web#685)
2016-02-03 14:32:25 +01:00
Olivier Lambert
5d7a775b2b Merge pull request #225 from vatesfr/xo-acl-resolver
Use xo-acl-resolver.
2016-02-03 14:29:31 +01:00
Julien Fontanet
df732ab4bf Merge pull request #216 from vatesfr/vdi-snapshot-type
VDI-snapshot type
2016-02-03 14:14:42 +01:00
Fabrice Marsaud
31cd3953d6 Fixing VM object properties 2016-02-03 13:56:15 +01:00
Julien Fontanet
4666b13892 Use xo-acl-resolver. 2016-02-03 11:47:02 +01:00
Julien Fontanet
37d7ddb4b0 Merge pull request #224 from vatesfr/pierre-vm-migration-details
Custom VM migration (See vatesfr/xo-web#567)
2016-02-03 11:39:53 +01:00
Fabrice Marsaud
3abbaeb44b resolving VDI snapshots 2016-02-03 10:38:09 +01:00
Fabrice Marsaud
847ea49042 VDI-snapshot type 2016-02-03 09:57:42 +01:00
Julien Fontanet
779068c2ee HTTP security: use Helmet. 2016-02-02 20:49:33 +01:00
Julien Fontanet
140cd6882d Allows full TLS config. 2016-02-02 20:45:06 +01:00
Julien Fontanet
2e295c2391 Merge pull request #213 from vatesfr/fix-cpu-weight
Fixed cpuWeight removal for default
2016-02-02 10:24:32 +01:00
Fabrice Marsaud
596b0995f4 Prepare object values for xapi 2016-02-02 10:11:39 +01:00
Fabrice Marsaud
b61fe97893 Fixed cpuWeight removal for default 2016-02-02 09:55:40 +01:00
Julien Fontanet
209aa2ebe6 Add a TODO. 2016-02-01 17:14:05 +01:00
Julien Fontanet
c03a0e857e Merge pull request #188 from vatesfr/olivierlambert-cpu-weight
Ability to set vCPU weight
2016-02-01 16:00:21 +01:00
Olivier Lambert
2854d698e6 Implement vCPU weight 2016-02-01 15:56:36 +01:00
Pierre
944163be0e Bug fix: VDIs should be on chosen SRs. 2016-01-29 10:07:02 +01:00
Julien Fontanet
269a9eaff0 Xapi: small but important fix concerning imports. 2016-01-28 17:22:44 +01:00
Olivier Lambert
7f9c49cbc4 Merge pull request #208 from vatesfr/pierre-vm-migration-details
Custom VM migration. (See vatesfr/xo-web#567)
2016-01-28 17:02:28 +01:00
Julien Fontanet
2b6bfeeb15 Merge pull request #212 from vatesfr/contrep-better-snapshot-names
continuous replication: clearer VM snapshot names.
2016-01-28 16:05:03 +01:00
Julien Fontanet
fa9742bc92 continuous replication: clearer VM snapshot names. 2016-01-28 15:57:25 +01:00
Pierre
472e419abc Using forEach instead of for. Minor fixes. 2016-01-28 13:33:43 +01:00
Pierre
169d11387b Custom VM migration (See vatesfr/xo-web#567)
Optional parameters for migratePool:
- Migration network
- Map each VDI to an SR on destination host
- Map each VIF to a network on destination host
2016-01-28 13:33:43 +01:00
Julien Fontanet
e59ac6d947 Fixes regarding #660. 2016-01-28 11:42:06 +01:00
Olivier Lambert
e193b45562 Merge pull request #207 from vatesfr/abhamonr-avoid-metadata-imp-exp-delta-backups
Avoid metadata import/export in delta backups. (fix vatesfr/xo-web#651)
2016-01-28 11:35:04 +01:00
wescoeur
1ac34f810e Avoid metadata import/export in delta backups. (fix vatesfr/xo-web#651) 2016-01-28 11:12:21 +01:00
Olivier Lambert
e65e5c6e5f Merge pull request #211 from vatesfr/marsaudf-clear-logs#661
Marsaudf clear logs#661
2016-01-28 10:59:37 +01:00
Fabrice Marsaud
af6365c76a logger.delete 2016-01-28 09:00:15 +01:00
Julien Fontanet
8c672b23b5 Merge pull request #159 from vatesfr/marsaudf-smb-mounts#338
Remotes refactoring + SMB implementation.
2016-01-27 11:24:52 +01:00
Fabrice Marsaud
3b53f5ac11 fixes 2016-01-27 10:58:16 +01:00
Fabrice Marsaud
ccdc744748 fixes 2016-01-27 10:08:59 +01:00
Fabrice Marsaud
261f0b4bf0 typo fix 2016-01-27 09:11:45 +01:00
Fabrice Marsaud
495b59c2e5 update dependency 2016-01-26 17:34:00 +01:00
Fabrice Marsaud
d6e1c13c39 Handler and remotes reworked 2016-01-26 17:28:27 +01:00
Fabrice Marsaud
f7f13b9e07 PR feedback 2 2016-01-26 09:47:47 +01:00
Fabrice Marsaud
62564d747f Errors moved from API to core 2016-01-25 17:29:18 +01:00
Fabrice Marsaud
1d5d59c4c0 Remote handler reworked 2016-01-25 17:01:14 +01:00
Fabrice Marsaud
e8380b8a12 PR feedback 2016-01-25 11:45:53 +01:00
Fabrice Marsaud
c304d9cc62 No vdi merge through smb 2016-01-25 11:45:53 +01:00
Fabrice Marsaud
aad4ebf287 Remote handlers refactored, and adding a smb handler 2016-01-25 11:45:53 +01:00
Olivier Lambert
6c2f48181c Merge pull request #210 from vatesfr/handle-objects-conflicts
Properly handle multiple XAPI objects with the same XO id.
2016-01-22 16:02:19 +01:00
Julien Fontanet
480b6ff7d6 Properly handle multiple XAPI objects with the same XO id.
When there is a conflict, the existing object keeps its place, but when
it is removed, the other object (which is in the waiting list) will
take its place.
2016-01-22 15:57:44 +01:00
Julien Fontanet
4bdd6f972c Remove node-inspector. 2016-01-21 16:44:42 +01:00
Olivier Lambert
6674d8456a Merge pull request #206 from vatesfr/olivierlambert-fixMigration
Correctly use destination host SR and network
2016-01-20 18:31:33 +01:00
Olivier Lambert
d1478ff694 select the correct migration network 2016-01-20 18:12:37 +01:00
Julien Fontanet
cb20d46b74 Merge pull request #205 from vatesfr/abhamonr-fix-avoid-errors-delta-backups
Delta backups: Fix various issues.
2016-01-20 17:36:33 +01:00
Olivier Lambert
9dd2538043 correctly use destination host SR and network 2016-01-20 17:26:47 +01:00
wescoeur
f25136a512 Avoid errors in delta backups. (fix)
- Wait the task end of vdi export.
- Now, in the error case of vdi backup,
  the current vdi snapshot is removed with catch(noop).
2016-01-20 17:19:41 +01:00
Julien Fontanet
03eb56ad2a Xapi#_updateObjectMapProperty(): do not hide remove errors. 2016-01-20 16:04:23 +01:00
Julien Fontanet
2508840701 4.12.1 2016-01-19 12:49:37 +01:00
Julien Fontanet
6e098f5a4f Merge pull request #203 from vatesfr/fix-scheduling
Scheduler: properly use Xo#runJobSequense() (fix vatesfr/xo-web#657).
2016-01-19 12:45:36 +01:00
Julien Fontanet
31b33406fd Scheduler: properly use Xo#runJobSequense() (fix vatesfr/xo-web#657). 2016-01-19 12:12:29 +01:00
Julien Fontanet
7ab7c763ed startup: ignore non existent paths in plugins lookup. 2016-01-19 11:49:07 +01:00
Julien Fontanet
06258e757a 4.12.0 2016-01-18 10:25:41 +01:00
Julien Fontanet
5919b43a21 @mixin(): compatibility with Node 0.12 (fix #202). 2016-01-18 10:18:02 +01:00
Julien Fontanet
7d4b9521e7 Merge pull request #199 from vatesfr/continuous-replication
Continuous VM replication.
2016-01-17 23:51:29 +01:00
Julien Fontanet
f9d2fd7997 Xapi: Ugly hack seems to be working. 2016-01-17 23:28:45 +01:00
Julien Fontanet
bdbc20c3c6 Xapi: fix private put() when length is known. 2016-01-17 21:05:18 +01:00
Julien Fontanet
69d6d03714 Better debugs in Xapi. 2016-01-17 21:03:19 +01:00
Julien Fontanet
f40e1e55b0 Xapi#importVdiContent(): revert to use Promise.all() instead of Promise.race(). 2016-01-17 12:52:41 +01:00
Julien Fontanet
b9082ed838 Xapi#deleteVm(): Correctly remove VDIs with more than one VBD on the same VM. 2016-01-17 12:52:04 +01:00
Julien Fontanet
4edfefa9a2 Homogenise task names. 2016-01-17 12:52:04 +01:00
Julien Fontanet
0f98ee5407 Xapi#importVdiContent(): better task name. 2016-01-17 12:50:44 +01:00
Julien Fontanet
7fdf119873 Temporarily disable the ugly put hack. 2016-01-17 12:50:44 +01:00
Julien Fontanet
3c054e6ea1 Various changes. 2016-01-17 12:50:42 +01:00
Julien Fontanet
98899ece72 Use $ to prefix injected params names. 2016-01-17 12:49:23 +01:00
Julien Fontanet
2061a006d0 Xapi#createDeltaVdi(): correctly set the source of cloned VDI. 2016-01-17 12:49:23 +01:00
Julien Fontanet
5496c2d7fd Various fixes. 2016-01-17 12:49:22 +01:00
Julien Fontanet
d6b862a4a9 Xapi#_createVif(): Various fixes. 2016-01-17 12:49:22 +01:00
Julien Fontanet
d581f8a852 Xapi#importDeltaVm(): explicit error when base VDI is not found. 2016-01-17 12:49:22 +01:00
Julien Fontanet
3a593ee35a Xapi#_createVm(): clearer type handling. 2016-01-17 12:49:22 +01:00
Julien Fontanet
415d34fdaa Xo#copyDeltaVm(): Cancel exports on failures. 2016-01-17 12:49:22 +01:00
Julien Fontanet
7d28191bb5 Xapi#exportDeltaVm(): full export if the base is not found. 2016-01-17 12:49:20 +01:00
Julien Fontanet
e2c7693370 Xapi#importVdiContent(): do not wait for connection closure. 2016-01-17 12:48:38 +01:00
Julien Fontanet
f17ff02f4d Continuous replication: do not rely on metadata import/export. 2016-01-17 12:48:35 +01:00
Julien Fontanet
225043e01d Properly identify last snapshot as future base. 2016-01-16 19:34:35 +01:00
Julien Fontanet
56f78349f8 Xen expects keys(other_config) to be snake or it will change them itself! 2016-01-16 19:34:35 +01:00
Julien Fontanet
8839d4f55a Delete exportDeltaVm() snapshot on failure. 2016-01-16 19:34:34 +01:00
Julien Fontanet
2562aec1d2 Missing space in utils.pDebug(). 2016-01-16 19:34:34 +01:00
Julien Fontanet
db2361be84 Fix createVbd(). 2016-01-16 19:34:34 +01:00
Julien Fontanet
d08fcbfef3 Various fixes. 2016-01-16 19:34:29 +01:00
Julien Fontanet
7601b93e65 Various fixes. 2016-01-16 19:19:51 +01:00
Julien Fontanet
1103ec40e0 Xapi#importDeltaVm(): clean after failure. 2016-01-16 19:19:51 +01:00
Julien Fontanet
af32c7e3db Properly exports vm.deltaCopy(). 2016-01-16 19:19:51 +01:00
Julien Fontanet
170918eb3b Initial continuous replication. 2016-01-16 19:19:51 +01:00
Julien Fontanet
a91e615a8d @deferrable.onSuccess() 2016-01-16 19:19:51 +01:00
Julien Fontanet
cc92c26fe3 Xapi#_importVdiContent() 2016-01-16 19:19:46 +01:00
Julien Fontanet
937135db32 Xapi#_exportVdi() 2016-01-16 18:57:15 +01:00
Julien Fontanet
01366558b4 Xapi#_deleteVbd() 2016-01-16 18:57:15 +01:00
Julien Fontanet
b0dbd54ea4 Xapi#_disconnectVbd() 2016-01-16 18:57:15 +01:00
Julien Fontanet
f113915307 Xapi#_updateObjectMapProperty() can remove a property. 2016-01-16 18:57:15 +01:00
Julien Fontanet
0a3c3d9bb1 Xapi#remoteCopyVm() falls back on local copy if possible. 2016-01-16 18:50:09 +01:00
Julien Fontanet
ba2e005c3e Merge pull request #201 from vatesfr/custom-http-request
Custom HTTP request implementation instead of got.
2016-01-16 18:31:21 +01:00
Julien Fontanet
b9ea52d65f Add missing space in forbidden operations description. 2016-01-16 18:27:23 +01:00
Julien Fontanet
f1e328d333 Better error handling in patch unzipping. 2016-01-16 18:13:13 +01:00
Julien Fontanet
23f1965398 Custom HTTP request implementation instead of got. 2016-01-16 18:13:04 +01:00
Olivier Lambert
fc82f185cb Merge pull request #200 from vatesfr/abhamonr-forever-forward-incremental-backup-fix
Old vdi bases must be removed at the backup end.
2016-01-15 14:31:04 +01:00
wescoeur
56b25f373f Old vdi bases must be removed at the backup end. 2016-01-15 14:20:11 +01:00
Olivier Lambert
1ac6add122 Merge pull request #196 from vatesfr/abhamonr-forever-forward-incremental-backup
Forever forward incremental backup (fix vatesfr/xo-web#576)
2016-01-15 14:13:02 +01:00
wescoeur
91b1a903f9 Fix rejected backup. 2016-01-15 13:48:15 +01:00
wescoeur
a8d6654ef5 Forever forward incremental backup (fix vatesfr/xo-web#576) 2016-01-15 13:12:05 +01:00
Olivier Lambert
63093b1be6 Merge pull request #198 from vatesfr/abhamonr-vbd-set-bootable-fix-getxapi
vbd.setBootable use xo.getXapi() instead of xo.getXAPI()
2016-01-14 16:28:16 +01:00
wescoeur
60abe8f37e vbd.setBootable use xo.getXapi() instead of xo.getXAPI() 2016-01-14 16:22:16 +01:00
Olivier Lambert
7ba3909aa1 Merge pull request #175 from vatesfr/abhamonr-button-bootable-disk
Add vbd.setBootable api call.
2016-01-14 16:04:30 +01:00
Julien Fontanet
eecdba2d05 Merge pull request #197 from vatesfr/deferrable-decorator
deferrable() decorator.
2016-01-14 14:33:20 +01:00
Julien Fontanet
7bdc005aa7 @deferrable() works with async functions. 2016-01-14 14:24:09 +01:00
Julien Fontanet
d46703fdc4 Cosmetic changes in decorators spec. 2016-01-14 11:58:18 +01:00
Julien Fontanet
e4aa85f603 Cosmetic changes in decorators. 2016-01-14 11:58:18 +01:00
Julien Fontanet
233124ef50 deferrable.onFailure() 2016-01-14 11:58:11 +01:00
Julien Fontanet
36a3012de2 deferrable() decorator. 2016-01-14 11:16:51 +01:00
Olivier Lambert
2b4ee96ed7 Fix issue vatesfr/xo-web/issues/643 2016-01-13 18:55:35 +01:00
Julien Fontanet
85a2afd55c Add --safe-mode which do not registers plugins. 2016-01-13 15:53:55 +01:00
Julien Fontanet
6cd0d8456a Fix plugins (broken by Xo split). 2016-01-13 15:22:14 +01:00
Julien Fontanet
7750a0a773 Integrate api/xo-mixins indexes to the build. 2016-01-13 15:21:03 +01:00
Julien Fontanet
a5364b9257 Camel case: Xo#getXAPI() → Xo#getXapi(). 2016-01-13 14:39:40 +01:00
Julien Fontanet
e0e7b1406d Fix backups listing (broken by Xo split). 2016-01-13 12:12:40 +01:00
Julien Fontanet
38b67a0002 Merge pull request #192 from vatesfr/mixins
Split Xo with mixins.
2016-01-13 11:47:49 +01:00
Julien Fontanet
18dd4f8a52 Print start/stop errors. 2016-01-13 11:40:52 +01:00
Julien Fontanet
879f9b4ea9 Remove listeners after start/stop. 2016-01-13 11:40:51 +01:00
Julien Fontanet
3db0dda67a Fix a race condition in the scheduler. 2016-01-13 11:40:51 +01:00
Julien Fontanet
ed9ee15b90 Expose Xo#scheduler. 2016-01-13 11:40:51 +01:00
Julien Fontanet
44ff85e8e9 Rename Xo {start,stop}{,ing} events. 2016-01-13 11:40:51 +01:00
Julien Fontanet
cb07e9ba11 Split Xo with mixins. 2016-01-13 11:40:48 +01:00
Julien Fontanet
bfe05ce5fc Merge pull request #184 from vatesfr/abhamonr-disable-vm-start-during-delta-import
Disable vm start during delta import and explicit notification.
2016-01-13 11:25:56 +01:00
wescoeur
64ee23cec0 Disable vm start during delta import and explicit notification. (fix vatesfr/xo-web#613) 2016-01-13 11:20:58 +01:00
Julien Fontanet
c022d3c4a4 Merge pull request #182 from vatesfr/abhamonr-properly-remove-vdi-backups
Only delete VDI exports when VM backup is successful.
2016-01-13 10:38:15 +01:00
wescoeur
69c764301f Only delete VDI exports when VM backup is successful (fix vatesfr/xo-web#644). 2016-01-13 10:33:44 +01:00
Julien Fontanet
2f777daef6 Merge pull request #168 from vatesfr/cleaner-xo-stop
xo-server should properly stop on SIGINT/SIGTERM.
2016-01-12 17:38:30 +01:00
Julien Fontanet
a10bf7330e xo-server should properly stop on SIGINT/SIGTERM. 2016-01-12 17:33:32 +01:00
Julien Fontanet
782bb5967d Update level-party to 3.0.4. 2016-01-12 15:17:15 +01:00
Olivier Lambert
aeb2f55f0d Merge pull request #191 from vatesfr/prevent-concurrent-schedule-runs
A schedule cannot have concurrent runs (fix vatesfr/xo-web#642).
2016-01-11 14:43:24 +01:00
Julien Fontanet
ae68749b1b A schedule cannot have concurrent runs (fix vatesfr/xo-web#642). 2016-01-11 14:00:52 +01:00
Julien Fontanet
a3c25d56a0 Update deps. 2016-01-08 18:39:59 +01:00
Julien Fontanet
d2b9cc8df9 Merge pull request #189 from vatesfr/olivierlambert-change-name-during-import
Change VM name during VM delta import. Fix vatesfr/xo-web/issues/641
2016-01-07 14:08:45 +01:00
Olivier Lambert
2027daa75c Change name during VM delta import. Fix vatesfr/xo-web/issues/641 2016-01-07 14:02:53 +01:00
Julien Fontanet
f3493a08bd Api#addMethod() returns a remover function. 2016-01-05 18:16:04 +01:00
Julien Fontanet
f3963269ae Initialize FAT buffer with null bytes. 2016-01-04 14:43:49 +01:00
Julien Fontanet
ae2212c245 Merge pull request #183 from vatesfr/pierre-delete-running-vm
Delete not halted VMs. (vatesfr/xo-web/issues/616)
2015-12-31 09:48:37 +01:00
Julien Fontanet
3a19ac4c93 Merge pull request #187 from vatesfr/olivierlambert-vif-deletion
VIF delete typo. Fix issue vatesfr/xo-web/issues/632
2015-12-30 20:01:49 +01:00
Olivier Lambert
666f546cf0 VIF delete typo. Fix issue vatesfr/xo-web/issues/632 2015-12-30 19:57:48 +01:00
Julien Fontanet
464f57d7da Merge pull request #186 from vatesfr/olivierlambert-custom-templates
add 'install_repository' support for vatesfr/xo-web/issues/627
2015-12-30 17:21:12 +01:00
Olivier Lambert
2a192f33a1 add 'install_repository' support for vatesfr/xo-web/issues/627 2015-12-30 17:12:26 +01:00
Julien Fontanet
9ca2674261 Make unhandled rejected promises visible on exit. 2015-12-29 10:39:26 +01:00
Julien Fontanet
24bc91dc0c Minor optimizations. 2015-12-23 13:57:29 +01:00
Julien Fontanet
cf2d5b502f Do not remove VDIs attached to other VMs. 2015-12-22 16:27:34 +01:00
Julien Fontanet
61450ef602 Typo. 2015-12-22 16:24:50 +01:00
Julien Fontanet
78f1d1738e Properly ignore snapshot deletion failures after export. 2015-12-22 16:24:00 +01:00
Pierre
9f595cf5f7 Delete not halted VMs. (See vatesfr/xo-web#616) 2015-12-22 15:45:13 +01:00
Julien Fontanet
25b8e49975 4.11.0 2015-12-22 13:35:24 +01:00
Julien Fontanet
d40086cd13 Merge branch 'next-release' into stable 2015-12-22 13:34:57 +01:00
Olivier Lambert
8f9d8d93b9 Merge pull request #181 from vatesfr/fix-vbd-state-after-metadata-import
Fix vbd state after metadata import
2015-12-22 12:23:38 +01:00
Julien Fontanet
1080c10004 Call VM.power_state_reset after a metadata import (fix vatesfr/xo-web#615). 2015-12-22 12:18:02 +01:00
Julien Fontanet
866aeca220 Revert "Snapshots running VM for metadata export (see vatesfr/xo-web#615)."
This reverts commit 121b3afc61.

It is not possible to export metadata of a snapshot.
2015-12-22 11:41:04 +01:00
Julien Fontanet
121b3afc61 Snapshots running VM for metadata export (see vatesfr/xo-web#615). 2015-12-22 11:26:03 +01:00
Julien Fontanet
e8406b04b4 Merge pull request #180 from vatesfr/fix-memory-issue-importVmBackup
Fix memory issue on Xo#importVmBackup().
2015-12-21 19:46:06 +01:00
Julien Fontanet
8e7fe81806 Disable async traces for now (fix vatesfr/xo-web#608). 2015-12-21 19:20:48 +01:00
Olivier Lambert
852807b5d7 Merge pull request #178 from vatesfr/abhamonr-incremental-backups-integration
Some corrections for integration.
2015-12-21 19:05:57 +01:00
Olivier Lambert
9928d47fa2 PR comment review 2015-12-21 19:00:36 +01:00
wescoeur
412a1bd62a Some corrections for delta integration in xo-web.
- List delta backups in subfolders.
- Fix unhandled exception. (ENOENT)
- ...
2015-12-21 17:48:34 +01:00
Julien Fontanet
b290520951 vm.import() accepts a SR id. 2015-12-21 11:13:42 +01:00
Olivier Lambert
dde677b6d3 do NOT backup a CD drive 2015-12-18 21:45:03 +01:00
Julien Fontanet
75030847bd Merge pull request #177 from vatesfr/abhamonr-fix-remote-importVm
The vm import call use a sr instead of a host.
2015-12-18 17:23:48 +01:00
wescoeur
e7b9cb76bc The vm import call use a sr instead of a host. 2015-12-18 17:18:39 +01:00
Olivier Lambert
e96c4c0dd3 restore accidentally removed code 2015-12-18 16:56:25 +01:00
Julien Fontanet
b553b3fa50 Merge pull request #176 from vatesfr/olivierlambert-xenstorefix
Fix undefined xenstore_data sent to XAPI
2015-12-18 16:06:50 +01:00
Olivier Lambert
c6fb924b8f Fix undefined xenstore_data sent to XAPI 2015-12-18 16:00:06 +01:00
Julien Fontanet
b13844c4a6 Merge pull request #170 from vatesfr/pierre-read-only-connection
Connection to a Xen Server in read-only mode. (Fix vatesfr/xo-web#439)
2015-12-18 12:09:34 +01:00
Pierre
ab6c83a3fc Connection to a Xen Server in read-only mode. (Fix vatesfr/xo-web#439)
`updateXenServer` applies changes in database and also changes the connection's read-only status if the client is connected to this server.
2015-12-18 11:59:54 +01:00
Julien Fontanet
7e0a97973f Merge pull request #163 from vatesfr/abhamonr-incremental-backups
Implement rolling delta VM backup and import. (vatesfr/xo-web#494)
2015-12-18 11:24:31 +01:00
wescoeur
6a8a79bba5 Implement rolling delta VM backup and import. (vatesfr/xo-web#494) 2015-12-18 11:20:13 +01:00
wescoeur
4a0c58c50a Add vbd.setBootable api call. (fix vatesfr/xo-web/#583) 2015-12-17 17:07:35 +01:00
Julien Fontanet
eb0c963332 Coding style. 2015-12-17 16:21:25 +01:00
Julien Fontanet
023fe82932 Merge pull request #150 from vatesfr/olivierlambert-configdrive
Generic CloudConfig Drive
2015-12-16 18:19:51 +01:00
Julien Fontanet
2e1a06c7bf Generic Cloud Config drive. Fix vatesfr/xo-web/issues/549 2015-12-16 18:12:47 +01:00
Julien Fontanet
8b6961d40c VDI.resize{,_online} expect a string contrary to what the doc says. 2015-12-16 16:33:55 +01:00
Julien Fontanet
53351877da Fix typo: size → parseSize. 2015-12-16 15:48:33 +01:00
Julien Fontanet
522445894e Always parse sizes. 2015-12-16 15:36:01 +01:00
Julien Fontanet
550351bb16 Merge pull request #174 from vatesfr/abhamonr-import-vdi-content
Import VDI content is implemented.
2015-12-16 15:06:32 +01:00
wescoeur
328adbb56f Import VDI content is implemented. 2015-12-16 14:59:53 +01:00
Julien Fontanet
44a36bbba3 Use human-format v6 (much nicer with incorrect casing). 2015-12-16 13:17:07 +01:00
Julien Fontanet
4cc4adeda6 disk.{create,resize}() accept integers for size. 2015-12-16 11:34:46 +01:00
Julien Fontanet
c14e6f2a63 disk.resize() accepts human readable size. 2015-12-16 11:34:46 +01:00
Julien Fontanet
cfcb2d54d8 Merge pull request #172 from vatesfr/olivierlambert-vdimove
Allow offline VDI moving. Fix vatesfr/xo-web#591
2015-12-16 10:46:33 +01:00
Julien Fontanet
010d60e504 Coding style. 2015-12-16 09:46:39 +01:00
Julien Fontanet
eabde07ff6 Remove incorrect export. 2015-12-16 09:46:39 +01:00
Olivier Lambert
be19ad5f2a Allow offline VDI moving. Fix https://github.com/vatesfr/xo-web/issues/591 2015-12-15 19:07:20 +01:00
Julien Fontanet
d1d0816961 Merge pull request #171 from vatesfr/olivierlambert-existingdisks
allow edition of existing disks during VM creation
2015-12-15 17:56:38 +01:00
Olivier Lambert
7be7170504 allow edition of existing disks during VM creation 2015-12-15 17:51:03 +01:00
Julien Fontanet
478272f515 Merge pull request #167 from vatesfr/remoteCopyVm-set-nameLabel-asap
Xapi#remoteCopyVm() sets name label ASAP.
2015-12-15 13:49:13 +01:00
Julien Fontanet
09af6958c8 Xapi#remoteCopyVm() sets name label ASAP. 2015-12-15 11:35:03 +01:00
Julien Fontanet
adb3a2b64e Merge pull request #169 from vatesfr/olivierlambert-vdiresize
Clean VDI resize support
2015-12-14 17:09:12 +01:00
Olivier Lambert
1ee7e842dc Clean VDI resize support 2015-12-14 17:05:59 +01:00
Julien Fontanet
b080a57406 Merge pull request #162 from vatesfr/pierre-aborting-vm-export-cancels-operation
VM-export interruption properly transferred to Xen
2015-12-11 18:18:05 +01:00
Julien Fontanet
7c017e345a Minor updates. 2015-12-11 18:02:41 +01:00
Pierre
4b91343155 VM-export interruption properly transferred to Xen. (Fix vatesfr/xo-web#490)
When the connection with the client is lost, the export task is cancelled and the connection is closed.
As the task is over, the snapshot used for the export is deleted.

Cancelling the task is useless as it is cancelled by Xen
2015-12-11 18:02:41 +01:00
Julien Fontanet
02a3df8ad0 Merge pull request #164 from vatesfr/olivierlambert-import-metadata
Support VM metadata import. Fix vatesfr/xo-web#579
2015-12-11 17:01:47 +01:00
Olivier Lambert
6a7080f4ee Support VM metadata import. Fix vatesfr/xo-web#579 2015-12-11 16:56:56 +01:00
Julien Fontanet
4547042577 Fix issue in utils.createRawObject(). 2015-12-10 14:06:22 +01:00
Julien Fontanet
0e39eea7f8 Always use noop from utils. 2015-12-10 14:06:22 +01:00
Olivier Lambert
1e5aefea63 Merge pull request #161 from vatesfr/olivierlambert-set-default-sr
Set default sr
2015-12-10 12:14:27 +01:00
Olivier Lambert
02c4f333b0 minor fixes 2015-12-10 12:08:30 +01:00
Olivier Lambert
1e8fc4020b Add setDefaultSr, fix #572 2015-12-10 11:01:05 +01:00
Olivier Lambert
f969701ac1 Merge pull request #155 from vatesfr/pierre-plugins-autodiscovery
Installed plugins are detected even if the config file does not show …
2015-12-09 20:41:46 +01:00
Olivier Lambert
b236243857 Merge pull request #160 from vatesfr/marsaudf-no-backup-if-unmounted
Rolling backup fails immediately if remote is disabled. Fix https://github.com/vatesfr/xo-web/issues/561
2015-12-09 19:13:01 +01:00
Fabrice Marsaud
39edc64922 Rolling backup fails immediately if remote is disabled 2015-12-09 17:18:50 +01:00
Pierre
f22ece403f Installed plugins are automatically detected when the server starts. They are no more loaded from the config file.
Plugins (ie installed modules of which names start with `xo-server-`) are automatically detected and registered from `${__dirname}/node_modules/` and `/usr/local/lib/node_modules/`.
2015-12-09 16:56:48 +01:00
Julien Fontanet
f5423bb314 Merge pull request #158 from vatesfr/olivierlambert-recoveryStart
Generic recovery start (PV or HVM)
2015-12-07 18:11:03 +01:00
Julien Fontanet
b1e5945ebe Xapi#startVm() and Xapi#startVmOnCd() (fix vatesfr/xo-web#563). 2015-12-07 18:02:55 +01:00
Julien Fontanet
76b5be8171 Revert "Freeze config object before configuring plugin."
This reverts commit 789f51bd2a.

The change introduced issues with nodemailer used in xo-server-transport-email.
2015-12-03 16:00:32 +01:00
Julien Fontanet
804bca2041 Merge pull request #148 from vatesfr/abhamonr-purge-plugin-config
The plugins configurations can be cleaned.
2015-12-03 14:37:32 +01:00
Julien Fontanet
10602b47b4 4.10.2 2015-12-03 12:20:48 +01:00
Julien Fontanet
8d7c522596 Merge pull request #154 from vatesfr/julienf-fix-patches-for-6.1
Fix patches handling for XenServer 6.1.
2015-12-03 12:20:05 +01:00
Julien Fontanet
3ac455c5a7 Fix patches handling for XenServer 6.1. 2015-12-03 12:16:56 +01:00
Julien Fontanet
2b19a459df Merge pull request #152 from vatesfr/julienf-handle-xapiToXo-failures
Handle XAPI to XO objects failures.
2015-12-03 12:15:29 +01:00
Julien Fontanet
41ba2d9bf6 Properly schedule retry for the next loop. 2015-12-03 12:10:29 +01:00
Julien Fontanet
a7b5eb69d3 Handle XAPI to XO objects failures. 2015-12-03 11:13:00 +01:00
Julien Fontanet
67c209bb5e Properly handle the case where the pool object is not here yet. 2015-12-03 10:27:23 +01:00
Julien Fontanet
a6d436d9ea 4.10.1 2015-12-02 17:45:30 +01:00
Julien Fontanet
652c784e13 Update xen-api to 0.6.8 (fix vatesfr/xo-web#552). 2015-12-02 17:41:32 +01:00
wescoeur
a0a3b7a158 The plugins configurations can be cleaned. 2015-12-02 16:15:23 +01:00
Julien Fontanet
789f51bd2a Freeze config object before configuring plugin. 2015-12-02 15:20:38 +01:00
Olivier Lambert
c2f1a74f96 Merge pull request #149 from vatesfr/julienf-fix-vm-migration-collision
Use a different id (opaque ref) for VMs which are under migration.
2015-11-30 17:40:31 +01:00
Julien Fontanet
a9ed7a3f3b Use a different id (opaque ref) for VMs which are under migration. 2015-11-30 17:34:12 +01:00
Julien Fontanet
b348e88a5f 4.10.0 2015-11-27 14:24:08 +01:00
Julien Fontanet
1615395866 Merge branch 'next-release' into stable 2015-11-27 14:23:57 +01:00
Julien Fontanet
e483abcad0 Merge pull request #130 from vatesfr/marsaudf-generic-job-schedules
Enhancements to prepare generic job scheduling
2015-11-27 12:03:27 +01:00
Fabrice Marsaud
12b6760f6e Extend job & schedule API with job & schedule names, and job.runSequence 2015-11-27 11:56:37 +01:00
Julien Fontanet
6fde6d7eac Expose plugin config validation errors (vatesfr/xo-web#530). 2015-11-26 16:18:39 +01:00
Julien Fontanet
a7ef891217 Merge pull request #146 from vatesfr/abhamonr-abort-vm-export-import
Start VM export only when necessary.
2015-11-26 16:10:59 +01:00
wescoeur
8f22dfe87b Start VM export only when necessary. 2015-11-26 15:42:07 +01:00
Julien Fontanet
2dc7fab39a Merge pull request #134 from vatesfr/abhamonr-backup-jobs-notifications
Xo event `job:terminated` on job termination.
2015-11-26 15:29:39 +01:00
wescoeur
74cb2e3c63 'job:terminated' signal is emitted after job execution. (with one executionStatus object) 2015-11-26 11:07:37 +01:00
Julien Fontanet
6e763a58f1 Expose UUIDs on all Xapi objects which have one. 2015-11-25 16:53:25 +01:00
Julien Fontanet
a8e72ed410 Merge pull request #140 from vatesfr/julienf-xo-defineProperty
Xo#defineProperty() to properly expose attributes
2015-11-25 14:16:55 +01:00
Julien Fontanet
fcdfd5f936 Merge pull request #139 from vatesfr/abhamonr-remember-disabled-servers
Save enabled state for each server
2015-11-25 13:54:44 +01:00
Julien Fontanet
f1faa463c1 Xo#defineProperty() allows (plugins) to define property on Xo instance 2015-11-25 12:27:09 +01:00
wescoeur
a0f4952b54 Save enabled state for each server 2015-11-25 12:15:15 +01:00
Olivier Lambert
bd82ded07d automatically set autopoweron on the pool 2015-11-25 12:06:16 +01:00
Julien Fontanet
016e17dedb Merge pull request #145 from vatesfr/julienf-pool-autopoweron
Xapi#setPoolProperties() supports autoPowerOn.
2015-11-25 12:03:23 +01:00
Julien Fontanet
5cd3e1b368 Xapi#setPoolProperties() supports autoPowerOn. 2015-11-25 11:03:33 +01:00
Julien Fontanet
b2b39458da Merge pull request #144 from vatesfr/julienf-fix-redis-items-updates
Fix items updates in Redis.
2015-11-25 10:57:54 +01:00
Julien Fontanet
07288b3f26 Fix items updates in Redis. 2015-11-25 10:39:50 +01:00
Julien Fontanet
90f79b7708 Shallow copy the configuration object before configuring a plugin. (fix vatesfr/xo-web#513) 2015-11-24 11:25:52 +01:00
Julien Fontanet
e220786a20 Merge pull request #142 from vatesfr/marsaudf-parameter-fix
Fix: group.getAll() has no params.
2015-11-24 10:49:51 +01:00
Fabrice Marsaud
f16b993294 Removed unwanted API parameter 2015-11-24 10:42:43 +01:00
Julien Fontanet
c241bea3bf Add vendor config file. 2015-11-20 18:28:32 +01:00
Julien Fontanet
084654cd3c Merge branch 'stable' into next-release 2015-11-20 18:17:08 +01:00
Julien Fontanet
d21742afb6 4.9.2 2015-11-20 18:05:26 +01:00
Julien Fontanet
b5259384e8 Merge pull request #138 from vatesfr/julienf-fix-tokens-expiration
Auth tokens expires after one month (side effect: remove old tokens).
2015-11-20 18:05:03 +01:00
Julien Fontanet
bf78ad9fbe Auth tokens expires after one month (side effect: remove old tokens). 2015-11-20 17:42:18 +01:00
Olivier Lambert
ab3577c369 Merge pull request #136 from vatesfr/olivierlambert-cloudconfig
Cloud config management for CoreOS
2015-11-20 17:30:53 +01:00
Olivier Lambert
6efb90c94e Merge pull request #135 from vatesfr/pierre-emergency-host-shutdown
emergencyHostShutdown(hostId) : suspends all the VMs running on the host
2015-11-20 17:30:27 +01:00
Olivier Lambert
cbcc400eb4 Cloud config management for CoreOS 2015-11-20 17:12:12 +01:00
Julien Fontanet
15aec7da7e vm.clone() requires permissions on SRs. 2015-11-20 16:19:33 +01:00
Julien Fontanet
46535e4f56 Utils: pAll() & pReflect() 2015-11-20 15:31:55 +01:00
Julien Fontanet
e3f945c079 Minor fixes. 2015-11-20 14:34:07 +01:00
Julien Fontanet
04239c57fe pSettle() returns an object for an object. 2015-11-20 11:42:49 +01:00
Pierre
ad4439ed55 emergencyHostShutdown(hostId) : suspends all the VMs running on the host and then shuts the host down 2015-11-20 11:26:03 +01:00
Julien Fontanet
9fe3ef430f More tests for pSettle(). 2015-11-20 10:43:56 +01:00
Julien Fontanet
ff30773097 Fix pSettle() to accept non-promises. 2015-11-20 10:43:56 +01:00
Julien Fontanet
f7531d1e18 pSettle() accepts either arrays or objects. 2015-11-20 10:43:56 +01:00
Olivier Lambert
658008ab64 add comment for quiesce 2015-11-19 16:03:28 +01:00
Olivier Lambert
b089d63112 allow snapshots on halted VMs 2015-11-19 16:01:08 +01:00
Julien Fontanet
ee9b1b7f57 Merge pull request #133 from vatesfr/abhamonr-validate-config-plugins-registration
Avoid plugin loading if config is not valid
2015-11-19 14:40:33 +01:00
wescoeur
cd0fc8176f Avoid plugin loading if config is not valid 2015-11-19 13:51:25 +01:00
Julien Fontanet
8e291e3e46 Define CoffeeScript API modules as ES6 (fix default value). 2015-11-19 13:13:53 +01:00
Julien Fontanet
e3024076cd Merge pull request #131 from vatesfr/abhamonr-delete-user-with-tokens
remove tokens on user deletion
2015-11-19 12:55:38 +01:00
wescoeur
6105874abc remove tokens on user deletion 2015-11-19 12:40:31 +01:00
Julien Fontanet
1855f7829d Advanced setting: verboseApiLogsOnErrors. 2015-11-19 11:11:54 +01:00
Julien Fontanet
456e8bd9c0 New FIXME. 2015-11-19 11:11:54 +01:00
Julien Fontanet
d5f2efac26 Merge pull request #132 from vatesfr/abhamonr-add-leveldown-dep
Add leveldown dep
2015-11-19 10:45:17 +01:00
wescoeur
21e692623c Add leveldown dep 2015-11-19 10:40:11 +01:00
Julien Fontanet
80e9589af5 Initial --repair command in xo-server-logs. 2015-11-19 10:11:01 +01:00
Julien Fontanet
b2b9ae0677 Quick fix for groups.getAll() attributes. 2015-11-19 09:26:34 +01:00
Julien Fontanet
63122905e6 Comments. 2015-11-18 18:36:27 +01:00
Julien Fontanet
f99b6f4646 Merge pull request #123 from vatesfr/abhamonr-logs-cli
CLI to explore xo-server's logs.
2015-11-18 17:01:26 +01:00
wescoeur
39090c2a22 Logs CLI:
- Can print logs for one namespace or all namespaces
- Can sort logs since one start timestamp/until one end timestamp
- The sort results can be limited by one value
2015-11-18 16:57:28 +01:00
Julien Fontanet
76baa8c791 Minor fix. 2015-11-18 16:40:08 +01:00
Julien Fontanet
74e4b9d6d2 Merge imports. 2015-11-18 15:24:38 +01:00
Julien Fontanet
bbfc5039f7 Merge pull request #129 from vatesfr/abhamonr-ghost-user
Avoid ghost user and ghost group.
2015-11-18 15:14:31 +01:00
wescoeur
b2fd694483 Avoid ghost user and ghost group. 2015-11-18 15:02:54 +01:00
Julien Fontanet
b03f38ff22 Include user name in console proxy logs. 2015-11-17 16:24:22 +01:00
Julien Fontanet
fe48811047 Include user name in API logs. 2015-11-17 16:24:07 +01:00
Julien Fontanet
bd9396b031 Ability to not create users on first sign in (fix #497). 2015-11-17 15:59:31 +01:00
Julien Fontanet
f0497ec16d Move default configuration to config.json. 2015-11-17 15:49:45 +01:00
Julien Fontanet
7e9e179fa7 Minor fixes. 2015-11-17 15:01:58 +01:00
Julien Fontanet
de62464ad8 Improve security: check token for console access. 2015-11-17 15:01:58 +01:00
Julien Fontanet
f6911ca195 Merge pull request #128 from vatesfr/olivierlambert-setbootorder
Rename vm.bootOrder() to vm.setBootOrder() and ensure VM is HVM.
2015-11-17 15:00:03 +01:00
Olivier Lambert
aec09ed8d2 Rename vm.bootOrder() to vm.setBootOrder() and ensure VM is HVM 2015-11-17 14:56:15 +01:00
Julien Fontanet
51a983e460 Logs clients IPs for WebSocket connections. 2015-11-17 13:24:07 +01:00
Julien Fontanet
0eb46e29c7 Merge pull request #122 from vatesfr/fix-poolPatches-removal
Properly remove objects for which `xo.id !== xapi.$id`.
2015-11-17 11:00:05 +01:00
Julien Fontanet
5ee11c7b6b Properly remove objects for which xo.id !== xapi.$id. 2015-11-17 10:33:56 +01:00
Olivier Lambert
b55accd76f add tag for quiesced snapshots 2015-11-16 12:41:16 +01:00
Julien Fontanet
fef2be1bc7 Merge pull request #125 from vatesfr/olivierlambert-snapquiesce
Add VM snapshot quiesce support
2015-11-16 10:59:39 +01:00
Olivier Lambert
0b3858f91d Add VM snapshot quiesce support 2015-11-16 10:55:51 +01:00
Julien Fontanet
d07ea1b337 Explicit Node versions compatibility. 2015-11-16 10:31:28 +01:00
Julien Fontanet
7e2dbc7358 4.9.1 2015-11-13 17:08:54 +01:00
Julien Fontanet
c676f08a7c Use correct host to import VM without known length. 2015-11-13 16:53:00 +01:00
Julien Fontanet
92f24b5728 Sets a long timeout for vm.import(). 2015-11-13 16:08:44 +01:00
Julien Fontanet
0254e71435 4.9.0 2015-11-13 11:27:40 +01:00
Julien Fontanet
2972fc5814 Merge pull request #124 from vatesfr/abhamonr-logs-avoid-sublevel
avoid sublevel for logs
2015-11-13 11:08:44 +01:00
wescoeur
975c96217c avoid sublevel for logs 2015-11-13 11:03:02 +01:00
Julien Fontanet
c30c1848bc Fix VM naming in vm.rollingDrCopy(). 2015-11-12 18:21:35 +01:00
Julien Fontanet
94615d3b36 Fix optional options in Xapi#remoteCopyVm(). 2015-11-12 18:21:12 +01:00
Julien Fontanet
37a00f0d16 Minor fixes. 2015-11-12 15:25:34 +01:00
Julien Fontanet
0dbe70f5af vm.copy() handles running VMs and compression can be disabled. 2015-11-12 11:02:05 +01:00
Fabrice Marsaud
7584374b0b Merge pull request #119 from vatesfr/marsaudf-DR-API
Disaster recovery feature
2015-11-12 10:24:57 +01:00
Fabrice Marsaud
71ca51dc1a Disaster recovery feature 2015-11-12 10:20:57 +01:00
Fabrice Marsaud
aa81e72e45 Merge pull request #115 from vatesfr/abhamonr-logger-module
Logger is implemented.
2015-11-12 10:16:04 +01:00
wescoeur
9954bb9c15 Logger is implemented.
Logger save jobs events : (start, end, call start, call end)
Logs can be obtained in api.
2015-11-12 09:59:56 +01:00
Julien Fontanet
e5c0250423 Xapi#importVm() crash on failure. 2015-11-10 21:28:01 +01:00
Olivier Lambert
135799ed5e Merge pull request #121 from vatesfr/marsaudf-backup-fix
Fixed rolling tag detection
2015-11-10 16:24:35 +01:00
Fabrice Marsaud
22c3b57960 Fixed backup rolling bug 2015-11-10 16:18:09 +01:00
Fabrice Marsaud
7054dd74a4 Merge pull request #120 from vatesfr/marsaudf-function-move
Moving rollingSnapshot form xapi to xo
2015-11-10 15:34:49 +01:00
Fabrice Marsaud
d4f1e52ef6 fix 2015-11-10 09:04:11 +01:00
Fabrice Marsaud
76a44459cf Moving rollingSnapshot form xapi to xo 2015-11-10 08:49:38 +01:00
Julien Fontanet
a5590b090c Merge pull request #116 from vatesfr/julien-f-vm-copy
vm.copy() can a copy a VM on a remote or local SR.
2015-11-09 15:14:04 +01:00
Julien Fontanet
74c9a57070 Replace lodash.assign() with ES2016 object spread. 2015-11-09 15:07:42 +01:00
Julien Fontanet
06e283e070 vm.copy() can a copy a VM on a remote or local SR. 2015-11-09 14:40:49 +01:00
Julien Fontanet
8ab2ca3f24 Remove unnecessary Bluebird-isms. 2015-11-09 12:05:18 +01:00
Julien Fontanet
0eb949ba39 Move often used lodash utils to utils.js 2015-11-09 12:03:34 +01:00
Julien Fontanet
be35693814 Minor rewrite in utils. 2015-11-09 11:20:41 +01:00
Julien Fontanet
1e5f13795c Merge pull request #117 from vatesfr/promise-settle
Implement utils.pSettle().
2015-11-06 14:51:56 +01:00
Julien Fontanet
cca2265633 Fix PV drivers detection. 2015-11-06 13:23:03 +01:00
Julien Fontanet
0f0d1ac370 Expose vm.xenTools property. 2015-11-06 13:08:03 +01:00
Julien Fontanet
d52d4ac183 Implement utils.pSettle(). 2015-11-06 11:17:35 +01:00
Julien Fontanet
841220fd01 Remove unused variable. 2015-11-05 16:42:17 +01:00
Julien Fontanet
ca5e10784b Only use absolute paths in redirects to ease reverse proxies. 2015-11-05 16:33:07 +01:00
Olivier Lambert
712319974b Merge pull request #114 from vatesfr/marsaudf-import-backup
Import a VM from a remote backup
2015-11-05 15:46:26 +01:00
Fabrice Marsaud
067a6d01bc List backup Files in remotes ans import them for restore 2015-11-05 15:38:09 +01:00
Julien Fontanet
27825f9e2e Unit tests for utils.pFinally(). 2015-11-04 15:36:07 +01:00
Julien Fontanet
425eb115dc Use ES2016 bind syntax for pFinally(). 2015-11-04 14:54:10 +01:00
Julien Fontanet
0a5ce55e2b Expose xapi.parseDateTime(). 2015-11-03 12:03:57 +01:00
Julien Fontanet
dbc8ed9d4c Update deps. 2015-11-03 10:54:14 +01:00
Julien Fontanet
e31e990684 Do not use ES6 method String#endsWith(). 2015-11-03 10:31:21 +01:00
Julien Fontanet
8618f56481 Support vm.import() without knowning the length. 2015-10-30 18:55:04 +01:00
Julien Fontanet
a39fc4667e Do not test on Node 5 for now. 2015-10-30 17:10:25 +01:00
Julien Fontanet
4c369e240b Merge pull request #112 from vatesfr/abhamonr-stats-use-server-time
The local xen orchestra timestamp is never used for get stats.
2015-10-30 14:32:56 +01:00
wescoeur
4e291d01d4 The local xen orchestra timestamp is never used for get stats.
Only the timestamp of one xenServer/rrd is used.
2015-10-30 14:18:37 +01:00
Julien Fontanet
b32fce46cb Merge pull request #111 from vatesfr/abhamonr-fixs-debugs-stats-messages
Fix debugs messages for stats
2015-10-29 18:14:13 +01:00
wescoeur
d1fcd45aac Fix debugs messages for stats 2015-10-29 16:49:13 +01:00
Julien Fontanet
ebdb92c708 4.8.1 2015-10-29 16:31:10 +01:00
Julien Fontanet
112909d35b Merge pull request #110 from vatesfr/julien-f-fix-password-rehash
Fix user password rehash.
2015-10-29 16:30:29 +01:00
Julien Fontanet
c6e2c559f1 Fix user password rehash. 2015-10-29 16:11:45 +01:00
Julien Fontanet
cf8613886a 4.8.0 2015-10-29 10:45:37 +01:00
Olivier Lambert
39c25c2001 moving json5 as a real dep and not a dev dep anymore. Fix #109 2015-10-28 19:21:32 +01:00
Julien Fontanet
9c9721ade5 Fix permissions checking for API -_-" 2015-10-28 17:22:09 +01:00
Julien Fontanet
4b8abe4ce8 Fix test.hasPermission(). 2015-10-28 17:09:33 +01:00
Julien Fontanet
33dfaba276 Remove useless console.log. 2015-10-28 16:56:50 +01:00
Julien Fontanet
dd8bbcf358 VM snapshots require operate permission on all SRs. (fix vatesfr/xo-web#429). 2015-10-28 16:56:41 +01:00
Julien Fontanet
cc3e4369ed Fix some VM-snapshot & VDI ACLs resolution. 2015-10-28 16:51:09 +01:00
Julien Fontanet
548355fce6 Use Xo#getUserByName() where possible. 2015-10-28 16:17:33 +01:00
Julien Fontanet
a4e0e6544b Merge pull request #108 from vatesfr/julien-f-acl-inheritance
ACLs inheritance (fix vatesfr/xo-web#279).
2015-10-28 15:29:31 +01:00
Julien Fontanet
62067e0801 ACLs inheritance (fix vatesfr/xo-web#279). 2015-10-28 15:11:28 +01:00
Olivier Lambert
5eb40d2299 Merge pull request #107 from vatesfr/pierre-remember-me-label
Clicking on 'remember me' label checks the box
2015-10-28 14:49:59 +01:00
Pierre
53990a531b Clicking on 'remember me' label checks the box 2015-10-28 14:46:14 +01:00
Olivier Lambert
d799aea3c4 Merge pull request #105 from vatesfr/abhamonr-intelligent-stats
New parser for host and vms stats.
2015-10-28 14:44:56 +01:00
Julien Fontanet
9af86cbba2 Adds granularity param spec to {host,vm}.stats(). 2015-10-28 14:35:00 +01:00
wescoeur
6fbfece4ff New parser for host and vms stats.
The data are cached for other http requests.
2015-10-28 14:20:48 +01:00
Julien Fontanet
d56cca7873 Use undefined provider for local providers. 2015-10-28 12:38:55 +01:00
Julien Fontanet
fa1096a6ba Remove unused variable. 2015-10-28 12:38:27 +01:00
Julien Fontanet
16ff721331 Throttle authentications tries by user (fix #339). 2015-10-28 12:11:50 +01:00
Julien Fontanet
798ee9dc46 Remove commented code. 2015-10-28 11:45:18 +01:00
Julien Fontanet
bc17c60305 Refactor Xo#authenticateUser(). 2015-10-28 11:44:17 +01:00
Julien Fontanet
432688d577 Remove unused code. 2015-10-28 11:44:17 +01:00
Julien Fontanet
da8d4b47f1 Remove some console logs. 2015-10-28 11:44:17 +01:00
Olivier Lambert
ed1e2e2449 Merge pull request #102 from vatesfr/julien-f-refactor-vm-import
Refactor `vm.import()`
2015-10-27 18:26:29 +01:00
Julien Fontanet
b9744b4688 Refactor vm.import()
- Move logic code to `Xapi#importVm()`
- `vm.import()` following request now returns the id of the imported VM
2015-10-27 18:00:51 +01:00
Julien Fontanet
2f328f8f37 Merge pull request #106 from vatesfr/pierre-patch-name-in-upload-task-description
'Pending tasks' panel: upload task displays patch name
2015-10-27 12:23:59 +01:00
Pierre
ffefd7e50b Added default patch name for uploadPoolPatch to handle handlePatchUpload in pool.js 2015-10-27 11:07:45 +01:00
Pierre
c2445f8a7c 'Pending tasks' panel: upload task displays patch name 2015-10-27 10:05:14 +01:00
Julien Fontanet
54d44079cc Remove unused dep. 2015-10-27 09:16:59 +01:00
Julien Fontanet
87f089a12c Do not proxy data when the connection is closed. 2015-10-26 18:30:45 +01:00
Julien Fontanet
aa2172e4db Use source-map-support-2 from npm (fix old npm). 2015-10-26 18:30:45 +01:00
Julien Fontanet
0a33e94e79 Remove unused variable. 2015-10-26 18:30:44 +01:00
Julien Fontanet
fada54abae Enable XO API HTTP requests without HTTP auth. 2015-10-26 18:30:44 +01:00
Olivier Lambert
802641b719 Merge pull request #103 from vatesfr/pierre-install-all-patches-on-host
Algorithm to install all patches in correct order
2015-10-26 18:24:07 +01:00
Pierre
1da93829d4 Algorithm to install patches in correct order 2015-10-26 17:56:50 +01:00
Julien Fontanet
9e7acbc49a Avoid special chars in date formatting (fix #448). 2015-10-26 16:49:41 +01:00
Julien Fontanet
318765d40b Remove unused dep. 2015-10-26 10:47:07 +01:00
Julien Fontanet
94ba20dfa1 Register objects with the correct key. 2015-10-23 16:26:06 +02:00
Julien Fontanet
2ad3dc4a32 Uniformize pool patches between collection and host.listMissingPatches(). 2015-10-23 15:09:13 +02:00
Julien Fontanet
eef940dd7c Pool patches ids are opaque refs instead of UUIDs. 2015-10-23 15:08:29 +02:00
Julien Fontanet
1b5fc12ac1 Use createRawObject() instead of Object.create(null). 2015-10-21 17:17:15 +02:00
Julien Fontanet
c1c7b8dfcd Minor updates in utils.js 2015-10-21 17:15:49 +02:00
Julien Fontanet
d4510c2afe Put schemas in a dedicated directory. 2015-10-20 18:02:57 +02:00
Julien Fontanet
f241f073a3 Merge pull request #101 from vatesfr/julien-f-hvm-network-install
Set correct boot order when installing HVM from network.
2015-10-19 15:08:50 +02:00
Julien Fontanet
26a6c72611 Set correct boot order when installing HVM from network. 2015-10-19 15:06:08 +02:00
Julien Fontanet
51cee7804b Merge pull request #100 from vatesfr/julien-f-expose-virtualization-mode
Expose virtualization mode in VM object.
2015-10-19 15:02:40 +02:00
Julien Fontanet
52228430f1 Expose virtualization mode in VM object. 2015-10-19 14:30:08 +02:00
Julien Fontanet
4fcd45d8a4 Do not filter out node_modules in stack traces. 2015-10-16 09:53:13 +02:00
Julien Fontanet
ea5736947d No need to parse source maps for ignored call sites. 2015-10-16 09:42:16 +02:00
Julien Fontanet
6b5f36fb7e Move source mapping after async support. 2015-10-15 15:05:42 +02:00
Julien Fontanet
b328d6d95f Augment stack traces limit to 100. 2015-10-15 15:05:42 +02:00
Julien Fontanet
2e06921bf8 Merge pull request #99 from vatesfr/abhamonr-stats-optimization-host
Cache Host stats for 5 secs to avoid unnecessary requests.
2015-10-15 11:30:07 +02:00
Julien Fontanet
fd95e2d711 Use source-map-support-2.
It is based on stack-chain and should give better results with
`trace`.
2015-10-15 10:42:51 +02:00
wescoeur
490e253b79 Cache Host stats for 5 secs to avoid unnecessary requests. 2015-10-14 17:03:53 +02:00
Julien Fontanet
8a10f5cd52 Merge pull request #98 from vatesfr/abhamonr-stats-optimization
Cache VM stats for 5 secs to avoid unnecessary requests.
2015-10-14 16:43:20 +02:00
wescoeur
5cebcc2424 Cache VM stats for 5 secs to avoid unnecessary requests. 2015-10-14 16:34:57 +02:00
Julien Fontanet
7663b89289 Allowed favicon before sign in. 2015-10-13 11:49:38 +02:00
86 changed files with 7845 additions and 2710 deletions

2
.gitignore vendored
View File

@@ -1,5 +1,7 @@
/dist/
/node_modules/
/src/api/index.js
/src/xo-mixins/index.js
npm-debug.log
npm-debug.log.*

View File

@@ -1,8 +1,8 @@
language: node_js
node_js:
- 'stable'
# - 'stable'
- '4'
- '0.12'
- '0.10'
# Use containers.
# http://docs.travis-ci.com/user/workers/container-based-infrastructure/

View File

@@ -1,12 +1,16 @@
try { require('source-map-support/register') } catch (_) {}
Error.stackTraceLimit = 100
// Async stacks.
try { require('trace') } catch (_) {}
//
// Disabled for now as it cause a huge memory usage with
// fs.createReadStream().
// TODO: find a way to reenable.
//
// try { require('trace') } catch (_) {}
// Removes node_modules and internal modules.
// Removes internal modules.
try {
var sep = require('path').sep
var path = __dirname + sep + 'node_modules' + sep
require('stack-chain').filter.attach(function (_, frames) {
var filtered = frames.filter(function (frame) {
@@ -17,10 +21,7 @@ try {
name &&
// contains a separator (no internal modules)
name.indexOf(sep) !== -1 &&
// does not start with the current path followed by node_modules.
name.lastIndexOf(path, 0) !== 0
name.indexOf(sep) !== -1
)
})
@@ -31,3 +32,6 @@ try {
: frames
})
} catch (_) {}
// Source maps.
try { require('julien-f-source-map-support/register') } catch (_) {}

View File

@@ -7,4 +7,7 @@
// Better stack traces if possible.
require('../better-stacks')
// Make unhandled rejected promises visible on exit.
require('loud-rejection/register')
require('exec-promise')(require('../'))

10
bin/xo-server-logs Executable file
View File

@@ -0,0 +1,10 @@
#!/usr/bin/env node
'use strict'
// ===================================================================
// Better stack traces if possible.
require('../better-stacks')
require('exec-promise')(require('../dist/logs-cli'))

27
config.json Normal file
View File

@@ -0,0 +1,27 @@
// Vendor config: DO NOT TOUCH!
//
// See sample.config.yaml to override.
{
"http": {
"listen": [
{
"port": 80
}
],
"mounts": {}
},
"datadir": "/var/lib/xo-server/data",
// Should users be created on first sign in?
//
// Necessary for external authentication providers.
"createUserOnFirstSignin": true,
// Whether API logs should contains the full request/response on
// errors.
//
// This is disabled by default for performance (lots of data) and
// security concerns (avoiding sensitive data in the logs) but can
// be turned for investigation by the administrator.
"verboseApiLogsOnErrors": false
}

View File

@@ -1,6 +1,6 @@
{
"name": "xo-server",
"version": "4.7.0",
"version": "4.13.1",
"license": "AGPL-3.0",
"description": "Server part of Xen-Orchestra",
"keywords": [
@@ -19,6 +19,7 @@
"better-stacks.js",
"bin/",
"dist/",
"config.json",
"index.js",
"signin.jade"
],
@@ -29,73 +30,102 @@
"type": "git",
"url": "git://github.com/vatesfr/xo-server.git"
},
"engines": {
"node": ">=0.12 <5"
},
"dependencies": {
"@marsaud/smb2-promise": "^0.1.0",
"app-conf": "^0.4.0",
"babel-runtime": "^5",
"base64url": "1.0.4",
"base64url": "^1.0.5",
"blocked": "^1.1.0",
"bluebird": "^2.9.14",
"bluebird": "^3.1.1",
"body-parser": "^1.13.3",
"connect-flash": "^0.1.1",
"cookie": "^0.2.3",
"cookie-parser": "^1.3.5",
"cron": "^1.0.9",
"d3-time-format": "^0.3.0",
"debug": "^2.1.3",
"escape-string-regexp": "^1.0.3",
"event-to-promise": "^0.4.0",
"event-to-promise": "^0.6.0",
"exec-promise": "^0.5.1",
"execa": "^0.2.2",
"express": "^4.13.3",
"express-session": "^1.11.3",
"fs-extra": "^0.24.0",
"fatfs": "^0.10.3",
"fs-extra": "^0.26.2",
"fs-promise": "^0.3.1",
"got": "^4.2.0",
"get-stream": "^1.1.0",
"graceful-fs": "^4.1.2",
"hashy": "~0.4.2",
"http-server-plus": "^0.5.1",
"human-format": "^0.5.0",
"helmet": "^1.1.0",
"highland": "^2.5.1",
"http-server-plus": "^0.6.4",
"human-format": "^0.6.0",
"is-my-json-valid": "^2.12.2",
"jade": "^1.11.0",
"js-yaml": "^3.2.7",
"json-rpc-peer": "^0.10.0",
"json-rpc-peer": "^0.11.0",
"json5": "^0.4.0",
"julien-f-source-map-support": "0.0.0",
"julien-f-unzip": "^0.2.1",
"kindof": "^2.0.0",
"level": "^1.3.0",
"level-party": "^3.0.4",
"level-sublevel": "^6.5.2",
"leveldown": "^1.4.2",
"lodash.assign": "^3.0.0",
"lodash.bind": "^3.0.0",
"lodash.difference": "^3.2.0",
"lodash.endswith": "^3.0.2",
"lodash.every": "^4.0.0",
"lodash.filter": "^3.1.0",
"lodash.find": "^3.0.0",
"lodash.findindex": "^3.0.0",
"lodash.foreach": "^3.0.1",
"lodash.get": "^3.7.0",
"lodash.has": "^3.0.0",
"lodash.includes": "^3.1.1",
"lodash.invert": "^4.0.1",
"lodash.isarray": "^3.0.0",
"lodash.isboolean": "^3.0.2",
"lodash.isempty": "^3.0.0",
"lodash.isfunction": "^3.0.1",
"lodash.isinteger": "^4.0.0",
"lodash.isobject": "^3.0.0",
"lodash.isstring": "^3.0.0",
"lodash.keys": "^3.0.4",
"lodash.map": "^3.0.0",
"lodash.pick": "^3.0.0",
"lodash.result": "^3.0.0",
"lodash.sortby": "^3.1.4",
"lodash.startswith": "^3.0.1",
"lodash.trim": "^3.0.1",
"loud-rejection": "^1.2.0",
"make-error": "^1",
"micromatch": "^2.3.2",
"minimist": "^1.2.0",
"ms": "^0.7.1",
"multikey-hash": "^1.0.1",
"ndjson": "^1.4.3",
"partial-stream": "0.0.0",
"passport": "^0.3.0",
"passport-local": "^1.0.0",
"promise-toolbox": "^0.1.0",
"proxy-http-request": "0.1.0",
"redis": "^2.0.1",
"schema-inspector": "^1.5.1",
"semver": "^5.1.0",
"serve-static": "^1.9.2",
"source-map-support": "^0.3.2",
"stack-chain": "^1.3.3",
"trace": "^1.2.0",
"through2": "^2.0.0",
"trace": "^2.0.1",
"ws": "~0.8.0",
"xen-api": "^0.6.4",
"xen-api": "^0.7.2",
"xml2js": "~0.4.6",
"xo-collection": "^0.4.0"
"xo-acl-resolver": "0.0.0-0",
"xo-collection": "^0.4.0",
"xo-remote-parser": "^0.1.0"
},
"devDependencies": {
"babel-eslint": "^4.0.10",
@@ -110,17 +140,18 @@
"leche": "^2.1.1",
"mocha": "^2.2.1",
"must": "^0.13.1",
"node-inspector": "^0.12.2",
"sinon": "^1.14.1",
"standard": "^5.2.1"
},
"scripts": {
"build": "gulp build --production",
"dev": "gulp build",
"build": "npm run build-indexes && gulp build --production",
"build-indexes": "./tools/generate-index src/api & ./tools/generate-index src/xo-mixins",
"dev": "npm run build-indexes && gulp build",
"lint": "standard",
"prepublish": "npm run build",
"start": "node bin/xo-server",
"test": "npm run lint && dependency-check ./package.json && mocha --opts .mocha.opts \"dist/**/*.spec.js\""
"test": "mocha --opts .mocha.opts \"dist/**/*.spec.js\"",
"posttest": "npm run lint && dependency-check ./package.json"
},
"standard": {
"ignore": [

View File

@@ -66,6 +66,8 @@ http:
#socket: './http.sock'
# Basic HTTPS.
#
# You can find the list of possible options there https://nodejs.org/docs/latest/api/tls.html#tls.createServer
# -
# # The only difference is the presence of the certificate and the
# # key.
@@ -83,7 +85,7 @@ http:
# # certificate authority up to the root.
# #
# # Default: undefined
# certificate: './certificate.pem'
# cert: './certificate.pem'
# # File containing the private key (PEM format).
# #
@@ -93,6 +95,10 @@ http:
# # Default: undefined
# key: './key.pem'
# If set to true, all HTTP traffic will be redirected to the first
# HTTPs configuration.
#redirectToHttps: true
# List of files/directories which will be served.
mounts:
#'/': '/path/to/xo-web/dist/'
@@ -109,3 +115,9 @@ redis:
#
# Default: tcp://localhost:6379
#uri: ''
# Directory containing the database of XO.
# Currently used for logs.
#
# Default: '/var/lib/xo-server/data'
#datadir: '/var/lib/xo-server/data'

View File

@@ -44,11 +44,12 @@ html
.form-group
.col-sm-5
.checkbox
input(
name = 'remember-me'
type = 'checkbox'
)
= 'Remember me'
label
input(
name = 'remember-me'
type = 'checkbox'
)
| Remember me
.form-group
.col-sm-12
button.btn.btn-login.btn-block.btn-success

View File

@@ -50,3 +50,11 @@ export class AlreadyAuthenticated extends JsonRpcError {
super('already authenticated', 4)
}
}
// -------------------------------------------------------------------
export class ForbiddenOperation extends JsonRpcError {
constructor (operation, reason) {
super(`forbidden operation: ${operation}`, 5, reason)
}
}

View File

@@ -1,13 +1,9 @@
import createDebug from 'debug'
const debug = createDebug('xo:api')
import assign from 'lodash.assign'
import Bluebird from 'bluebird'
import forEach from 'lodash.foreach'
import getKeys from 'lodash.keys'
import isFunction from 'lodash.isfunction'
import kindOf from 'kindof'
import map from 'lodash.map'
import ms from 'ms'
import schemaInspector from 'schema-inspector'
@@ -17,6 +13,11 @@ import {
NoSuchObject,
Unauthorized
} from './api-errors'
import {
createRawObject,
forEach,
noop
} from './utils'
// ===================================================================
@@ -67,100 +68,6 @@ function checkParams (method, params) {
// -------------------------------------------------------------------
// Forward declaration.
let checkAuthorization
function authorized () {}
function forbiddden () { // eslint-disable-line no-unused-vars
throw null // eslint-disable-line no-throw-literal
}
function checkMemberAuthorization (member) {
return function (userId, object, permission) {
const memberObject = this.getObject(object[member])
return checkAuthorization.call(this, userId, memberObject, permission)
}
}
const checkAuthorizationByTypes = {
host (userId, host, permission) {
return defaultCheckAuthorization.call(this, userId, host, permission).catch(() => {
return checkAuthorization.call(this, userId, host.$pool, permission)
})
},
message: checkMemberAuthorization('$object'),
network (userId, network, permission) {
return defaultCheckAuthorization.call(this, userId, network, permission).catch(() => {
return checkAuthorization.call(this, userId, network.$pool, permission)
})
},
SR (userId, sr, permission) {
return defaultCheckAuthorization.call(this, userId, sr, permission).catch(() => {
return checkAuthorization.call(this, userId, sr.$pool, permission)
})
},
task: checkMemberAuthorization('$host'),
VBD: checkMemberAuthorization('VDI'),
// Access to a VDI is granted if the user has access to the
// containing SR or to a linked VM.
VDI (userId, vdi, permission) {
// Check authorization for each of the connected VMs.
const promises = map(this.getObjects(vdi.$VBDs, 'VBD'), vbd => {
const vm = this.getObject(vbd.VM, 'VM')
return checkAuthorization.call(this, userId, vm, permission)
})
// Check authorization for the containing SR.
const sr = this.getObject(vdi.$SR, 'SR')
promises.push(checkAuthorization.call(this, userId, sr, permission))
// We need at least one success
return Bluebird.any(promises)
},
VIF (userId, vif, permission) {
const network = this.getObject(vif.$network)
const vm = this.getObject(vif.$VM)
return Bluebird.any([
checkAuthorization.call(this, userId, network, permission),
checkAuthorization.call(this, userId, vm, permission)
])
},
VM (userId, vm, permission) {
return defaultCheckAuthorization.call(this, userId, vm, permission).catch(() => {
return checkAuthorization.call(this, userId, vm.$host, permission)
})
},
'VM-snapshot': checkMemberAuthorization('$snapshot_of'),
'VM-template': authorized
}
function throwIfFail (success) {
if (!success) {
// We don't care about an error object.
/* eslint no-throw-literal: 0 */
throw null
}
}
function defaultCheckAuthorization (userId, object, permission) {
return this.hasPermission(userId, object.id, permission).then(throwIfFail)
}
checkAuthorization = async function (userId, object, permission) {
const fn = checkAuthorizationByTypes[object.type] || defaultCheckAuthorization
return fn.call(this, userId, object, permission)
}
function resolveParams (method, params) {
const resolve = method.resolve
if (!resolve) {
@@ -173,9 +80,11 @@ function resolveParams (method, params) {
}
const userId = user.get('id')
const isAdmin = this.user.hasPermission('admin')
const promises = []
// Do not alter the original object.
params = { ...params }
const permissions = []
forEach(resolve, ([param, types, permission = 'administrate'], key) => {
const id = params[param]
if (id === undefined) {
@@ -190,15 +99,16 @@ function resolveParams (method, params) {
// Register this new value.
params[key] = object
if (!isAdmin) {
promises.push(checkAuthorization.call(this, userId, object, permission))
}
permissions.push([ object.id, permission ])
})
return Promise.all(promises).then(
() => params,
() => { throw new Unauthorized() }
)
return this.hasPermissions(userId, permissions).then(success => {
if (success) {
return params
}
throw new Unauthorized()
})
}
// ===================================================================
@@ -207,11 +117,11 @@ function getMethodsInfo () {
const methods = {}
forEach(this.api._methods, function (method, name) {
this[name] = assign({}, {
this[name] = {
description: method.description,
params: method.params || {},
permission: method.permission
})
}
}, methods)
return methods
@@ -242,11 +152,12 @@ function methodSignature ({method: name}) {
// Return an array for compatibility with XML-RPC.
return [
// XML-RPC require the name of the method.
assign({ name }, {
{
name,
description: method.description,
params: method.params || {},
permission: method.permission
})
}
]
}
methodSignature.description = 'returns the signature of an API method'
@@ -254,8 +165,12 @@ methodSignature.description = 'returns the signature of an API method'
// ===================================================================
export default class Api {
constructor ({context} = {}) {
this._methods = Object.create(null)
constructor ({
context,
verboseLogsOnErrors
} = {}) {
this._methods = createRawObject()
this._verboseLogsOnErrors = verboseLogsOnErrors
this.context = context
this.addMethods({
@@ -269,7 +184,19 @@ export default class Api {
}
addMethod (name, method) {
this._methods[name] = method
const methods = this._methods
if (name in methods) {
throw new Error(`API method ${name} already exists`)
}
methods[name] = method
let unset = () => {
delete methods[name]
unset = noop
}
return () => unset()
}
addMethods (methods) {
@@ -297,24 +224,32 @@ export default class Api {
throw new MethodNotFound(name)
}
const context = Object.create(this.context)
context.api = this // Used by system.*().
context.session = session
// FIXME: it can cause issues if there any property assignments in
// XO methods called from the API.
const context = Object.create(this.context, {
api: { // Used by system.*().
value: this
},
session: {
value: session
}
})
// FIXME: too coupled with XO.
// Fetch and inject the current user.
const userId = session.get('user_id', undefined)
if (userId) {
context.user = await context._getUser(userId)
}
context.user = userId && await context._getUser(userId)
const userName = context.user
? context.user.get('email')
: '(unknown user)'
try {
await checkPermission.call(context, method)
checkParams(method, params)
await resolveParams.call(context, method, params)
const resolvedParams = await resolveParams.call(context, method, params)
let result = await method.call(context, params)
let result = await method.call(context, resolvedParams)
// If nothing was returned, consider this operation a success
// and return true.
@@ -323,7 +258,8 @@ export default class Api {
}
debug(
'%s(...) [%s] ==> %s',
'%s | %s(...) [%s] ==> %s',
userName,
name,
ms(Date.now() - startTime),
kindOf(result)
@@ -331,16 +267,28 @@ export default class Api {
return result
} catch (error) {
debug(
'%s(...) [%s] =!> %s',
name,
ms(Date.now() - startTime),
error
)
if (this._verboseLogsOnErrors) {
debug(
'%s | %s(%j) [%s] =!> %s',
userName,
name,
params,
ms(Date.now() - startTime),
error
)
const stack = error && error.stack
if (stack) {
console.error(stack)
const stack = error && error.stack
if (stack) {
console.error(stack)
}
} else {
debug(
'%s | %s(...) [%s] =!> %s',
userName,
name,
ms(Date.now() - startTime),
error
)
}
throw error

View File

@@ -8,13 +8,13 @@ get.description = 'get existing ACLs'
// -------------------------------------------------------------------
export async function getCurrent () {
return await this.getAclsForUser(this.session.get('user_id'))
export async function getCurrentPermissions () {
return await this.getPermissionsForUser(this.session.get('user_id'))
}
getCurrent.permission = ''
getCurrentPermissions.permission = ''
getCurrent.description = 'get existing ACLs concerning current user'
getCurrentPermissions.description = 'get (explicit) permissions by object for the current user'
// -------------------------------------------------------------------

View File

@@ -3,9 +3,9 @@ import {parseSize} from '../utils'
// ===================================================================
export async function create ({name, size, sr}) {
const vdi = await this.getXAPI(sr).createVdi(parseSize(size), {
const vdi = await this.getXapi(sr).createVdi(parseSize(size), {
name_label: name,
sr: sr.id
sr: sr._xapiId
})
return vdi.$id
}
@@ -14,10 +14,27 @@ create.description = 'create a new disk on a SR'
create.params = {
name: { type: 'string' },
size: { type: 'string' },
size: { type: ['integer', 'string'] },
sr: { type: 'string' }
}
create.resolve = {
sr: ['sr', 'SR', 'administrate']
}
// -------------------------------------------------------------------
export async function resize ({ vdi, size }) {
await this.getXapi(vdi).resizeVdi(vdi._xapiId, parseSize(size))
}
resize.description = 'resize an existing VDI'
resize.params = {
id: { type: 'string' },
size: { type: ['integer', 'string'] }
}
resize.resolve = {
vdi: ['id', ['VDI', 'VDI-snapshot'], 'administrate']
}

View File

@@ -1,5 +1,5 @@
export async function register ({vm}) {
await this.getXAPI(vm).registerDockerContainer(vm.id)
await this.getXapi(vm).registerDockerContainer(vm._xapiId)
}
register.permission = 'admin'
@@ -16,7 +16,7 @@ register.resolve = {
// -----------------------------------------------------------------------------
export async function deregister ({vm}) {
await this.getXAPI(vm).unregisterDockerContainer(vm.id)
await this.getXapi(vm).unregisterDockerContainer(vm._xapiId)
}
deregister.permission = 'admin'
@@ -33,23 +33,23 @@ deregister.resolve = {
// -----------------------------------------------------------------------------
export async function start ({vm, container}) {
await this.getXAPI(vm).startDockerContainer(vm.id, container)
await this.getXapi(vm).startDockerContainer(vm._xapiId, container)
}
export async function stop ({vm, container}) {
await this.getXAPI(vm).stopDockerContainer(vm.id, container)
await this.getXapi(vm).stopDockerContainer(vm._xapiId, container)
}
export async function restart ({vm, container}) {
await this.getXAPI(vm).restartDockerContainer(vm.id, container)
await this.getXapi(vm).restartDockerContainer(vm._xapiId, container)
}
export async function pause ({vm, container}) {
await this.getXAPI(vm).pauseDockerContainer(vm.id, container)
await this.getXapi(vm).pauseDockerContainer(vm._xapiId, container)
}
export async function unpause ({vm, container}) {
await this.getXAPI(vm).unpauseDockerContainer(vm.id, container)
await this.getXapi(vm).unpauseDockerContainer(vm._xapiId, container)
}
for (let fn of [start, stop, restart, pause, unpause]) {

View File

@@ -30,11 +30,8 @@ export async function getAll () {
return await this._groups.get()
}
delete_.description = 'returns all the existing group'
delete_.permission = 'admin'
delete_.params = {
id: {type: 'string'}
}
getAll.description = 'returns all the existing group'
getAll.permission = 'admin'
// -------------------------------------------------------------------

View File

@@ -3,26 +3,27 @@ $find = require 'lodash.find'
$findIndex = require 'lodash.findindex'
$forEach = require 'lodash.foreach'
endsWith = require 'lodash.endswith'
got = require('got')
startsWith = require 'lodash.startswith'
{coroutine: $coroutine} = require 'bluebird'
{parseXml, promisify} = require '../utils'
{
extractProperty,
parseXml,
promisify
} = require '../utils'
#=====================================================================
set = $coroutine (params) ->
{host} = params
xapi = @getXAPI host
set = ({
host,
for param, field of {
'name_label'
'name_description'
}
continue unless param of params
yield xapi.call "host.set_#{field}", host.ref, params[param]
return true
# TODO: use camel case.
name_label: nameLabel,
name_description: nameDescription
}) ->
return @getXapi(host).setHostProperties(host._xapiId, {
nameLabel,
nameDescription
})
set.description = 'changes the properties of an host'
@@ -43,18 +44,19 @@ exports.set = set
#---------------------------------------------------------------------
restart = $coroutine ({host}) ->
xapi = @getXAPI host
yield xapi.call 'host.disable', host.ref
yield xapi.call 'host.reboot', host.ref
return true
# FIXME: set force to false per default when correctly implemented in
# UI.
restart = ({host, force = true}) ->
return @getXapi(host).rebootHost(host._xapiId, force)
restart.description = 'restart the host'
restart.params = {
id: { type: 'string' }
id: { type: 'string' },
force: {
type: 'boolean',
optional: true
}
}
restart.resolve = {
@@ -65,12 +67,8 @@ exports.restart = restart
#---------------------------------------------------------------------
restartAgent = $coroutine ({host}) ->
xapi = @getXAPI host
yield xapi.call 'host.restart_agent', host.ref
return true
restartAgent = ({host}) ->
return @getXapi(host).restartHostAgent(host._xapiId)
restartAgent.description = 'restart the Xen agent on the host'
@@ -79,7 +77,7 @@ restartAgent.params = {
}
restartAgent.resolve = {
host: ['id', 'host', 'operate'],
host: ['id', 'host', 'administrate'],
}
# TODO camel case
@@ -87,12 +85,8 @@ exports.restart_agent = restartAgent
#---------------------------------------------------------------------
start = $coroutine ({host}) ->
xapi = @getXAPI host
yield xapi.call 'host.power_on', host.ref
return true
start = ({host}) ->
return @getXapi(host).powerOnHost(host._xapiId)
start.description = 'start the host'
@@ -108,13 +102,8 @@ exports.start = start
#---------------------------------------------------------------------
stop = $coroutine ({host}) ->
xapi = @getXAPI host
yield xapi.call 'host.disable', host.ref
yield xapi.call 'host.shutdown', host.ref
return true
stop = ({host}) ->
return @getXapi(host).shutdownHost(host._xapiId)
stop.description = 'stop the host'
@@ -130,12 +119,8 @@ exports.stop = stop
#---------------------------------------------------------------------
detach = $coroutine ({host}) ->
xapi = @getXAPI host
yield xapi.call 'pool.eject', host.ref
return true
detach = ({host}) ->
return @getXapi(host).ejectHostFromPool(host._xapiId)
detach.description = 'eject the host of a pool'
@@ -151,12 +136,8 @@ exports.detach = detach
#---------------------------------------------------------------------
enable = $coroutine ({host}) ->
xapi = @getXAPI host
yield xapi.call 'host.enable', host.ref
return true
enable = ({host}) ->
return @getXapi(host).enableHost(host._xapiId)
enable.description = 'enable to create VM on the host'
@@ -172,12 +153,8 @@ exports.enable = enable
#---------------------------------------------------------------------
disable = $coroutine ({host}) ->
xapi = @getXAPI host
yield xapi.call 'host.disable', host.ref
return true
disable = ({host}) ->
return @getXapi(host).disableHost(host._xapiId)
disable.description = 'disable to create VM on the hsot'
@@ -193,8 +170,9 @@ exports.disable = disable
#---------------------------------------------------------------------
# TODO: to test and to fix.
createNetwork = $coroutine ({host, name, description, pif, mtu, vlan}) ->
xapi = @getXAPI host
xapi = @getXapi host
description = description ? 'Created with Xen Orchestra'
@@ -208,7 +186,7 @@ createNetwork = $coroutine ({host, name, description, pif, mtu, vlan}) ->
if pif?
vlan = vlan ? '0'
pif = @getObject pif, 'PIF'
yield xapi.call 'pool.create_VLAN_from_PIF', pif.ref, network_ref, vlan
yield xapi.call 'pool.create_VLAN_from_PIF', pif._xapiRef, network_ref, vlan
return true
@@ -233,7 +211,7 @@ exports.createNetwork = createNetwork
# Throws an error if the host is not running the latest XS version
listMissingPatches = ({host}) ->
return @getXAPI(host).listMissingPoolPatchesOnHost(host.id)
return @getXapi(host).listMissingPoolPatchesOnHost(host._xapiId)
listMissingPatches.params = {
host: { type: 'string' }
@@ -250,7 +228,7 @@ listMissingPatches.description = 'return an array of missing new patches in the
#---------------------------------------------------------------------
installPatch = ({host, patch: patchUuid}) ->
return @getXAPI(host).installPoolPatchOnHost(patchUuid, host.id)
return @getXapi(host).installPoolPatchOnHost(patchUuid, host._xapiId)
installPatch.description = 'install a patch on an host'
@@ -267,96 +245,51 @@ exports.installPatch = installPatch
#---------------------------------------------------------------------
installAllPatches = ({host}) ->
return @getXapi(host).installAllPoolPatchesOnHost(host._xapiId)
stats = $coroutine ({host, granularity}) ->
granularity = if granularity then granularity else 0
# granularity: 0: every 5 sec along last 10 minutes, 1: every minute along last 2 hours, 2: every hour along past week, 3: everyday along past year
# see http://xenserver.org/partners/developing-products-for-xenserver/18-sdk-development/96-xs-dev-rrds.html
installAllPatches.description = 'install all the missing patches on a host'
# select the AVERAGE values
granularity = {0:0, 1:1, 2:4, 3:7}[granularity]
xapi = @getXAPI host
installAllPatches.params = {
host: { type: 'string' }
}
{body} = response = yield got(
"https://#{host.address}/host_rrd?session_id=#{xapi.sessionId}",
{ rejectUnauthorized: false }
)
installAllPatches.resolve = {
host: ['host', 'host', 'administrate']
}
if response.statusCode isnt 200
throw new Error('Cannot fetch the RRDs')
exports.installAllPatches = installAllPatches
json = parseXml(body)
#---------------------------------------------------------------------
# Find index of needed objects for getting their values after
cpusIndexes = []
pifsIndexes = []
memoryFreeIndex = []
memoryIndex = []
loadIndex = []
index = 0
emergencyShutdownHost = ({host}) ->
return @getXapi(host).emergencyShutdownHost(host._xapiId)
$forEach(json.rrd.ds, (value, i) ->
if /^cpu[0-9]+$/.test(value.name)
cpusIndexes.push(i)
else if startsWith(value.name, 'pif_eth') && endsWith(value.name, '_tx')
pifsIndexes.push(i)
else if startsWith(value.name, 'pif_eth') && endsWith(value.name, '_rx')
pifsIndexes.push(i)
else if startsWith(value.name, 'loadavg')
loadIndex.push(i)
else if startsWith(value.name, 'memory_free_kib')
memoryFreeIndex.push(i)
else if startsWith(value.name, 'memory_total_kib')
memoryIndex.push(i)
emergencyShutdownHost.description = 'suspend all VMs and shutdown host'
return
)
emergencyShutdownHost.params = {
host: { type: 'string' }
}
memoryFree = []
memoryUsed = []
memory = []
load = []
cpus = []
pifs = []
date = []
archive = json.rrd.rra[granularity]
dateStep = json.rrd.step * archive.pdp_per_row
baseDate = json.rrd.lastupdate - (json.rrd.lastupdate % dateStep)
numStep = archive.database.row.length - 1
emergencyShutdownHost.resolve = {
host: ['host', 'host', 'administrate']
}
$forEach archive.database.row, (n, key) ->
memoryFree.push(Math.round(parseInt(n.v[memoryFreeIndex])))
memoryUsed.push(Math.round(parseInt(n.v[memoryIndex])-(n.v[memoryFreeIndex])))
memory.push(parseInt(n.v[memoryIndex]))
load.push(if n.v[loadIndex] == 'NaN' then null else n.v[loadIndex])
date.push(baseDate - (dateStep * (numStep - key)))
# build the multi dimensional arrays
$forEach cpusIndexes, (value, key) ->
cpus[key] ?= []
cpus[key].push(n.v[value]*100)
return
$forEach pifsIndexes, (value, key) ->
pifs[key] ?= []
pifs[key].push(if n.v[value] == 'NaN' then null else n.v[value]) # * (if key % 2 then -1 else 1))
return
return
exports.emergencyShutdownHost = emergencyShutdownHost
#---------------------------------------------------------------------
# the final object
return {
memoryFree: memoryFree
memoryUsed: memoryUsed
memory: memory
date: date
cpus: cpus
pifs: pifs
load: load
}
stats = ({host, granularity}) ->
return @getXapiHostStats(host, granularity)
stats.description = 'returns statistic of the host'
stats.params = {
host: { type: 'string' }
host: { type: 'string' },
granularity: {
type: 'string',
optional: true
}
}
stats.resolve = {
@@ -364,3 +297,9 @@ stats.resolve = {
}
exports.stats = stats;
#=====================================================================
Object.defineProperty(exports, '__esModule', {
value: true
})

View File

@@ -1,34 +0,0 @@
//
// This file has been generated by ./.generate-index.sh
//
// It MUST be re-generated each time an API namespace (read file) is
// added or removed.
//
export * as acl from './acl'
export * as disk from './disk'
export * as docker from './docker'
export * as group from './group'
export * as host from './host'
export * as job from './job'
export * as message from './message'
export * as pbd from './pbd'
export * as pif from './pif'
export * as plugin from './plugin'
export * as pool from './pool'
export * as remote from './remote'
export * as role from './role'
export * as schedule from './schedule'
export * as scheduler from './scheduler'
export * as server from './server'
export * as session from './session'
export * as sr from './sr'
export * as tag from './tag'
export * as task from './task'
export * as test from './test'
export * as token from './token'
export * as user from './user'
export * as vbd from './vbd'
export * as vdi from './vdi'
export * as vif from './vif'
export * as vm from './vm'
export * as xo from './xo'

View File

@@ -27,6 +27,7 @@ create.params = {
job: {
type: 'object',
properties: {
name: {type: 'string', optional: true},
type: {type: 'string'},
key: {type: 'string'},
method: {type: 'string'},
@@ -47,7 +48,8 @@ create.params = {
}
}
}
}
},
optional: true
}
}
}
@@ -64,6 +66,7 @@ set.params = {
type: 'object',
properties: {
id: {type: 'string'},
name: {type: 'string', optional: true},
type: {type: 'string'},
key: {type: 'string'},
method: {type: 'string'},
@@ -84,7 +87,8 @@ set.params = {
}
}
}
}
},
optional: true
}
}
}
@@ -101,3 +105,13 @@ delete_.params = {
}
export {delete_ as delete}
export async function runSequence ({idSequence}) {
await this.runJobSequence(idSequence)
}
runSequence.permission = 'admin'
runSequence.description = 'Runs jobs sequentially, in the provided order'
runSequence.params = {
idSequence: {type: 'array', items: {type: 'string'}}
}

28
src/api/log.js Normal file
View File

@@ -0,0 +1,28 @@
export async function get ({namespace}) {
const logger = this.getLogger(namespace)
return new Promise((resolve, reject) => {
const logs = {}
logger.createReadStream()
.on('data', (data) => {
logs[data.key] = data.value
})
.on('end', () => {
resolve(logs)
})
.on('error', reject)
})
}
get.description = 'returns logs list for one namespace'
function delete_ ({namespace, id}) {
const logger = this.getLogger(namespace)
logger.del(id)
}
delete_.description = 'deletes on or several logs from a namespace'
delete_.permission = 'admin'
export {delete_ as delete}

View File

@@ -1,5 +1,5 @@
async function delete_ ({message}) {
await this.getXAPI(message).call('message.destroy', message.ref)
async function delete_ ({ message }) {
await this.getXapi(message).call('message.destroy', message._xapiRef)
}
export {delete_ as delete}

View File

@@ -5,7 +5,7 @@
async function delete_ ({PBD}) {
// TODO: check if PBD is attached before
await this.getXAPI(PBD).call('PBD.destroy', PBD.ref)
await this.getXapi(PBD).call('PBD.destroy', PBD._xapiRef)
}
export {delete_ as delete}
@@ -22,7 +22,7 @@ delete_.resolve = {
export async function disconnect ({PBD}) {
// TODO: check if PBD is attached before
await this.getXAPI(PBD).call('PBD.unplug', PBD.ref)
await this.getXapi(PBD).call('PBD.unplug', PBD._xapiRef)
}
disconnect.params = {
@@ -38,7 +38,7 @@ disconnect.resolve = {
export async function connect ({PBD}) {
// TODO: check if PBD is attached before
await this.getXAPI(PBD).call('PBD.plug', PBD.ref)
await this.getXapi(PBD).call('PBD.plug', PBD._xapiRef)
}
connect.params = {

View File

@@ -1,9 +1,11 @@
// TODO: too low level, move into host.
// ===================================================================
// Delete
async function delete_ ({PIF}) {
// TODO: check if PIF is attached before
await this.getXAPI(PIF).call('PIF.destroy', PIF.ref)
await this.getXapi(PIF).call('PIF.destroy', PIF._xapiRef)
}
export {delete_ as delete}
@@ -20,7 +22,7 @@ delete_.resolve = {
export async function disconnect ({PIF}) {
// TODO: check if PIF is attached before
await this.getXAPI(PIF).call('PIF.unplug', PIF.ref)
await this.getXapi(PIF).call('PIF.unplug', PIF._xapiRef)
}
disconnect.params = {
@@ -35,7 +37,7 @@ disconnect.resolve = {
export async function connect ({PIF}) {
// TODO: check if PIF is attached before
await this.getXAPI(PIF).call('PIF.plug', PIF.ref)
await this.getXapi(PIF).call('PIF.plug', PIF._xapiRef)
}
connect.params = {

View File

@@ -1,48 +1,3 @@
// Plugin object structure (using [JSON Schema](http://json-schema.org)):
({
$schema: 'http://json-schema.org/draft-04/schema#',
type: 'object',
properties: {
id: {
type: 'string',
description: 'unique identifier for this plugin'
},
name: {
type: 'string',
description: 'unique human readable name for this plugin'
},
autoload: {
type: 'boolean',
description: 'whether this plugin is loaded on startup'
},
loaded: {
type: 'boolean',
description: 'whether or not this plugin is currently loaded'
},
unloadable: {
type: 'boolean',
default: 'true',
description: 'whether or not this plugin can be unloaded'
},
configuration: {
type: 'object',
description: 'current configuration of this plugin (not present if none)'
},
configurationSchema: {
$ref: 'http://json-schema.org/draft-04/schema#',
description: 'configuration schema for this plugin (not present if not configurable)'
}
},
required: [
'id',
'name',
'autoload',
'loaded'
]
})
// ===================================================================
export async function get () {
return await this.getPlugins()
}
@@ -131,3 +86,19 @@ unload.params = {
}
unload.permission = 'admin'
// -------------------------------------------------------------------
export async function purgeConfiguration ({ id }) {
await this.purgePluginConfiguration(id)
}
purgeConfiguration.description = 'removes a plugin configuration'
purgeConfiguration.params = {
id: {
type: 'string'
}
}
purgeConfiguration.permission = 'admin'

View File

@@ -1,12 +1,18 @@
import {JsonRpcError} from '../api-errors'
import {extractProperty} from '../utils'
// ===================================================================
export async function set (params) {
const pool = extractProperty(params, 'pool')
export async function set ({
pool,
await this.getXAPI(pool).setPoolProperties(params)
// TODO: use camel case.
name_description: nameDescription,
name_label: nameLabel
}) {
await this.getXapi(pool).setPoolProperties({
nameDescription,
nameLabel
})
}
set.params = {
@@ -29,8 +35,27 @@ set.resolve = {
// -------------------------------------------------------------------
export async function setDefaultSr ({pool, sr}) {
await this.getXapi(pool).setDefaultSr(sr._xapiId)
}
setDefaultSr.params = {
pool: {
type: 'string'
},
sr: {
type: 'string'
}
}
setDefaultSr.resolve = {
pool: ['pool', 'pool', 'administrate'],
sr: ['sr', 'SR']
}
// -------------------------------------------------------------------
export async function installPatch ({pool, patch: patchUuid}) {
await this.getXAPI(pool).installPoolPatchOnAllHosts(patchUuid)
await this.getXapi(pool).installPoolPatchOnAllHosts(patchUuid)
}
installPatch.params = {
@@ -56,7 +81,7 @@ async function handlePatchUpload (req, res, {pool}) {
return
}
await this.getXAPI(pool).uploadPoolPatch(req, contentLength)
await this.getXapi(pool).uploadPoolPatch(req, contentLength)
}
export async function uploadPatch ({pool}) {
@@ -82,7 +107,7 @@ export {uploadPatch as patch}
export async function mergeInto ({ source, target, force }) {
try {
await this.mergeXenPools(source.id, target.id, force)
await this.mergeXenPools(source._xapiId, target._xapiId, force)
} catch (e) {
// FIXME: should we expose plain XAPI error messages?
throw new JsonRpcError(e.message)

View File

@@ -5,7 +5,7 @@ export async function getAll () {
getAll.permission = 'admin'
getAll.description = 'Gets all existing fs remote points'
export async function get (id) {
export async function get ({id}) {
return await this.getRemote(id)
}
@@ -15,6 +15,16 @@ get.params = {
id: {type: 'string'}
}
export async function list ({id}) {
return await this.listRemoteBackups(id)
}
list.permission = 'admin'
list.description = 'Lists the files found in a remote point'
list.params = {
id: {type: 'string'}
}
export async function create ({name, url}) {
return await this.createRemote({name, url})
}

View File

@@ -17,8 +17,8 @@ get.params = {
id: {type: 'string'}
}
export async function create ({jobId, cron, enabled}) {
return await this.createSchedule(this.session.get('user_id'), {job: jobId, cron, enabled})
export async function create ({jobId, cron, enabled, name}) {
return await this.createSchedule(this.session.get('user_id'), {job: jobId, cron, enabled, name})
}
create.permission = 'admin'
@@ -26,11 +26,12 @@ create.description = 'Creates a new schedule'
create.params = {
jobId: {type: 'string'},
cron: {type: 'string'},
enabled: {type: 'boolean', optional: true}
enabled: {type: 'boolean', optional: true},
name: {type: 'string', optional: true}
}
export async function set ({id, jobId, cron, enabled}) {
await this.updateSchedule(id, {job: jobId, cron, enabled})
export async function set ({id, jobId, cron, enabled, name}) {
await this.updateSchedule(id, {job: jobId, cron, enabled, name})
}
set.permission = 'admin'
@@ -39,7 +40,8 @@ set.params = {
id: {type: 'string'},
jobId: {type: 'string', optional: true},
cron: {type: 'string', optional: true},
enabled: {type: 'boolean', optional: true}
enabled: {type: 'boolean', optional: true},
name: {type: 'string', optional: true}
}
async function delete_ ({id}) {

View File

@@ -1,14 +1,17 @@
import { noop } from '../utils'
export async function add ({
host,
username,
password,
readOnly,
autoConnect = true
}) {
const server = await this.registerXenServer({host, username, password})
const server = await this.registerXenServer({host, username, password, readOnly})
if (autoConnect) {
// Connect asynchronously, ignore any error.
this.connectXenServer(server.id).catch(() => {})
// Connect asynchronously, ignore any errors.
this.connectXenServer(server.id).catch(noop)
}
return server.id
@@ -70,11 +73,11 @@ getAll.permission = 'admin'
// -------------------------------------------------------------------
export async function set ({id, host, username, password}) {
await this.updateXenServer(id, {host, username, password})
export async function set ({id, host, username, password, readOnly}) {
await this.updateXenServer(id, {host, username, password, readOnly})
}
set.description = 'changes the propeorties of a Xen server'
set.description = 'changes the properties of a Xen server'
set.permission = 'admin'
@@ -99,6 +102,7 @@ set.params = {
// -------------------------------------------------------------------
export async function connect ({id}) {
this.updateXenServer(id, {enabled: true}).catch(noop)
await this.connectXenServer(id)
}
@@ -115,6 +119,7 @@ connect.params = {
// -------------------------------------------------------------------
export async function disconnect ({id}) {
this.updateXenServer(id, {enabled: false}).catch(noop)
await this.disconnectXenServer(id)
}

View File

@@ -1,13 +1,22 @@
import forEach from 'lodash.foreach'
import {ensureArray, parseXml} from '../utils'
import {
ensureArray,
forEach,
parseXml
} from '../utils'
// ===================================================================
export async function set (params) {
const {sr} = params
delete params.sr
export async function set ({
sr,
await this.getXAPI(sr).setSrProperties(sr.id, params)
// TODO: use camel case.
name_description: nameDescription,
name_label: nameLabel
}) {
await this.getXapi(sr).setSrProperties(sr._xapiId, {
nameDescription,
nameLabel
})
}
set.params = {
@@ -25,7 +34,7 @@ set.resolve = {
// -------------------------------------------------------------------
export async function scan ({SR}) {
await this.getXAPI(SR).call('SR.scan', SR.ref)
await this.getXapi(SR).call('SR.scan', SR._xapiRef)
}
scan.params = {
@@ -40,7 +49,7 @@ scan.resolve = {
// TODO: find a way to call this "delete" and not destroy
export async function destroy ({SR}) {
await this.getXAPI(SR).call('SR.destroy', SR.ref)
await this.getXapi(SR).call('SR.destroy', SR._xapiRef)
}
destroy.params = {
@@ -54,7 +63,7 @@ destroy.resolve = {
// -------------------------------------------------------------------
export async function forget ({SR}) {
await this.getXAPI(SR).call('SR.forget', SR.ref)
await this.getXapi(SR).call('SR.forget', SR._xapiRef)
}
forget.params = {
@@ -73,7 +82,7 @@ export async function createIso ({
nameDescription,
path
}) {
const xapi = this.getXAPI(host)
const xapi = this.getXapi(host)
// FIXME: won't work for IPv6
// Detect if NFS or local path for ISO files
@@ -84,7 +93,7 @@ export async function createIso ({
}
const srRef = await xapi.call(
'SR.create',
host.ref,
host._xapiRef,
deviceConfig,
'0', // SR size 0 because ISO
nameLabel,
@@ -123,7 +132,7 @@ export async function createNfs ({
serverPath,
nfsVersion
}) {
const xapi = this.getXAPI(host)
const xapi = this.getXapi(host)
const deviceConfig = {
server,
@@ -137,7 +146,7 @@ export async function createNfs ({
const srRef = await xapi.call(
'SR.create',
host.ref,
host._xapiRef,
deviceConfig,
'0',
nameLabel,
@@ -176,7 +185,7 @@ export async function createLvm ({
nameDescription,
device
}) {
const xapi = this.getXAPI(host)
const xapi = this.getXapi(host)
const deviceConfig = {
device
@@ -184,7 +193,7 @@ export async function createLvm ({
const srRef = await xapi.call(
'SR.create',
host.ref,
host._xapiRef,
deviceConfig,
'0',
nameLabel,
@@ -218,7 +227,7 @@ export async function probeNfs ({
host,
server
}) {
const xapi = this.getXAPI(host)
const xapi = this.getXapi(host)
const deviceConfig = {
server
@@ -229,7 +238,7 @@ export async function probeNfs ({
try {
await xapi.call(
'SR.probe',
host.ref,
host._xapiRef,
deviceConfig,
'nfs',
{}
@@ -281,7 +290,7 @@ export async function createIscsi ({
chapUser,
chapPassword
}) {
const xapi = this.getXAPI(host)
const xapi = this.getXapi(host)
const deviceConfig = {
target,
@@ -302,7 +311,7 @@ export async function createIscsi ({
const srRef = await xapi.call(
'SR.create',
host.ref,
host._xapiRef,
deviceConfig,
'0',
nameLabel,
@@ -344,7 +353,7 @@ export async function probeIscsiIqns ({
chapUser,
chapPassword
}) {
const xapi = this.getXAPI(host)
const xapi = this.getXapi(host)
const deviceConfig = {
target: targetIp
@@ -366,7 +375,7 @@ export async function probeIscsiIqns ({
try {
await xapi.call(
'SR.probe',
host.ref,
host._xapiRef,
deviceConfig,
'lvmoiscsi',
{}
@@ -421,7 +430,7 @@ export async function probeIscsiLuns ({
chapUser,
chapPassword
}) {
const xapi = this.getXAPI(host)
const xapi = this.getXapi(host)
const deviceConfig = {
target: targetIp,
@@ -444,7 +453,7 @@ export async function probeIscsiLuns ({
try {
await xapi.call(
'SR.probe',
host.ref,
host._xapiRef,
deviceConfig,
'lvmoiscsi',
{}
@@ -499,7 +508,7 @@ export async function probeIscsiExists ({
chapUser,
chapPassword
}) {
const xapi = this.getXAPI(host)
const xapi = this.getXapi(host)
const deviceConfig = {
target: targetIp,
@@ -518,7 +527,7 @@ export async function probeIscsiExists ({
deviceConfig.port = port
}
const xml = parseXml(await xapi.call('SR.probe', host.ref, deviceConfig, 'lvmoiscsi', {}))
const xml = parseXml(await xapi.call('SR.probe', host._xapiRef, deviceConfig, 'lvmoiscsi', {}))
const srs = []
forEach(ensureArray(xml['SRlist'].SR), sr => {
@@ -552,14 +561,14 @@ export async function probeNfsExists ({
server,
serverPath
}) {
const xapi = this.getXAPI(host)
const xapi = this.getXapi(host)
const deviceConfig = {
server,
serverpath: serverPath
}
const xml = parseXml(await xapi.call('SR.probe', host.ref, deviceConfig, 'nfs', {}))
const xml = parseXml(await xapi.call('SR.probe', host._xapiRef, deviceConfig, 'nfs', {}))
const srs = []
@@ -591,7 +600,7 @@ export async function reattach ({
nameDescription,
type
}) {
const xapi = this.getXAPI(host)
const xapi = this.getXapi(host)
if (type === 'iscsi') {
type = 'lvmoiscsi' // the internal XAPI name
@@ -634,7 +643,7 @@ export async function reattachIso ({
nameDescription,
type
}) {
const xapi = this.getXAPI(host)
const xapi = this.getXapi(host)
if (type === 'iscsi') {
type = 'lvmoiscsi' // the internal XAPI name

View File

@@ -1,5 +1,5 @@
export async function add ({tag, object}) {
await this.getXAPI(object).addTag(object.id, tag)
await this.getXapi(object).addTag(object._xapiId, tag)
}
add.description = 'add a new tag to an object'
@@ -16,7 +16,7 @@ add.params = {
// -------------------------------------------------------------------
export async function remove ({tag, object}) {
await this.getXAPI(object).removeTag(object.id, tag)
await this.getXapi(object).removeTag(object._xapiId, tag)
}
remove.description = 'remove an existing tag from an object'

View File

@@ -1,5 +1,5 @@
export async function cancel ({task}) {
await this.getXAPI(task).call('task.cancel', task.ref)
await this.getXapi(task).call('task.cancel', task._xapiRef)
}
cancel.params = {
@@ -13,7 +13,7 @@ cancel.resolve = {
// -------------------------------------------------------------------
export async function destroy ({task}) {
await this.getXAPI(task).call('task.destroy', task.ref)
await this.getXapi(task).call('task.destroy', task._xapiRef)
}
destroy.params = {

View File

@@ -1,5 +1,21 @@
export function hasPermission ({userId, objectId, permission}) {
return this.hasPermission(userId, objectId, permission)
export function getPermissionsForUser ({ userId }) {
return this.getPermissionsForUser(userId)
}
getPermissionsForUser.permission = 'admin'
getPermissionsForUser.params = {
userId: {
type: 'string'
}
}
// -------------------------------------------------------------------
export function hasPermission ({ userId, objectId, permission }) {
return this.hasPermissions(userId, [
[ objectId, permission ]
])
}
hasPermission.permission = 'admin'

View File

@@ -1,6 +1,5 @@
import map from 'lodash.map'
import {InvalidParameters} from '../api-errors'
import { mapToArray } from '../utils'
// ===================================================================
@@ -49,7 +48,7 @@ export async function getAll () {
const users = await this._users.get()
// Filters out private properties.
return map(users, this.getUserPublicProperties)
return mapToArray(users, this.getUserPublicProperties)
}
getAll.description = 'returns all the existing users'
@@ -73,9 +72,11 @@ set.params = {
permission: { type: 'string', optional: true }
}
// -------------------------------------------------------------------
export async function changePassword ({oldPassword, newPassword}) {
const id = this.session.get('user_id')
await this.changePassword(id, oldPassword, newPassword)
await this.changeUserPassword(id, oldPassword, newPassword)
}
changePassword.description = 'change password after checking old password (user function)'

View File

@@ -5,10 +5,10 @@
#=====================================================================
delete_ = $coroutine ({vbd}) ->
xapi = @getXAPI vbd
xapi = @getXapi vbd
# TODO: check if VBD is attached before
yield xapi.call 'VBD.destroy', vbd.ref
yield xapi.call 'VBD.destroy', vbd._xapiRef
return true
@@ -25,12 +25,9 @@ exports.delete = delete_
#---------------------------------------------------------------------
disconnect = $coroutine ({vbd}) ->
xapi = @getXAPI vbd
# TODO: check if VBD is attached before
yield xapi.call 'VBD.unplug_force', vbd.ref
return true
xapi = @getXapi vbd
yield xapi.disconnectVbd(vbd._xapiRef)
return
disconnect.params = {
id: { type: 'string' }
@@ -45,12 +42,9 @@ exports.disconnect = disconnect
#---------------------------------------------------------------------
connect = $coroutine ({vbd}) ->
xapi = @getXAPI vbd
# TODO: check if VBD is attached before
yield xapi.call 'VBD.plug', vbd.ref
return true
xapi = @getXapi vbd
yield xapi.connectVbd(vbd._xapiRef)
return
connect.params = {
id: { type: 'string' }
@@ -66,9 +60,9 @@ exports.connect = connect
set = $coroutine (params) ->
{vbd} = params
xapi = @getXAPI vbd
xapi = @getXapi vbd
{ref} = vbd
{ _xapiRef: ref } = vbd
# VBD position
if 'position' of params
@@ -87,3 +81,29 @@ set.resolve = {
}
exports.set = set
#---------------------------------------------------------------------
setBootable = $coroutine ({vbd, bootable}) ->
xapi = @getXapi vbd
{ _xapiRef: ref } = vbd
yield xapi.call 'VBD.set_bootable', ref, bootable
return
setBootable.params = {
vbd: { type: 'string' }
bootable: { type: 'boolean' }
}
setBootable.resolve = {
vbd: ['vbd', 'VBD', 'administrate'],
}
exports.setBootable = setBootable
#=====================================================================
Object.defineProperty(exports, '__esModule', {
value: true
})

View File

@@ -3,13 +3,15 @@
$isArray = require 'lodash.isarray'
{coroutine: $coroutine} = require 'bluebird'
{format} = require 'json-rpc-peer'
{InvalidParameters} = require '../api-errors'
{parseSize} = require '../utils'
{JsonRpcError} = require '../api-errors'
#=====================================================================
delete_ = $coroutine ({vdi}) ->
yield @getXAPI(vdi).deleteVdi(vdi.id)
yield @getXapi(vdi).deleteVdi(vdi._xapiId)
return
@@ -18,7 +20,7 @@ delete_.params = {
}
delete_.resolve = {
vdi: ['id', 'VDI', 'administrate'],
vdi: ['id', ['VDI', 'VDI-snapshot'], 'administrate'],
}
exports.delete = delete_
@@ -28,9 +30,9 @@ exports.delete = delete_
# FIXME: human readable strings should be handled.
set = $coroutine (params) ->
{vdi} = params
xapi = @getXAPI vdi
xapi = @getXapi vdi
{ref} = vdi
{_xapiRef: ref} = vdi
# Size.
if 'size' of params
@@ -40,8 +42,7 @@ set = $coroutine (params) ->
throw new InvalidParameters(
"cannot set new size (#{size}) below the current size (#{vdi.size})"
)
yield xapi.call 'VDI.resize_online', ref, "#{size}"
yield xapi.resizeVdi(ref, size)
# Other fields.
for param, fields of {
@@ -68,7 +69,7 @@ set.params = {
}
set.resolve = {
vdi: ['id', 'VDI', 'administrate'],
vdi: ['id', ['VDI', 'VDI-snapshot'], 'administrate'],
}
exports.set = set
@@ -76,10 +77,9 @@ exports.set = set
#---------------------------------------------------------------------
migrate = $coroutine ({vdi, sr}) ->
xapi = @getXAPI vdi
xapi = @getXapi vdi
# TODO: check if VDI is attached before
yield xapi.call 'VDI.pool_migrate', vdi.ref, sr.ref, {}
yield xapi.moveVdi(vdi._xapiRef, sr._xapiRef)
return true
@@ -89,8 +89,14 @@ migrate.params = {
}
migrate.resolve = {
vdi: ['id', 'VDI', 'administrate'],
vdi: ['id', ['VDI', 'VDI-snapshot'], 'administrate'],
sr: ['sr_id', 'SR', 'administrate'],
}
exports.migrate = migrate
#=====================================================================
Object.defineProperty(exports, '__esModule', {
value: true
})

View File

@@ -1,6 +1,6 @@
// TODO: move into vm and rename to removeInterface
async function delete_ ({vif}) {
await this.getXAPI(vif).deleteVif(vif.id)
await this.getXapi(vif).deleteVif(vif._xapiId)
}
export {delete_ as delete}
@@ -16,7 +16,7 @@ delete_.resolve = {
// TODO: move into vm and rename to disconnectInterface
export async function disconnect ({vif}) {
// TODO: check if VIF is attached before
await this.getXAPI(vif).call('VIF.unplug_force', vif.ref)
await this.getXapi(vif).call('VIF.unplug_force', vif._xapiRef)
}
disconnect.params = {
@@ -31,7 +31,7 @@ disconnect.resolve = {
// TODO: move into vm and rename to connectInterface
export async function connect ({vif}) {
// TODO: check if VIF is attached before
await this.getXAPI(vif).call('VIF.plug', vif.ref)
await this.getXapi(vif).call('VIF.plug', vif._xapiRef)
}
connect.params = {

View File

@@ -2,20 +2,24 @@ $debug = (require 'debug') 'xo:api:vm'
$filter = require 'lodash.filter'
$findIndex = require 'lodash.findindex'
$findWhere = require 'lodash.find'
$forEach = require 'lodash.foreach'
$isArray = require 'lodash.isarray'
$result = require 'lodash.result'
endsWith = require 'lodash.endswith'
escapeStringRegexp = require 'escape-string-regexp'
eventToPromise = require 'event-to-promise'
got = require('got')
map = require 'lodash.map'
sortBy = require 'lodash.sortby'
startsWith = require 'lodash.startswith'
{coroutine: $coroutine} = require 'bluebird'
{format} = require 'json-rpc-peer'
{
JsonRpcError,
Unauthorized
} = require('../api-errors')
{
forEach,
formatXml: $js2xml,
mapToArray,
parseSize,
parseXml,
pFinally
} = require '../utils'
@@ -23,6 +27,28 @@ startsWith = require 'lodash.startswith'
#=====================================================================
checkPermissionOnSrs = (vm, permission = 'operate') -> (
permissions = []
forEach(vm.$VBDs, (vbdId) =>
vbd = @getObject(vbdId, 'VBD')
vdiId = vbd.VDI
if vbd.is_cd_drive or not vdiId
return
permissions.push([
@getObject(vdiId, 'VDI').$SR,
permission
])
)
return @hasPermissions(@session.get('user_id'), permissions).then((success) => (
throw new Unauthorized() unless success
))
)
#=====================================================================
# TODO: Implement ACLs
create = $coroutine ({
installation
@@ -32,14 +58,16 @@ create = $coroutine ({
pv_args
VDIs
VIFs
existingDisks
}) ->
vm = yield @getXAPI(template).createVm(template.id, {
vm = yield @getXapi(template).createVm(template._xapiId, {
installRepository: installation && installation.repository,
nameDescription: name_description,
nameLabel: name_label,
pvArgs: pv_args,
vdis: VDIs,
vifs: VIFs
vifs: VIFs,
existingDisks
})
return vm.$id
@@ -79,7 +107,7 @@ create.params = {
# UUID of the network to create the interface in.
network: { type: 'string' }
MAC: {
mac: {
optional: true # Auto-generated per default.
type: 'string'
}
@@ -95,7 +123,7 @@ create.params = {
type: 'object'
properties: {
device: { type: 'string' }
size: { type: 'integer' }
size: { type: ['integer', 'string'] }
SR: { type: 'string' }
type: { type: 'string' }
}
@@ -112,7 +140,7 @@ exports.create = create
#---------------------------------------------------------------------
delete_ = ({vm, delete_disks: deleteDisks}) ->
return @getXAPI(vm).deleteVm(vm.id, deleteDisks)
return @getXapi(vm).deleteVm(vm._xapiId, deleteDisks)
delete_.params = {
id: { type: 'string' }
@@ -131,7 +159,7 @@ exports.delete = delete_
#---------------------------------------------------------------------
ejectCd = $coroutine ({vm}) ->
yield @getXAPI(vm).ejectCdFromVm(vm.id)
yield @getXapi(vm).ejectCdFromVm(vm._xapiId)
return
ejectCd.params = {
@@ -146,7 +174,7 @@ exports.ejectCd = ejectCd
#---------------------------------------------------------------------
insertCd = $coroutine ({vm, vdi, force}) ->
yield @getXAPI(vm).insertCdIntoVm(vdi.id, vm.id, {force})
yield @getXapi(vm).insertCdIntoVm(vdi._xapiId, vm._xapiId, {force})
return
insertCd.params = {
@@ -163,82 +191,83 @@ exports.insertCd = insertCd
#---------------------------------------------------------------------
migrate = $coroutine ({vm, host}) ->
yield @getXAPI(vm).migrateVm(vm.id, @getXAPI(host), host.id)
migrate = $coroutine ({
vm,
host,
mapVdisSrs,
mapVifsNetworks,
migrationNetwork
}) ->
permissions = []
if mapVdisSrs
mapVdisSrsXapi = {}
forEach mapVdisSrs, (srId, vdiId) =>
vdiXapiId = @getObject(vdiId, 'VDI')._xapiId
mapVdisSrsXapi[vdiXapiId] = @getObject(srId, 'SR')._xapiId
permissions.push([
srId,
'administrate'
])
if mapVifsNetworks
mapVifsNetworksXapi = {}
forEach mapVifsNetworks, (networkId, vifId) =>
vifXapiId = @getObject(vifId, 'VIF')._xapiId
mapVifsNetworksXapi[vifXapiId] = @getObject(networkId, 'network')._xapiId
permissions.push([
networkId,
'administrate'
])
unless yield @hasPermissions(@session.get('user_id'), permissions)
throw new Unauthorized()
yield @getXapi(vm).migrateVm(vm._xapiId, @getXapi(host), host._xapiId, {
migrationNetworkId: migrationNetwork?._xapiId
mapVifsNetworks: mapVifsNetworksXapi,
mapVdisSrs: mapVdisSrsXapi,
})
return
migrate.params = {
# Identifier of the VM to migrate.
id: { type: 'string' }
vm: { type: 'string' }
# Identifier of the host to migrate to.
host_id: { type: 'string' }
targetHost: { type: 'string' }
# Map VDIs IDs --> SRs IDs
mapVdisSrs: { type: 'object', optional: true }
# Map VIFs IDs --> Networks IDs
mapVifsNetworks: { type: 'object', optional: true }
# Identifier of the Network use for the migration
migrationNetwork: { type: 'string', optional: true }
}
migrate.resolve = {
vm: ['id', 'VM']
host: ['host_id', 'host', 'administrate']
vm: ['vm', 'VM', 'administrate'],
host: ['targetHost', 'host', 'administrate'],
migrationNetwork: ['migrationNetwork', 'network', 'administrate'],
}
exports.migrate = migrate
#---------------------------------------------------------------------
migratePool = $coroutine ({
vm,
host
sr
network
migrationNetwork
}) ->
yield @getXAPI(vm).migrateVm(vm.id, @getXAPI(host), host.id, {
migrationNetworkId: migrationNetwork?.id
networkId: network?.id,
srId: sr?.id,
})
return
migratePool.params = {
# Identifier of the VM to migrate.
id: { type: 'string' }
# Identifier of the host to migrate to.
target_host_id: { type: 'string' }
# Identifier of the target SR
target_sr_id: { type: 'string', optional: true }
# Identifier of the target Network
target_network_id: { type: 'string', optional: true }
# Identifier of the Network use for the migration
migration_network_id: { type: 'string', optional: true }
}
migratePool.resolve = {
vm: ['id', 'VM', 'administrate'],
host: ['target_host_id', 'host', 'administrate'],
sr: ['target_sr_id', 'SR', 'administrate'],
network: ['target_network_id', 'network', 'administrate'],
migrationNetwork: ['migration_network_id', 'network', 'administrate'],
}
# TODO: camel case.
exports.migrate_pool = migratePool
#---------------------------------------------------------------------
# FIXME: human readable strings should be handled.
set = $coroutine (params) ->
{VM} = params
xapi = @getXAPI VM
xapi = @getXapi VM
{ref} = VM
{_xapiRef: ref} = VM
# Memory.
if 'memory' of params
{memory} = params
memory = parseSize(params.memory)
if memory < VM.memory.static[0]
@throw(
@@ -291,9 +320,13 @@ set = $coroutine (params) ->
if auto_poweron
yield xapi.call 'VM.add_to_other_config', ref, 'auto_poweron', 'true'
yield xapi.setPoolProperties({autoPowerOn: true})
else
yield xapi.call 'VM.remove_from_other_config', ref, 'auto_poweron'
if 'cpuWeight' of params
yield xapi.setVcpuWeight(VM._xapiId, params.cpuWeight)
# Other fields.
for param, fields of {
'name_label'
@@ -327,10 +360,12 @@ set.params = {
# Memory to allocate (in bytes).
#
# Note: static_min ≤ dynamic_min ≤ dynamic_max ≤ static_max
memory: { type: 'integer', optional: true }
memory: { type: ['integer', 'string'], optional: true }
# Kernel arguments for PV VM.
PV_args: { type: 'string', optional: true }
cpuWeight: { type: 'integer', optional: true}
}
set.resolve = {
@@ -342,12 +377,12 @@ exports.set = set
#---------------------------------------------------------------------
restart = $coroutine ({vm, force}) ->
xapi = @getXAPI(vm)
xapi = @getXapi(vm)
if force
yield xapi.call 'VM.hard_reboot', vm.ref
yield xapi.call 'VM.hard_reboot', vm._xapiRef
else
yield xapi.call 'VM.clean_reboot', vm.ref
yield xapi.call 'VM.clean_reboot', vm._xapiRef
return true
@@ -365,14 +400,12 @@ exports.restart = restart
#---------------------------------------------------------------------
clone = $coroutine ({vm, name, full_copy}) ->
xapi = @getXAPI(vm)
yield checkPermissionOnSrs.call(this, vm)
newVm = yield if full_copy
xapi.copyVm(vm.ref, null, name)
else
xapi.cloneVm(vm.ref, name)
return newVm.$id
return @getXapi(vm).cloneVm(vm._xapiRef, {
nameLabel: name,
fast: not full_copy
}).then((vm) -> vm.$id)
clone.params = {
id: { type: 'string' }
@@ -389,9 +422,50 @@ exports.clone = clone
#---------------------------------------------------------------------
copy = $coroutine ({
compress,
name: nameLabel,
sr,
vm
}) ->
if vm.$pool == sr.$pool
if vm.power_state is 'Running'
yield checkPermissionOnSrs.call(this, vm)
return @getXapi(vm).copyVm(vm._xapiId, sr._xapiId, {
nameLabel
}).then((vm) -> vm.$id)
return @getXapi(vm).remoteCopyVm(vm._xapiId, @getXapi(sr), sr._xapiId, {
compress,
nameLabel
}).then((vm) -> vm.$id)
copy.params = {
compress: {
type: 'boolean',
optional: true
},
name: {
type: 'string',
optional: true
},
vm: { type: 'string' },
sr: { type: 'string' }
}
copy.resolve = {
vm: [ 'vm', 'VM', 'administrate' ]
sr: [ 'sr', 'SR', 'operate' ]
}
exports.copy = copy
#---------------------------------------------------------------------
# TODO: rename convertToTemplate()
convert = $coroutine ({vm}) ->
yield @getXAPI(vm).call 'VM.set_is_a_template', vm.ref, true
yield @getXapi(vm).call 'VM.set_is_a_template', vm._xapiRef, true
return true
@@ -407,7 +481,9 @@ exports.convert = convert
#---------------------------------------------------------------------
snapshot = $coroutine ({vm, name}) ->
snapshot = yield @getXAPI(vm).snapshotVm(vm.ref, name)
yield checkPermissionOnSrs.call(this, vm)
snapshot = yield @getXapi(vm).snapshotVm(vm._xapiRef, name)
return snapshot.$id
snapshot.params = {
@@ -422,9 +498,69 @@ exports.snapshot = snapshot
#---------------------------------------------------------------------
rollingDeltaBackup = $coroutine ({vm, remote, tag, depth}) ->
return yield @rollingDeltaVmBackup({
vm,
remoteId: remote,
tag,
depth
})
rollingDeltaBackup.params = {
vm: { type: 'string' }
remote: { type: 'string' }
tag: { type: 'string'}
depth: { type: ['string', 'number'] }
}
rollingDeltaBackup.resolve = {
vm: ['vm', ['VM', 'VM-snapshot'], 'administrate']
}
rollingDeltaBackup.permission = 'admin'
exports.rollingDeltaBackup = rollingDeltaBackup
#---------------------------------------------------------------------
importDeltaBackup = ({sr, remote, filePath}) ->
return @importDeltaVmBackup({sr, remoteId: remote, filePath})
importDeltaBackup.params = {
sr: { type: 'string' }
remote: { type: 'string' }
filePath: { type: 'string' }
}
importDeltaBackup.resolve = {
sr: [ 'sr', 'SR', 'operate' ]
}
importDeltaBackup.permission = 'admin'
exports.importDeltaBackup = importDeltaBackup
#---------------------------------------------------------------------
deltaCopy = ({ vm, sr }) -> @deltaCopyVm(vm, sr)
deltaCopy.params = {
vm: { type: 'string' },
sr: { type: 'string' }
}
deltaCopy.resolve = {
vm: [ 'vm', 'VM', 'operate'],
sr: [ 'sr', 'SR', 'operate']
}
exports.deltaCopy = deltaCopy
#---------------------------------------------------------------------
rollingSnapshot = $coroutine ({vm, tag, depth}) ->
snapshot = yield @getXAPI(vm).rollingSnapshotVm(vm.ref, tag, depth)
return snapshot.$id
yield checkPermissionOnSrs.call(this, vm)
yield @rollingSnapshotVm(vm, tag, depth)
rollingSnapshot.params = {
id: { type: 'string' }
@@ -436,18 +572,19 @@ rollingSnapshot.resolve = {
vm: ['id', 'VM', 'administrate']
}
rollingSnapshot.description = 'Snaphots a VM with a tagged name, and removes the oldest snapshot with the same tag according to depth'
rollingSnapshot.description = 'Snapshots a VM with a tagged name, and removes the oldest snapshot with the same tag according to depth'
exports.rollingSnapshot = rollingSnapshot
#---------------------------------------------------------------------
backup = $coroutine ({vm, pathToFile, compress, onlyMetadata}) ->
yield @backupVm({vm, pathToFile, compress, onlyMetadata})
backup = $coroutine ({vm, remoteId, file, compress, onlyMetadata}) ->
yield @backupVm({vm, remoteId, file, compress, onlyMetadata})
backup.params = {
id: { type: 'string' }
pathToFile: { type: 'string' }
id: {type: 'string'}
remoteId: { type: 'string' }
file: { type: 'string' }
compress: { type: 'boolean', optional: true }
onlyMetadata: { type: 'boolean', optional: true }
}
@@ -462,13 +599,32 @@ exports.backup = backup
#---------------------------------------------------------------------
importBackup = $coroutine ({remote, file, sr}) ->
yield @importVmBackup(remote, file, sr)
return
importBackup.permission = 'admin'
importBackup.description = 'Imports a VM into host, from a file found in the chosen remote'
importBackup.params = {
remote: {type: 'string'},
file: {type: 'string'},
sr: {type: 'string'}
}
importBackup.resolve = {
sr: [ 'sr', 'SR', 'operate' ]
}
importBackup.permission = 'admin'
exports.importBackup = importBackup
#---------------------------------------------------------------------
rollingBackup = $coroutine ({vm, remoteId, tag, depth, compress, onlyMetadata}) ->
remote = yield @getRemote remoteId
if not remote?.path?
throw new Error "No such Remote #{remoteId}"
return yield @rollingBackupVm({
vm,
path: remote.path,
remoteId,
tag,
depth,
compress,
@@ -493,14 +649,31 @@ exports.rollingBackup = rollingBackup
#---------------------------------------------------------------------
start = $coroutine ({vm}) ->
yield @getXAPI(vm).call(
'VM.start', vm.ref
false # Start paused?
false # Skips the pre-boot checks?
)
rollingDrCopy = ({vm, pool, tag, depth}) ->
if vm.$pool is pool.id
throw new JsonRpcError('Disaster Recovery attempts to copy on the same pool')
return @rollingDrCopyVm({vm, sr: @getObject(pool.default_SR, 'SR'), tag, depth})
return true
rollingDrCopy.params = {
id: { type: 'string' }
pool: { type: 'string' }
tag: { type: 'string'}
depth: { type: 'number' }
}
rollingDrCopy.resolve = {
vm: ['id', ['VM', 'VM-snapshot'], 'administrate'],
pool: ['pool', 'pool', 'administrate']
}
rollingDrCopy.description = 'Copies a VM to a different pool, with a tagged name, and removes the oldest VM with the same tag from this pool, according to depth'
exports.rollingDrCopy = rollingDrCopy
#---------------------------------------------------------------------
start = ({vm}) ->
return @getXapi(vm).startVm(vm._xapiId)
start.params = {
id: { type: 'string' }
@@ -519,16 +692,16 @@ exports.start = start
# - if force is true → hard shutdown
# - if force is integer → clean shutdown and after force seconds, hard shutdown.
stop = $coroutine ({vm, force}) ->
xapi = @getXAPI vm
xapi = @getXapi vm
# Hard shutdown
if force
yield xapi.call 'VM.hard_shutdown', vm.ref
yield xapi.call 'VM.hard_shutdown', vm._xapiRef
return true
# Clean shutdown
try
yield xapi.call 'VM.clean_shutdown', vm.ref
yield xapi.call 'VM.clean_shutdown', vm._xapiRef
catch error
if error.code is 'VM_MISSING_PV_DRIVERS' or error.code is 'VM_LACKS_FEATURE_SHUTDOWN'
# TODO: Improve reporting: this message is unclear.
@@ -552,7 +725,7 @@ exports.stop = stop
#---------------------------------------------------------------------
suspend = $coroutine ({vm}) ->
yield @getXAPI(vm).call 'VM.suspend', vm.ref
yield @getXapi(vm).call 'VM.suspend', vm._xapiRef
return true
@@ -572,7 +745,7 @@ resume = $coroutine ({vm, force}) ->
if not force
force = true
yield @getXAPI(vm).call 'VM.resume', vm.ref, false, force
yield @getXapi(vm).call 'VM.resume', vm._xapiRef, false, force
return true
@@ -591,7 +764,7 @@ exports.resume = resume
# revert a snapshot to its parent VM
revert = $coroutine ({snapshot}) ->
# Attempts a revert from this snapshot to its parent VM
yield @getXAPI(snapshot).call 'VM.revert', snapshot.ref
yield @getXapi(snapshot).call 'VM.revert', snapshot._xapiRef
return true
@@ -606,29 +779,39 @@ exports.revert = revert
#---------------------------------------------------------------------
handleExport = (req, res, { stream }) ->
upstream = stream.response
handleExport = $coroutine (req, res, {xapi, id, compress, onlyMetadata}) ->
stream = yield xapi.exportVm(id, {
compress: compress ? true,
onlyMetadata: onlyMetadata ? false
})
res.on('close', () ->
stream.cancel()
)
# Remove the filename as it is already part of the URL.
upstream.headers['content-disposition'] = 'attachment'
stream.headers['content-disposition'] = 'attachment'
res.writeHead(
upstream.statusCode,
upstream.statusMessage ? '',
upstream.headers
stream.statusCode,
stream.statusMessage ? '',
stream.headers
)
stream.pipe(res)
return
# TODO: integrate in xapi.js
export_ = $coroutine ({vm, compress, onlyMetadata}) ->
stream = yield @getXAPI(vm).exportVm(vm.id, {
compress: compress ? true,
onlyMetadata: onlyMetadata ? false
})
if vm.power_state is 'Running'
yield checkPermissionOnSrs.call(this, vm)
data = {
xapi: @getXapi(vm),
id: vm._xapiId,
compress,
onlyMetadata
}
return {
$getFrom: yield @registerHttpRequest(handleExport, { stream }, {
$getFrom: yield @registerHttpRequest(handleExport, data, {
suffix: encodeURI("/#{vm.name_label}.xva")
})
}
@@ -646,33 +829,48 @@ exports.export = export_;
#---------------------------------------------------------------------
# FIXME
handleVmImport = $coroutine (req, res, { xapi, srId }) ->
# Timeout seems to be broken in Node 4.
# See https://github.com/nodejs/node/issues/3319
req.setTimeout(43200000) # 12 hours
try
vm = yield xapi.importVm(req, { srId })
res.end(format.response(0, vm.$id))
catch e
res.writeHead(500)
res.end(format.error(new JsonRpcError(e.message)))
return
# TODO: "sr_id" can be passed in URL to target a specific SR
import_ = $coroutine ({host}) ->
import_ = $coroutine ({host, sr}) ->
if not sr
if not host
throw new InvalidParameters('you must provide either host or SR')
{sessionId} = @getXAPI(host)
xapi = @getXapi(host)
sr = xapi.pool.$default_SR
if not sr
throw new InvalidParameters('there is not default SR in this pool')
else
xapi = @getXapi(sr)
url = yield @registerProxyRequest {
# Receive a POST but send a PUT.
method: 'put'
proxyMethod: 'post'
hostname: host.address
pathname: '/import/'
query: {
session_id: sessionId
}
}
return {
$sendTo: url
$sendTo: yield @registerHttpRequest(handleVmImport, {
srId: sr._xapiId,
xapi
})
}
import_.params = {
host: { type: 'string' }
host: { type: 'string', optional: true },
sr: { type: 'string', optional: true }
}
import_.resolve = {
host: ['host', 'host', 'administrate']
host: ['host', 'host', 'administrate'],
sr: ['sr', 'SR', 'administrate']
}
exports.import = import_
@@ -681,7 +879,7 @@ exports.import = import_
# FIXME: if position is used, all other disks after this position
# should be shifted.
attachDisk = $coroutine ({vm, vdi, position, mode, bootable}) ->
yield @getXAPI(vm).attachVdiToVm(vdi.id, vm.id, {
yield @getXapi(vm).attachVdiToVm(vdi._xapiId, vm._xapiId, {
bootable,
position,
readOnly: mode is 'RO'
@@ -710,7 +908,7 @@ exports.attachDisk = attachDisk
# FIXME: position should be optional and default to last.
createInterface = $coroutine ({vm, network, position, mtu, mac}) ->
vif = yield @getXAPI(vm).createVif(vm.id, network.id, {
vif = yield @getXapi(vm).createVif(vm._xapiId, network._xapiId, {
mac,
mtu,
position
@@ -735,9 +933,9 @@ exports.createInterface = createInterface
#---------------------------------------------------------------------
attachPci = $coroutine ({vm, pciId}) ->
xapi = @getXAPI vm
xapi = @getXapi vm
yield xapi.call 'VM.add_to_other_config', vm.ref, 'pci', pciId
yield xapi.call 'VM.add_to_other_config', vm._xapiRef, 'pci', pciId
return true
@@ -755,9 +953,9 @@ exports.attachPci = attachPci
#---------------------------------------------------------------------
detachPci = $coroutine ({vm}) ->
xapi = @getXAPI vm
xapi = @getXapi vm
yield xapi.call 'VM.remove_from_other_config', vm.ref, 'pci'
yield xapi.call 'VM.remove_from_other_config', vm._xapiRef, 'pci'
return true
@@ -772,103 +970,18 @@ detachPci.resolve = {
exports.detachPci = detachPci
#---------------------------------------------------------------------
stats = $coroutine ({vm, granularity}) ->
granularity = if granularity then granularity else 0
# granularity: 0: every 5 sec along last 10 minutes, 1: every minute along last 2 hours, 2: every hour along past week, 3: everyday along past year
# see http://xenserver.org/partners/developing-products-for-xenserver/18-sdk-development/96-xs-dev-rrds.html
xapi = @getXAPI vm
stats = yield @getXapiVmStats(vm, granularity)
return stats
host = @getObject vm.$container
do (type = host.type) =>
if type is 'pool'
host = @getObject host.master, 'host'
else unless type is 'host'
throw new Error "unexpected type: got #{type} instead of host"
{body} = response = yield got(
"https://#{host.address}/vm_rrd?session_id=#{xapi.sessionId}&uuid=#{vm.id}",
{ rejectUnauthorized: false }
)
if response.statusCode isnt 200
throw new Error('Cannot fetch the RRDs')
json = parseXml(body)
# Find index of needed objects for getting their values after
cpusIndexes = []
vifsIndexes = []
xvdsIndexes = []
memoryFreeIndex = []
memoryIndex = []
index = 0
$forEach(json.rrd.ds, (value, i) ->
if /^cpu[0-9]+$/.test(value.name)
cpusIndexes.push(i)
else if startsWith(value.name, 'vif_') && endsWith(value.name, '_tx')
vifsIndexes.push(i)
else if startsWith(value.name, 'vif_') && endsWith(value.name, '_rx')
vifsIndexes.push(i)
else if startsWith(value.name, 'vbd_xvd') && endsWith(value.name, '_write', 14)
xvdsIndexes.push(i)
else if startsWith(value.name, 'vbd_xvd') && endsWith(value.name, '_read', 13)
xvdsIndexes.push(i)
else if startsWith(value.name, 'memory_internal_free')
memoryFreeIndex.push(i)
else if endsWith(value.name, 'memory')
memoryIndex.push(i)
return
)
memoryFree = []
memoryUsed = []
memory = []
cpus = []
vifs = []
xvds = []
date = []
archive = json.rrd.rra[granularity]
dateStep = json.rrd.step * archive.pdp_per_row
baseDate = json.rrd.lastupdate - (json.rrd.lastupdate % dateStep)
numStep = archive.database.row.length - 1
$forEach archive.database.row, (n, key) ->
# WARNING! memoryFree is in Kb not in b, memory is in b
memoryFree.push(n.v[memoryFreeIndex]*1024)
memoryUsed.push(Math.round(parseInt(n.v[memoryIndex])-(n.v[memoryFreeIndex]*1024)))
memory.push(parseInt(n.v[memoryIndex]))
date.push(baseDate - (dateStep * (numStep - key)))
# build the multi dimensional arrays
$forEach cpusIndexes, (value, key) ->
cpus[key] ?= []
cpus[key].push(n.v[value]*100)
return
$forEach vifsIndexes, (value, key) ->
vifs[key] ?= []
vifs[key].push(if n.v[value] == 'NaN' then null else n.v[value]) # * (if key % 2 then -1 else 1))
return
$forEach xvdsIndexes, (value, key) ->
xvds[key] ?= []
xvds[key].push(if n.v[value] == 'NaN' then null else n.v[value]) # * (if key % 2 then -1 else 1))
return
return
# the final object
return {
memoryFree: memoryFree
memoryUsed: memoryUsed
memory: memory
date: date
cpus: cpus
vifs: vifs
xvds: xvds
}
stats.description = 'returns statistics about the VM'
stats.params = {
id: { type: 'string' }
id: { type: 'string' },
granularity: {
type: 'string',
optional: true
}
}
stats.resolve = {
@@ -879,25 +992,83 @@ exports.stats = stats;
#---------------------------------------------------------------------
# TODO: rename to setBootOrder
# TODO: check current VM is HVM
bootOrder = $coroutine ({vm, order}) ->
xapi = @getXAPI vm
setBootOrder = $coroutine ({vm, order}) ->
xapi = @getXapi vm
order = {order: order}
if vm.virtualizationMode == 'hvm'
yield xapi.call 'VM.set_HVM_boot_params', vm._xapiRef, order
return true
yield xapi.call 'VM.set_HVM_boot_params', vm.ref, order
@throw(
'INVALID_PARAMS'
'You can only set the boot order on a HVM guest'
)
return true
bootOrder.params = {
setBootOrder.params = {
vm: { type: 'string' },
order: { type: 'string' }
}
bootOrder.resolve = {
setBootOrder.resolve = {
vm: ['vm', 'VM', 'operate'],
}
exports.bootOrder = bootOrder
exports.setBootOrder = setBootOrder
#---------------------------------------------------------------------
recoveryStart = ({vm}) ->
return @getXapi(vm).startVmOnCd(vm._xapiId)
recoveryStart.params = {
id: { type: 'string' }
}
recoveryStart.resolve = {
vm: ['id', 'VM', 'operate'],
}
exports.recoveryStart = recoveryStart
#---------------------------------------------------------------------
getCloudInitConfig = $coroutine ({template}) ->
return yield @getXapi(template).getCloudInitConfig(template._xapiId)
getCloudInitConfig.params = {
template: { type: 'string' }
}
getCloudInitConfig.resolve = {
template: ['template', 'VM-template', 'administrate'],
}
exports.getCloudInitConfig = getCloudInitConfig
#---------------------------------------------------------------------
createCloudInitConfigDrive = $coroutine ({vm, sr, config, coreos}) ->
xapi = @getXapi vm
# CoreOS is a special CloudConfig drive created by XS plugin
if coreos
yield xapi.createCoreOsCloudInitConfigDrive(vm._xapiId, sr._xapiId, config)
# use generic Cloud Init drive
else
yield xapi.createCloudInitConfigDrive(vm._xapiId, sr._xapiId, config)
return true
createCloudInitConfigDrive.params = {
vm: { type: 'string' },
sr: { type: 'string' },
config: { type: 'string' }
}
createCloudInitConfigDrive.resolve = {
vm: ['vm', 'VM', 'administrate'],
sr: [ 'sr', 'SR', 'operate' ]
}
exports.createCloudInitConfigDrive = createCloudInitConfigDrive
#=====================================================================
Object.defineProperty(exports, '__esModule', {
value: true
})

View File

@@ -1,14 +1,15 @@
import Bluebird from 'bluebird'
import Collection, {ModelAlreadyExists} from '../collection'
import difference from 'lodash.difference'
import filter from 'lodash.filter'
import forEach from 'lodash.foreach'
import getKey from 'lodash.keys'
import isEmpty from 'lodash.isempty'
import map from 'lodash.map'
import {createClient as createRedisClient, RedisClient, Multi} from 'redis'
import {promisifyAll} from '../utils'
import {
forEach,
isEmpty,
mapToArray,
promisifyAll
} from '../utils'
// ===================================================================
@@ -53,7 +54,7 @@ export default class Redis extends Collection {
const {redis} = this
const models = []
return Bluebird.map(ids, id => {
return Promise.all(mapToArray(ids, id => {
return redis.hgetallAsync(prefix + id).then(model => {
// If empty, consider it a no match.
if (isEmpty(model)) {
@@ -65,7 +66,7 @@ export default class Redis extends Collection {
models.push(model)
})
}).return(models)
})).then(() => models)
}
_add (models, {replace = false} = {}) {
@@ -74,7 +75,7 @@ export default class Redis extends Collection {
const {indexes, prefix, redis, idPrefix = ''} = this
return Bluebird.map(models, async function (model) {
return Promise.all(mapToArray(models, async model => {
// Generate a new identifier if necessary.
if (model.id === undefined) {
model.id = idPrefix + String(await redis.incrAsync(prefix + '_id'))
@@ -99,8 +100,10 @@ export default class Redis extends Collection {
params.push(name, value)
})
const key = `${prefix}:${model.id}`
const promises = [
redis.hmsetAsync(prefix + ':' + model.id, ...params)
redis.delAsync(key),
redis.hmsetAsync(key, ...params)
]
// Update indexes.
@@ -117,7 +120,7 @@ export default class Redis extends Collection {
await Promise.all(promises)
return model
})
}))
}
_get (properties) {
@@ -146,7 +149,7 @@ export default class Redis extends Collection {
throw new Error('fields not indexed: ' + unfit.join())
}
const keys = map(properties, (value, index) => prefix + '_' + index + ':' + value)
const keys = mapToArray(properties, (value, index) => `${prefix}_${index}:${value}`)
return redis.sinterAsync(...keys).then(ids => this._extract(ids))
}
@@ -160,7 +163,7 @@ export default class Redis extends Collection {
redis.sremAsync(prefix + '_ids', ...ids),
// Remove the models.
redis.delAsync(map(ids, id => prefix + ':' + id))
redis.delAsync(mapToArray(ids, id => `${prefix}:${id}`))
])
}

View File

@@ -1,8 +1,6 @@
import {EventEmitter} from 'events'
// ===================================================================
// const noop = () => {}
import {createRawObject, noop} from './utils'
// ===================================================================
@@ -10,13 +8,13 @@ export default class Connection extends EventEmitter {
constructor () {
super()
this._data = Object.create(null)
this._data = createRawObject()
}
// Close the connection.
close () {
// Prevent errors when the connection is closed more than once.
// this.close = noop
this.close = noop
this.emit('close')
}

View File

@@ -1,69 +1,83 @@
import bind from 'lodash.bind'
import isArray from 'lodash.isarray'
import isFunction from 'lodash.isfunction'
import {
isPromise,
noop,
pFinally
} from './utils'
// ===================================================================
const {defineProperty} = Object
const {
defineProperties,
defineProperty,
getOwnPropertyDescriptor
} = Object
// ===================================================================
// See: https://github.com/jayphelps/core-decorators.js#autobind
export function autobind (target, key, {
//
// TODO: make it work for all class methods.
export const autobind = (target, key, {
configurable,
enumerable,
value: fn,
writable
}) {
return {
configurable,
enumerable,
}) => ({
configurable,
enumerable,
get () {
const bounded = bind(fn, this)
get () {
const bounded = bind(fn, this)
defineProperty(this, key, {
configurable: true,
enumerable: false,
value: bounded,
writable: true
})
return bounded
},
set (newValue) {
if (this === target) {
// New value directly set on the prototype.
delete this[key]
this[key] = newValue
} else {
// New value set on a child object.
// Cannot use assignment because it will call the setter on
// the prototype.
defineProperty(this, key, {
configurable: true,
enumerable: false,
value: bounded,
enumerable: true,
value: newValue,
writable: true
})
return bounded
},
set (newValue) {
if (this === target) {
// New value directly set on the prototype.
delete this[key]
this[key] = newValue
} else {
// New value set on a child object.
// Cannot use assignment because it will call the setter on
// the prototype.
defineProperty(this, key, {
configurable: true,
enumerable: true,
value: newValue,
writable: true
})
}
}
}
}
})
// -------------------------------------------------------------------
// Debounce decorator for methods.
//
// See: https://github.com/wycats/javascript-decorators
export const debounce = (duration) => (target, name, descriptor) => {
const {value: fn} = descriptor
//
// TODO: make it work for single functions.
export const debounce = duration => (target, name, descriptor) => {
const fn = descriptor.value
// This symbol is used to store the related data directly on the
// current object.
const s = Symbol()
function debounced () {
let data = this[s] || (this[s] = {
const data = this[s] || (this[s] = {
lastCall: 0,
wrapper: null
})
@@ -80,8 +94,233 @@ export const debounce = (duration) => (target, name, descriptor) => {
}
return data.wrapper()
}
debounced.reset = (obj) => { delete obj[s] }
debounced.reset = obj => { delete obj[s] }
descriptor.value = debounced
return descriptor
}
// -------------------------------------------------------------------
const _push = Array.prototype.push

// Decorator/wrapper which injects a `defer` function as first
// argument of the wrapped function: callbacks registered via
// `defer(cb)` are executed in reverse (LIFO) order once the function
// terminates, whether it returns, throws or settles a promise.
//
// `defer.clear()` drops every deferred registered so far.
//
// Works both as a method decorator (descriptor given) and as a plain
// function wrapper.
export const deferrable = (target, name, descriptor) => {
  let fn
  function newFn () {
    const deferreds = []
    const defer = fn => {
      deferreds.push(fn)
    }
    defer.clear = () => {
      deferreds.length = 0
    }

    // Prepend `defer` to the original arguments.
    const args = [ defer ]
    _push.apply(args, arguments)

    // Runs the registered deferreds in LIFO order.
    let executeDeferreds = () => {
      let i = deferreds.length
      while (i) {
        deferreds[--i]()
      }
    }

    try {
      const result = fn.apply(this, args)
      if (isPromise(result)) {
        // Asynchronous case: run the deferreds when the promise
        // settles (uses the Babel function-bind operator `::`)…
        result::pFinally(executeDeferreds)

        // …and do not execute them in the finally block below.
        executeDeferreds = noop
      }
      return result
    } finally {
      executeDeferreds()
    }
  }

  if (descriptor) {
    // Used as a method decorator.
    fn = descriptor.value
    descriptor.value = newFn

    return descriptor
  }

  // Used as a plain function wrapper.
  fn = target
  return newFn
}
// Variant of `deferrable` where the deferreds only run on failure:
// when the wrapped function succeeds, `defer.clear()` is invoked
// automatically before the deferreds would execute.
deferrable.onFailure = (target, name, descriptor) => {
  let fn

  function newFn (defer) {
    const outcome = fn.apply(this, arguments)

    // Synchronous success: discard the deferreds right away.
    if (!isPromise(outcome)) {
      defer.clear()
      return outcome
    }

    // Asynchronous success: discard them once the promise fulfills.
    return outcome.then(value => {
      defer.clear()
      return value
    })
  }

  if (descriptor) {
    fn = descriptor.value
    descriptor.value = newFn
  } else {
    fn = target
    target = newFn
  }

  return deferrable(target, name, descriptor)
}
// Variant of `deferrable` where the deferreds only run on success:
// when the wrapped function fails (throws or rejects),
// `defer.clear()` is invoked automatically before rethrowing.
deferrable.onSuccess = (target, name, descriptor) => {
  let fn

  function newFn (defer) {
    try {
      const outcome = fn.apply(this, arguments)

      if (!isPromise(outcome)) {
        return outcome
      }

      // Asynchronous failure: discard the deferreds, keep rejecting.
      return outcome.catch(error => {
        defer.clear()
        throw error
      })
    } catch (error) {
      // Synchronous failure: discard the deferreds, rethrow.
      defer.clear()
      throw error
    }
  }

  if (descriptor) {
    fn = descriptor.value
    descriptor.value = newFn
  } else {
    fn = target
    target = newFn
  }

  return deferrable(target, name, descriptor)
}
// -------------------------------------------------------------------
// Lists all own property keys (string names and symbols) of an
// object, preferring `Reflect.ownKeys` when available and falling
// back to `Object.getOwnPropertyNames` (+ symbols when supported).
const _ownKeys = (() => {
  if (typeof Reflect !== 'undefined' && Reflect.ownKeys) {
    return Reflect.ownKeys
  }

  const { getOwnPropertyNames, getOwnPropertySymbols } = Object
  if (!getOwnPropertySymbols) {
    return getOwnPropertyNames
  }

  return obj => getOwnPropertyNames(obj).concat(getOwnPropertySymbols(obj))
})()
// Binds the `get`, `set` and (function) `value` slots of a property
// descriptor to `thisArg`.
//
// Note: mutates and returns the descriptor passed in.
//
// Uses the native `Function.prototype.bind` and a `typeof` check
// instead of the lodash.bind/lodash.isfunction helpers — behavior is
// identical for plain binding without partial arguments.
const _bindPropertyDescriptor = (descriptor, thisArg) => {
  const { get, set, value } = descriptor
  if (get) {
    descriptor.get = get.bind(thisArg)
  }
  if (set) {
    descriptor.set = set.bind(thisArg)
  }
  if (typeof value === 'function') {
    descriptor.value = value.bind(thisArg)
  }

  return descriptor
}
// A prototype property is skipped by the mix-in machinery when it is
// "private" (underscore-prefixed) or the constructor itself.
const _isIgnoredProperty = name =>
  name.charAt(0) === '_' || name === 'constructor'
// Static properties owned by the Function object itself; these must
// never be copied between classes and mix-ins.
const _IGNORED_STATIC_PROPERTIES = Object.assign(Object.create(null), {
  arguments: true,
  caller: true,
  length: true,
  name: true,
  prototype: true
})

// Truthy when `name` is one of the special static properties above.
function _isIgnoredStaticProperty (name) {
  return _IGNORED_STATIC_PROPERTIES[name]
}
// Class decorator which mixes other classes into the decorated one.
//
// Each mix-in is instantiated with the decorated instance as its only
// constructor argument, and its prototype properties — bound to that
// mix-in instance — are copied onto the decorated instance.
// Conflicting properties raise an Error.
export const mixin = MixIns => Class => {
  // Accept a single mix-in as well as an array of them.
  if (!isArray(MixIns)) {
    MixIns = [ MixIns ]
  }

  const { name } = Class

  // NOTE(review): `Decorator` is an arrow function and therefore
  // cannot be invoked with `new` — confirm call sites use it as a
  // plain factory.
  const Decorator = (...args) => {
    const instance = new Class(...args)

    for (const MixIn of MixIns) {
      const { prototype } = MixIn
      const mixinInstance = new MixIn(instance)
      // Null-prototype dictionary: avoids inherited keys.
      const descriptors = { __proto__: null }
      for (const prop of _ownKeys(prototype)) {
        if (_isIgnoredProperty(prop)) {
          continue
        }

        // Refuse to shadow anything already present on the instance
        // (or brought in by a previous mix-in).
        if (prop in instance) {
          throw new Error(`${name}#${prop} is already defined`)
        }

        descriptors[prop] = _bindPropertyDescriptor(
          getOwnPropertyDescriptor(prototype, prop),
          mixinInstance
        )
      }
      defineProperties(instance, descriptors)
    }

    return instance
  }

  // Copy original and mixed-in static properties on Decorator class.
  const descriptors = { __proto__: null }
  for (const prop of _ownKeys(Class)) {
    let descriptor
    if (!(
      // Special properties are not defined...
      _isIgnoredStaticProperty(prop) &&

      // if they already exist...
      (descriptor = getOwnPropertyDescriptor(Decorator, prop)) &&

      // and are not configurable.
      !descriptor.configurable
    )) {
      descriptors[prop] = getOwnPropertyDescriptor(Class, prop)
    }
  }
  for (const MixIn of MixIns) {
    for (const prop of _ownKeys(MixIn)) {
      if (_isIgnoredStaticProperty(prop)) {
        continue
      }

      if (prop in descriptors) {
        throw new Error(`${name}.${prop} is already defined`)
      }

      descriptors[prop] = getOwnPropertyDescriptor(MixIn, prop)
    }
  }
  defineProperties(Decorator, descriptors)

  return Decorator
}

View File

@@ -4,11 +4,11 @@ import expect from 'must'
// ===================================================================
import {autobind, debounce} from './decorators'
import {autobind, debounce, deferrable} from './decorators'
// ===================================================================
describe('autobind', function () {
describe('autobind()', () => {
class Foo {
@autobind
getFoo () {
@@ -16,25 +16,25 @@ describe('autobind', function () {
}
}
it('returns a bound instance for a method', function () {
it('returns a bound instance for a method', () => {
const foo = new Foo()
const {getFoo} = foo
const { getFoo } = foo
expect(getFoo()).to.equal(foo)
})
it('returns the same bound instance each time', function () {
it('returns the same bound instance each time', () => {
const foo = new Foo()
expect(foo.getFoo).to.equal(foo.getFoo)
})
it('works with multiple instances of the same class', function () {
it('works with multiple instances of the same class', () => {
const foo1 = new Foo()
const foo2 = new Foo()
const {getFoo: getFoo1} = foo1
const {getFoo: getFoo2} = foo2
const getFoo1 = foo1.getFoo
const getFoo2 = foo2.getFoo
expect(getFoo1()).to.equal(foo1)
expect(getFoo2()).to.equal(foo2)
@@ -43,7 +43,7 @@ describe('autobind', function () {
// -------------------------------------------------------------------
describe('debounce', function () {
describe('debounce()', () => {
let i
class Foo {
@@ -53,11 +53,11 @@ describe('debounce', function () {
}
}
beforeEach(function () {
beforeEach(() => {
i = 0
})
it('works', function (done) {
it('works', done => {
const foo = new Foo()
expect(i).to.equal(0)
@@ -68,7 +68,7 @@ describe('debounce', function () {
foo.foo()
expect(i).to.equal(1)
setTimeout(function () {
setTimeout(() => {
foo.foo()
expect(i).to.equal(2)
@@ -76,3 +76,98 @@ describe('debounce', function () {
}, 2e1)
})
})
// -------------------------------------------------------------------

// Tests for the `deferrable` decorator: registered deferreds must run
// once the wrapped function terminates (or its promise settles).
describe('deferrable()', () => {
  it('works with normal termination', () => {
    let i = 0
    const fn = deferrable(defer => {
      i += 2
      defer(() => { i -= 2 })

      i *= 2
      defer(() => { i /= 2 })

      return i
    })

    // (0 + 2) * 2 = 4 is returned, then the deferreds undo both
    // operations, restoring `i` to 0.
    expect(fn()).to.equal(4)
    expect(i).to.equal(0)
  })

  it('defer.clear() removes previous deferreds', () => {
    let i = 0
    const fn = deferrable(defer => {
      i += 2
      defer(() => { i -= 2 })

      // Only deferreds registered after this point remain.
      defer.clear()
      i *= 2
      defer(() => { i /= 2 })

      return i
    })

    expect(fn()).to.equal(4)
    expect(i).to.equal(2)
  })

  it('works with exception', () => {
    let i = 0
    const fn = deferrable(defer => {
      i += 2
      defer(() => { i -= 2 })

      i *= 2
      defer(() => { i /= 2 })

      // Deferreds must also run when the function throws.
      throw i
    })

    expect(() => fn()).to.throw(4)
    expect(i).to.equal(0)
  })

  it('works with promise resolution', async () => {
    let i = 0
    const fn = deferrable(async defer => {
      i += 2
      defer(() => { i -= 2 })

      i *= 2
      defer(() => { i /= 2 })

      // Wait a turn of the events loop.
      await Promise.resolve()

      return i
    })

    await expect(fn()).to.eventually.equal(4)
    expect(i).to.equal(0)
  })

  it('works with promise rejection', async () => {
    let i = 0
    const fn = deferrable(async defer => {
      // Wait a turn of the events loop.
      await Promise.resolve()

      i += 2
      defer(() => { i -= 2 })

      i *= 2
      defer(() => { i /= 2 })

      // Wait a turn of the events loop.
      await Promise.resolve()

      throw i
    })

    await expect(fn()).to.reject.to.equal(4)
    expect(i).to.equal(0)
  })
})

84
src/fatfs-buffer.js Normal file
View File

@@ -0,0 +1,84 @@
// Buffer driver for [fatfs](https://github.com/natevw/fatfs).
//
// Usage:
//
// ```js
// import fatfs from 'fatfs'
// import fatfsBuffer, { init as fatfsBufferInit } from './fatfs-buffer'
//
// const buffer = fatfsBufferInit()
//
// const fs = fatfs.createFileSystem(fatfsBuffer(buffer))
//
// fs.writeFile('/foo', 'content of foo', function (err, content) {
// if (err) {
// console.error(err)
// }
// })
// ```
import { boot16 as fat16 } from 'fatfs/structs'
const SECTOR_SIZE = 512

// Creates a 10MB buffer and initializes it as a FAT 16 volume.
//
// Returns the initialized Buffer.
export function init () {
  // `Buffer.alloc()` replaces the deprecated, unsafe `new Buffer()`
  // constructor and zero-fills the memory, so the explicit
  // `buf.fill(0)` is no longer needed.
  const buf = Buffer.alloc(10 * 1024 * 1024) // 10MB

  // Volume boot record.
  //
  // https://github.com/natevw/fatfs/blob/master/structs.js
  //
  // NOTE(review): per the FAT spec, VolLab/FilSysType should be
  // space-padded to 11/8 characters — confirm against the original
  // source (whitespace may have been collapsed here).
  fat16.pack({
    jmpBoot: Buffer.from('eb3c90', 'hex'),
    OEMName: 'mkfs.fat',
    BytsPerSec: SECTOR_SIZE,
    SecPerClus: 4,
    ResvdSecCnt: 1,
    NumFATs: 2,
    RootEntCnt: 512,
    TotSec16: 20480,
    Media: 248,
    FATSz16: 20,
    SecPerTrk: 32,
    NumHeads: 64,
    HiddSec: 0,
    TotSec32: 0,
    DrvNum: 128,
    Reserved1: 0,
    BootSig: 41,
    VolID: 895111106,
    VolLab: 'NO NAME ',
    FilSysType: 'FAT16 '
  }, buf)

  // End of sector.
  buf[0x1fe] = 0x55
  buf[0x1ff] = 0xaa

  // Mark sector as reserved.
  buf[0x200] = 0xf8
  buf[0x201] = 0xff
  buf[0x202] = 0xff
  buf[0x203] = 0xff

  // Mark sector as reserved.
  buf[0x2a00] = 0xf8
  buf[0x2a01] = 0xff
  buf[0x2a02] = 0xff
  buf[0x2a03] = 0xff

  return buf
}

// fatfs block-device driver backed by the given buffer.
//
// Exposes the interface expected by `fatfs.createFileSystem()`:
// sector size/count plus callback-style sector read/write.
const createBufferDriver = buffer => ({
  sectorSize: SECTOR_SIZE,
  numSectors: Math.floor(buffer.length / SECTOR_SIZE),
  readSectors: (i, target, cb) => {
    buffer.copy(target, 0, i * SECTOR_SIZE)
    cb()
  },
  writeSectors: (i, source, cb) => {
    source.copy(buffer, i * SECTOR_SIZE, 0)
    cb()
  }
})
export default createBufferDriver

54
src/glob-matcher.js Normal file
View File

@@ -0,0 +1,54 @@
// See: https://gist.github.com/julien-f/5b9a3537eb82a34b04e2
var matcher = require('micromatch').matcher
module.exports = function globMatcher (patterns, opts) {
if (!Array.isArray(patterns)) {
if (patterns[0] === '!') {
var m = matcher(patterns.slice(1), opts)
return function (string) {
return !m(string)
}
} else {
return matcher(patterns, opts)
}
}
var noneMustMatch = []
var anyMustMatch = []
// TODO: could probably be optimized by combining all positive patterns (and all negative patterns) as a single matcher.
for (var i = 0, n = patterns.length; i < n; ++i) {
var pattern = patterns[i]
if (pattern[0] === '!') {
noneMustMatch.push(matcher(pattern.slice(1), opts))
} else {
anyMustMatch.push(matcher(pattern, opts))
}
}
var nNone = noneMustMatch.length
var nAny = anyMustMatch.length
return function (string) {
var i
for (i = 0; i < nNone; ++i) {
if (noneMustMatch[i](string)) {
return false
}
}
if (nAny === 0) {
return true
}
for (i = 0; i < nAny; ++i) {
if (anyMustMatch[i](string)) {
return true
}
}
return false
}
}

121
src/http-request.js Normal file
View File

@@ -0,0 +1,121 @@
import assign from 'lodash.assign'
import getStream from 'get-stream'
import isString from 'lodash.isstring'
import startsWith from 'lodash.startswith'
import { parse as parseUrl } from 'url'
import { request as httpRequest } from 'http'
import { request as httpsRequest } from 'https'
import { stringify as formatQueryString } from 'querystring'
// -------------------------------------------------------------------
// Thin promise wrapper around Node's `http`/`https` `request()`.
//
// Arguments may be URL strings (parsed) and/or options objects; they
// are merged left to right into a single options object.
//
// Returns a promise of the HTTP response, augmented with:
// - `cancel()`: emits an error on the request and aborts it;
// - `readAll()`: resolves with the whole response body;
// - `request`: the underlying `http.ClientRequest`.
//
// The promise rejects on non-2xx statuses; the error then exposes
// `code` (the status code) and `response`.
export default (...args) => {
  let req

  const pResponse = new Promise((resolve, reject) => {
    // Merge all arguments into a single options object.
    const opts = {}
    for (let i = 0, { length } = args; i < length; ++i) {
      const arg = args[i]
      assign(opts, isString(arg) ? parseUrl(arg) : arg)
    }

    const {
      body,

      // Shallow-clone headers to avoid mutating the caller's object.
      headers: { ...headers } = {},

      protocol,
      query,
      ...rest
    } = opts

    // Infer `content-length` when not set: from the string body, from
    // the body's own headers (e.g. a piped upstream response), or
    // from `body.length`.
    if (headers['content-length'] == null && body != null) {
      let tmp
      if (isString(body)) {
        headers['content-length'] = Buffer.byteLength(body)
      } else if (
        (
          (tmp = body.headers) &&
          (tmp = tmp['content-length']) != null
        ) ||
        (tmp = body.length) != null
      ) {
        headers['content-length'] = tmp
      }
    }

    // Serialize `query` (object or string) into the request path.
    if (query) {
      rest.path = `${rest.pathname || rest.path || '/'}?${
        isString(query)
          ? query
          : formatQueryString(query)
      }`
    }

    // Some headers can be explicitly removed by setting them to null.
    const headersToRemove = []
    for (const header in headers) {
      if (headers[header] === null) {
        delete headers[header]
        headersToRemove.push(header)
      }
    }

    // Pick the HTTP or HTTPS implementation from the protocol.
    req = (
      protocol && startsWith(protocol.toLowerCase(), 'https')
        ? httpsRequest
        : httpRequest
    )({
      ...rest,
      headers
    })
    for (let i = 0, { length } = headersToRemove; i < length; ++i) {
      req.removeHeader(headersToRemove[i])
    }

    // Send the body: pipe streams, write anything else directly.
    if (body) {
      if (typeof body.pipe === 'function') {
        body.pipe(req)
      } else {
        req.end(body)
      }
    } else {
      req.end()
    }

    req.on('error', reject)
    req.once('response', resolve)
  }).then(response => {
    response.cancel = () => {
      req.abort()
    }
    response.readAll = () => getStream(response)

    // NOTE(review): `content-length` is exposed as-is (a string) —
    // confirm consumers do not expect a number.
    const length = response.headers['content-length']
    if (length) {
      response.length = length
    }

    // Reject on non-2xx statuses, exposing the response on the error.
    const code = response.statusCode
    if (code < 200 || code >= 300) {
      const error = new Error(response.statusMessage)
      error.code = code
      Object.defineProperty(error, 'response', {
        configurable: true,
        value: response,
        writable: true
      })

      throw error
    }

    return response
  })

  pResponse.cancel = () => {
    req.emit('error', new Error('HTTP request canceled!'))
    req.abort()
  }
  pResponse.readAll = () => pResponse.then(response => response.readAll())
  pResponse.request = req

  return pResponse
}

View File

@@ -2,23 +2,22 @@ import createLogger from 'debug'
const debug = createLogger('xo:main')
import appConf from 'app-conf'
import assign from 'lodash.assign'
import bind from 'lodash.bind'
import blocked from 'blocked'
import createExpress from 'express'
import eventToPromise from 'event-to-promise'
import forEach from 'lodash.foreach'
import has from 'lodash.has'
import helmet from 'helmet'
import includes from 'lodash.includes'
import isArray from 'lodash.isarray'
import isFunction from 'lodash.isfunction'
import map from 'lodash.map'
import pick from 'lodash.pick'
import proxyConsole from './proxy-console'
import proxyRequest from 'proxy-http-request'
import serveStatic from 'serve-static'
import startsWith from 'lodash.startswith'
import WebSocket from 'ws'
import {compile as compileJade} from 'jade'
import {posix as posixPath} from 'path'
import {
AlreadyAuthenticated,
@@ -28,23 +27,30 @@ import {
NotImplemented
} from './api-errors'
import JsonRpcPeer from 'json-rpc-peer'
import {readFile} from 'fs-promise'
import {
readFile,
readdir
} from 'fs-promise'
import * as apiMethods from './api/index'
import Api from './api'
import JobExecutor from './job-executor'
import RemoteHandler from './remote-handler'
import Scheduler from './scheduler'
import WebServer from 'http-server-plus'
import wsProxy from './ws-proxy'
import Xo from './xo'
import {
createRawObject,
forEach,
mapToArray,
pFromCallback
} from './utils'
import bodyParser from 'body-parser'
import connectFlash from 'connect-flash'
import cookieParser from 'cookie-parser'
import expressSession from 'express-session'
import passport from 'passport'
import {Strategy as LocalStrategy} from 'passport-local'
import { parse as parseCookies } from 'cookie'
import { Strategy as LocalStrategy } from 'passport-local'
// ===================================================================
@@ -58,15 +64,6 @@ const warn = (...args) => {
// ===================================================================
const DEFAULTS = {
http: {
listen: [
{ port: 80 }
],
mounts: {}
}
}
const DEPRECATED_ENTRIES = [
'users',
'servers'
@@ -74,7 +71,6 @@ const DEPRECATED_ENTRIES = [
async function loadConfiguration () {
const config = await appConf.load('xo-server', {
defaults: DEFAULTS,
ignoreUnknownFormats: true
})
@@ -95,6 +91,8 @@ async function loadConfiguration () {
function createExpressApp () {
const app = createExpress()
app.use(helmet())
// Registers the cookie-parser and express-session middlewares,
// necessary for connect-flash.
app.use(cookieParser())
@@ -121,7 +119,7 @@ function createExpressApp () {
}
async function setUpPassport (express, xo) {
const strategies = Object.create(null)
const strategies = createRawObject()
xo.registerPassportStrategy = strategy => {
passport.use(strategy)
@@ -144,9 +142,6 @@ async function setUpPassport (express, xo) {
const SIGNIN_STRATEGY_RE = /^\/signin\/([^/]+)(\/callback)?(:?\?.*)?$/
express.use(async (req, res, next) => {
// A relative path is needed to avoid breaking reverse proxies.
const basePath = posixPath.relative(req.path, '/')
const matches = req.url.match(SIGNIN_STRATEGY_RE)
if (matches) {
@@ -157,7 +152,7 @@ async function setUpPassport (express, xo) {
if (!user) {
req.flash('error', info ? info.message : 'Invalid credentials')
return res.redirect(`${basePath}/signin`)
return res.redirect('/signin')
}
// The cookie will be set in via the next request because some
@@ -173,7 +168,7 @@ async function setUpPassport (express, xo) {
matches[1] === 'local' && req.body['remember-me'] === 'on'
)
res.redirect(basePath)
res.redirect('/')
})(req, res, next)
}
@@ -193,10 +188,10 @@ async function setUpPassport (express, xo) {
next()
} else if (req.cookies.token) {
next()
} else if (/fontawesome|images|styles/.test(req.url)) {
} else if (/favicon|fontawesome|images|styles/.test(req.url)) {
next()
} else {
return res.redirect(`${basePath}/signin`)
return res.redirect('/signin')
}
})
@@ -207,7 +202,7 @@ async function setUpPassport (express, xo) {
const user = await xo.authenticateUser({username, password})
done(null, user)
} catch (error) {
done(error.message)
done(null, false, { message: error.message })
}
}
))
@@ -215,19 +210,7 @@ async function setUpPassport (express, xo) {
// ===================================================================
const debugPlugin = createLogger('xo:plugin')
async function registerPlugin (pluginConf, pluginName) {
debugPlugin('register %s', pluginName)
const pluginPath = (function (name) {
try {
return require.resolve('xo-server-' + name)
} catch (e) {
return require.resolve(name)
}
})(pluginName)
async function registerPlugin (pluginPath, pluginName) {
const plugin = require(pluginPath)
// Supports both “normal” CommonJS and Babel's ES2015 modules.
@@ -242,36 +225,72 @@ async function registerPlugin (pluginConf, pluginName) {
? factory({ xo: this })
: factory
await this._registerPlugin(
await this.registerPlugin(
pluginName,
instance,
configurationSchema,
pluginConf
configurationSchema
)
}
function registerPlugins (plugins, xo) {
return Promise.all(map(plugins, (conf, name) => {
return registerPlugin.call(xo, conf, name).then(
() => {
debugPlugin(`successfully register ${name}`)
},
error => {
debugPlugin(`failed register ${name}`)
debugPlugin(error)
}
)
const debugPlugin = createLogger('xo:plugin')
function registerPluginWrapper (pluginPath, pluginName) {
debugPlugin('register %s', pluginName)
return registerPlugin.call(this, pluginPath, pluginName).then(
() => {
debugPlugin(`successfully register ${pluginName}`)
},
error => {
debugPlugin(`failed register ${pluginName}`)
debugPlugin(error)
}
)
}
const PLUGIN_PREFIX = 'xo-server-'
const PLUGIN_PREFIX_LENGTH = PLUGIN_PREFIX.length
async function registerPluginsInPath (path) {
const files = await readdir(path).catch(error => {
if (error.code === 'ENOENT') {
return []
}
throw error
})
await Promise.all(mapToArray(files, name => {
if (startsWith(name, PLUGIN_PREFIX)) {
return registerPluginWrapper.call(
this,
`${path}/${name}`,
name.slice(PLUGIN_PREFIX_LENGTH)
)
}
}))
}
async function registerPlugins (xo) {
await Promise.all(mapToArray([
`${__dirname}/../node_modules/`,
'/usr/local/lib/node_modules/'
], registerPluginsInPath, xo))
}
// ===================================================================
async function makeWebServerListen (opts) {
// Read certificate and key if necessary.
const {certificate, key} = opts
if (certificate && key) {
[opts.certificate, opts.key] = await Promise.all([
readFile(certificate),
async function makeWebServerListen ({
certificate,
// The properties was called `certificate` before.
cert = certificate,
key,
...opts
}) {
if (cert && key) {
[opts.cert, opts.key] = await Promise.all([
readFile(cert),
readFile(key)
])
}
@@ -295,14 +314,14 @@ async function makeWebServerListen (opts) {
async function createWebServer (opts) {
const webServer = new WebServer()
await Promise.all(map(opts, makeWebServerListen, webServer))
await Promise.all(mapToArray(opts, makeWebServerListen, webServer))
return webServer
}
// ===================================================================
const setUpProxies = (express, opts) => {
const setUpProxies = (express, opts, xo) => {
if (!opts) {
return
}
@@ -320,6 +339,8 @@ const setUpProxies = (express, opts) => {
const webSocketServer = new WebSocket.Server({
noServer: true
})
xo.on('stop', () => pFromCallback(cb => webSocketServer.close(cb)))
express.on('upgrade', (req, socket, head) => {
const {url} = req
@@ -353,13 +374,6 @@ const setUpStaticFiles = (express, opts) => {
// ===================================================================
function setUpWebSocketServer (webServer) {
return new WebSocket.Server({
server: webServer,
path: '/api/'
})
}
const errorClasses = {
ALREADY_AUTHENTICATED: AlreadyAuthenticated,
INVALID_CREDENTIAL: InvalidCredential,
@@ -380,7 +394,7 @@ const apiHelpers = {
// Handles both properties and wrapped models.
const properties = server.properties || server
server = pick(properties, 'id', 'host', 'username')
server = pick(properties, 'id', 'host', 'username', 'readOnly')
// Injects connection status.
const xapi = this._xapis[server.id]
@@ -394,17 +408,29 @@ const apiHelpers = {
}
}
const setUpApi = (webSocketServer, xo) => {
const context = Object.create(xo)
assign(xo, apiHelpers)
const setUpApi = (webServer, xo, verboseLogsOnErrors) => {
const webSocketServer = new WebSocket.Server({
server: webServer,
path: '/api/'
})
xo.on('stop', () => pFromCallback(cb => webSocketServer.close(cb)))
// FIXME: it can cause issues if there any property assignments in
// XO methods called from the API.
const context = { __proto__: xo, ...apiHelpers }
const api = new Api({
context
context,
verboseLogsOnErrors
})
xo.defineProperty('api', api)
api.addMethods(apiMethods)
webSocketServer.on('connection', socket => {
debug('+ WebSocket connection')
const { remoteAddress } = socket.upgradeReq.socket
debug('+ WebSocket connection (%s)', remoteAddress)
// Create the abstract XO object for this connection.
const connection = xo.createUserConnection()
@@ -422,7 +448,7 @@ const setUpApi = (webSocketServer, xo) => {
// Close the XO connection with this WebSocket.
socket.once('close', () => {
debug('- WebSocket connection')
debug('- WebSocket connection (%s)', remoteAddress)
connection.close()
})
@@ -445,25 +471,6 @@ const setUpApi = (webSocketServer, xo) => {
}
})
})
return api
}
const setUpScheduler = (api, xo) => {
const jobExecutor = new JobExecutor(xo, api)
const scheduler = new Scheduler(xo, {executor: jobExecutor})
xo.scheduler = scheduler
return scheduler
}
const setUpRemoteHandler = async xo => {
const remoteHandler = new RemoteHandler()
xo.remoteHandler = remoteHandler
xo.initRemotes()
xo.syncAllRemotes()
return remoteHandler
}
// ===================================================================
@@ -474,8 +481,9 @@ const setUpConsoleProxy = (webServer, xo) => {
const webSocketServer = new WebSocket.Server({
noServer: true
})
xo.on('stop', () => pFromCallback(cb => webSocketServer.close(cb)))
webServer.on('upgrade', (req, socket, head) => {
webServer.on('upgrade', async (req, socket, head) => {
const matches = CONSOLE_PROXY_PATH_RE.exec(req.url)
if (!matches) {
return
@@ -483,7 +491,23 @@ const setUpConsoleProxy = (webServer, xo) => {
const [, id] = matches
try {
const xapi = xo.getXAPI(id, ['VM', 'VM-controller'])
// TODO: factorize permissions checking in an Express middleware.
{
const { token } = parseCookies(req.headers.cookie)
const user = await xo.authenticateUser({ token })
if (!await xo.hasPermissions(user.id, [ [ id, 'operate' ] ])) { // eslint-disable-line space-before-keywords
throw new InvalidCredential()
}
const { remoteAddress } = socket
debug('+ Console proxy (%s - %s)', user.name, remoteAddress)
socket.on('close', () => {
debug('- Console proxy (%s - %s)', user.name, remoteAddress)
})
}
const xapi = xo.getXapi(id, ['VM', 'VM-controller'])
const vmConsole = xapi.getVmConsole(id)
// FIXME: lost connection due to VM restart is not detected.
@@ -498,43 +522,36 @@ const setUpConsoleProxy = (webServer, xo) => {
// ===================================================================
const registerPasswordAuthenticationProvider = (xo) => {
const registerPasswordAuthenticationProvider = xo => {
async function passwordAuthenticationProvider ({
email,
username,
password
}) {
/* eslint no-throw-literal: 0 */
if (email === undefined || password === undefined) {
throw null
if (username === undefined || password === undefined) {
return
}
// TODO: this is deprecated and should be removed.
const user = await xo._users.first({email})
if (!user || !(await user.checkPassword(password))) {
throw null
const user = await xo.getUserByName(username, true)
if (user && await xo.checkUserPassword(user.id, password)) {
return user.id
}
return user
}
xo.registerAuthenticationProvider(passwordAuthenticationProvider)
}
const registerTokenAuthenticationProvider = (xo) => {
const registerTokenAuthenticationProvider = xo => {
async function tokenAuthenticationProvider ({
token: tokenId
}) {
/* eslint no-throw-literal: 0 */
if (!tokenId) {
throw null
return
}
try {
return (await xo.getAuthenticationToken(tokenId)).user_id
} catch (e) {
// It is not an error if the token does not exists.
throw null
return
}
}
@@ -543,15 +560,18 @@ const registerTokenAuthenticationProvider = (xo) => {
// ===================================================================
const help = (function ({name, version}) {
return () => `${name} v${version}`
})(require('../package.json'))
const USAGE = (({
name,
version
}) => `Usage: ${name} [--safe-mode]
${name} v${version}`)(require('../package.json'))
// ===================================================================
export default async function main (args) {
if (args.indexOf('--help') !== -1 || args.indexOf('-h') !== -1) {
return help()
if (includes(args, '--help') || includes(args, '-h')) {
return USAGE
}
{
@@ -580,14 +600,14 @@ export default async function main (args) {
warn('Failed to change user/group:', error)
}
// Create the main object which will connects to Xen servers and
// manages all the models.
const xo = new Xo()
await xo.start({
redis: {
uri: config.redis && config.redis.uri
}
})
// Creates main object.
const xo = new Xo(config)
// Register web server close on XO stop.
xo.on('stop', () => pFromCallback(cb => webServer.close(cb)))
// Connects to all registered servers.
await xo.start()
// Loads default authentication providers.
registerPasswordAuthenticationProvider(xo)
@@ -596,6 +616,39 @@ export default async function main (args) {
// Express is used to manage non WebSocket connections.
const express = createExpressApp()
if (config.http.redirectToHttps) {
let port
forEach(config.http.listen, listen => {
if (
listen.port &&
(listen.cert || listen.certificate)
) {
port = listen.port
return false
}
})
if (port === undefined) {
warn('Could not setup HTTPs redirection: no HTTPs port found')
} else {
express.use((req, res, next) => {
if (req.secure) {
return next()
}
res.redirect(`https://${req.hostname}:${port}${req.originalUrl}`)
})
}
}
// Must be set up before the API.
setUpConsoleProxy(webServer, xo)
// Must be set up before the API.
express.use(bind(xo._handleHttpRequest, xo))
// Everything above is not protected by the sign in, allowing xo-cli
// to work properly.
await setUpPassport(express, xo)
// Attaches express to the web server.
@@ -604,28 +657,15 @@ export default async function main (args) {
express.emit('upgrade', req, socket, head)
})
// Must be set up before the API.
setUpConsoleProxy(webServer, xo)
// Must be set up before the API.
express.use(bind(xo._handleHttpRequest, xo))
// TODO: remove when no longer necessary.
express.use(bind(xo._handleProxyRequest, xo))
// Must be set up before the static files.
const webSocketServer = setUpWebSocketServer(webServer)
const api = setUpApi(webSocketServer, xo)
setUpApi(webServer, xo, config.verboseApiLogsOnErrors)
const scheduler = setUpScheduler(api, xo)
setUpRemoteHandler(xo)
setUpProxies(express, config.http.proxies)
setUpProxies(express, config.http.proxies, xo)
setUpStaticFiles(express, config.http.mounts)
if (config.plugins) {
await registerPlugins(config.plugins, xo)
if (!includes(args, '--safe-mode')) {
await registerPlugins(xo)
}
if (!(await xo._users.exists())) {
@@ -636,28 +676,18 @@ export default async function main (args) {
info('Default user created:', email, ' with password', password)
}
// Gracefully shutdown on signals.
//
// TODO: implements a timeout? (or maybe it is the services launcher
// responsibility?)
process.on('SIGINT', async () => {
debug('SIGINT caught, closing web server…')
const shutdown = signal => {
debug('%s caught, closing…', signal)
xo.stop()
}
webServer.close()
// Gracefully shutdown on signals.
process.on('SIGINT', () => shutdown('SIGINT'))
process.on('SIGTERM', () => shutdown('SIGTERM'))
webSocketServer.close()
scheduler.disableAll()
await xo.disableAllRemotes()
})
process.on('SIGTERM', async () => {
debug('SIGTERM caught, closing web server…')
await eventToPromise(xo, 'stopped')
webServer.close()
webSocketServer.close()
scheduler.disableAll()
await xo.disableAllRemotes()
})
return eventToPromise(webServer, 'close')
debug('bye :-)')
}

View File

@@ -1,7 +1,11 @@
import assign from 'lodash.assign'
import forEach from 'lodash.foreach'
import {BaseError} from 'make-error'
import {
createRawObject,
forEach
} from './utils'
export class JobExecutorError extends BaseError {}
export class UnsupportedJobType extends JobExecutorError {
constructor (job) {
@@ -15,58 +19,142 @@ export class UnsupportedVectorType extends JobExecutorError {
}
export const productParams = (...args) => {
let product = Object.create(null)
let product = createRawObject()
assign(product, ...args)
return product
}
// Recursively computes the cross product of `items`, combining one
// value from each item via the binary `productCb` (applied pairwise,
// leaves are pushed as-is).
//
// `extractValueMap` optionally maps an item `type` to a function
// extracting that item's collection of values; otherwise the item
// itself is iterated.
//
// NOTE(review): `forEach` comes from './utils' — presumably it
// iterates both arrays and plain objects; confirm before changing to
// native iteration. Recursion depth equals the number of items.
export function _computeCrossProduct (items, productCb, extractValueMap = {}) {
  const upstreamValues = []
  // Copy so the caller's array is not mutated by `pop()`.
  const itemsCopy = items.slice()
  const item = itemsCopy.pop()
  const values = extractValueMap[item.type] && extractValueMap[item.type](item) || item
  forEach(values, value => {
    if (itemsCopy.length) {
      let downstreamValues = _computeCrossProduct(itemsCopy, productCb, extractValueMap)
      forEach(downstreamValues, downstreamValue => {
        upstreamValues.push(productCb(value, downstreamValue))
      })
    } else {
      upstreamValues.push(value)
    }
  })
  return upstreamValues
}
export default class JobExecutor {
constructor (xo, api) {
constructor (xo) {
this.xo = xo
this.api = api
this._extractValueCb = {
'set': items => items.values
}
}
exec (job) {
if (job.type === 'call') {
this._execCall(job.userId, job.method, job.paramsVector)
} else {
throw new UnsupportedJobType(job)
}
}
_execCall (userId, method, paramsVector) {
let paramsFlatVector
if (paramsVector.type === 'crossProduct') {
paramsFlatVector = this._computeCrossProduct(paramsVector.items, productParams, this._extractValueCb)
} else {
throw new UnsupportedVectorType(paramsVector)
}
const connection = this.xo.createUserConnection()
connection.set('user_id', userId)
forEach(paramsFlatVector, params => {
this.api.call(connection, method, assign({}, params))
// The logger is not available until Xo has started.
xo.on('started', () => {
this._logger = this.xo.getLogger('jobs')
})
connection.close()
}
_computeCrossProduct (items, productCb, extractValueMap = {}) {
const upstreamValues = []
const itemsCopy = items.slice()
const item = itemsCopy.pop()
const values = extractValueMap[item.type] && extractValueMap[item.type](item) || item
forEach(values, value => {
if (itemsCopy.length) {
let downstreamValues = this._computeCrossProduct(itemsCopy, productCb, extractValueMap)
forEach(downstreamValues, downstreamValue => {
upstreamValues.push(productCb(value, downstreamValue))
})
async exec (job) {
const runJobId = this._logger.notice(`Starting execution of ${job.id}.`, {
event: 'job.start',
userId: job.userId,
jobId: job.id,
key: job.key
})
try {
if (job.type === 'call') {
const execStatus = await this._execCall(job, runJobId)
this.xo.emit('job:terminated', execStatus)
} else {
upstreamValues.push(value)
throw new UnsupportedJobType(job)
}
this._logger.notice(`Execution terminated for ${job.id}.`, {
event: 'job.end',
runJobId
})
} catch (e) {
this._logger.error(`The execution of ${job.id} has failed.`, {
event: 'job.end',
runJobId,
error: e
})
}
}
async _execCall (job, runJobId) {
let paramsFlatVector
if (job.paramsVector) {
if (job.paramsVector.type === 'crossProduct') {
paramsFlatVector = _computeCrossProduct(job.paramsVector.items, productParams, this._extractValueCb)
} else {
throw new UnsupportedVectorType(job.paramsVector)
}
} else {
paramsFlatVector = [{}] // One call with no parameters
}
const connection = this.xo.createUserConnection()
const promises = []
connection.set('user_id', job.userId)
const execStatus = {
runJobId,
start: Date.now(),
calls: {}
}
forEach(paramsFlatVector, params => {
const runCallId = this._logger.notice(`Starting ${job.method} call. (${job.id})`, {
event: 'jobCall.start',
runJobId,
method: job.method,
params
})
const call = execStatus.calls[runCallId] = {
method: job.method,
params,
start: Date.now()
}
promises.push(
this.xo.api.call(connection, job.method, assign({}, params)).then(
value => {
this._logger.notice(`Call ${job.method} (${runCallId}) is a success. (${job.id})`, {
event: 'jobCall.end',
runJobId,
runCallId,
returnedValue: value
})
call.returnedValue = value
call.end = Date.now()
},
reason => {
this._logger.notice(`Call ${job.method} (${runCallId}) has failed. (${job.id})`, {
event: 'jobCall.end',
runJobId,
runCallId,
error: reason
})
call.error = reason
call.end = Date.now()
}
)
)
})
return upstreamValues
connection.close()
await Promise.all(promises)
execStatus.end = Date.now()
return execStatus
}
}

View File

@@ -4,7 +4,7 @@ import {expect} from 'chai'
import leche from 'leche'
import {productParams} from './job-executor'
import JobExecutor from './job-executor'
import {_computeCrossProduct} from './job-executor'
describe('productParams', function () {
leche.withData({
@@ -36,8 +36,7 @@ describe('productParams', function () {
})
})
describe('JobExecutor._computeCrossProduct', function () {
const jobExecutor = new JobExecutor({})
describe('_computeCrossProduct', function () {
// Gives the sum of all args
const addTest = (...args) => args.reduce((prev, curr) => prev + curr, 0)
// Gives the product of all args
@@ -64,7 +63,7 @@ describe('JobExecutor._computeCrossProduct', function () {
]
}, function (product, items, cb) {
it('Crosses sets of values with a crossProduct callback', function () {
expect(jobExecutor._computeCrossProduct(items, cb)).to.have.members(product)
expect(_computeCrossProduct(items, cb)).to.have.members(product)
})
})
})

72
src/loggers/leveldb.js Normal file
View File

@@ -0,0 +1,72 @@
import highland from 'highland'
import { forEach, noop } from '../utils'
// See: https://en.wikipedia.org/wiki/Syslog#Severity_level
// Log levels ordered from most to least severe; each one becomes a
// convenience method on LevelDbLogger (see the loop after the class).
const LEVELS = [
  'emergency',
  'alert',
  'critical',
  'error',
  'warning',
  'notice',
  'informational',
  'debug'
]
// State for the per-millisecond counter used to build unique log keys.
let lastDate = 0
let lastId = 0

// Returns a `<date>:<counter>` key: the counter increments while the same
// millisecond timestamp is seen repeatedly and resets when it changes.
// NOTE(review): a clock rewind to a previously used timestamp could
// reproduce an earlier key — presumably acceptable here; verify.
function generateUniqueKey (date) {
  if (date === lastDate) {
    lastId += 1
  } else {
    lastDate = date
    lastId = 0
  }
  return `${lastDate}:${lastId}`
}
// Logger backed by a LevelDB database. Entries are written under a unique
// per-millisecond key and tagged with a namespace so several loggers can
// share one database.
export default class LevelDbLogger {
  constructor (db, namespace) {
    this._db = db
    this._namespace = namespace
  }

  // Persists one entry and returns its key.
  _add (level, message, data) {
    const time = Date.now()
    const key = generateUniqueKey(time)
    this._db.put(key, {
      level,
      message,
      data,
      namespace: this._namespace,
      time
    })
    return key
  }

  // Streams only the entries belonging to this logger's namespace.
  createReadStream () {
    const namespace = this._namespace
    return highland(this._db.createReadStream()).filter(
      ({ value }) => value.namespace === namespace
    )
  }

  // Deletes one entry or an array of entries by key, skipping entries from
  // other namespaces. Best effort: lookup/delete errors are discarded.
  del (id) {
    const ids = Array.isArray(id) ? id : [id]
    forEach(ids, key => {
      this._db.get(key, (error, value) => {
        if (!error && value.namespace === this._namespace) {
          this._db.del(key, noop)
        }
      })
    })
  }
}
// Create high level log methods: one per syslog level (e.g. `logger.error(...)`),
// each delegating to `_add` with its level name. `defineProperty` with only
// `value` keeps the methods non-enumerable, like regular class methods.
LEVELS.forEach(level => {
  Object.defineProperty(LevelDbLogger.prototype, level, {
    value (message, data) {
      return this._add(level, message, data)
    }
  })
})

202
src/logs-cli.js Normal file
View File

@@ -0,0 +1,202 @@
import appConf from 'app-conf'
import get from 'lodash.get'
import highland from 'highland'
import levelup from 'level-party'
import ndjson from 'ndjson'
import parseArgs from 'minimist'
import sublevel from 'level-sublevel'
import util from 'util'
import { repair as repairDb } from 'leveldown'
import {forEach} from './utils'
import globMatcher from './glob-matcher'
// ===================================================================
// Streams the log entries of `db` to stdout, newest first, applying the
// filters parsed from the CLI arguments (`since`/`until` time bounds, field
// matchers and a result `limit`). Resolves once the stream is exhausted.
async function printLogs (db, args) {
  let stream = highland(db.createReadStream({reverse: true}))

  // Time bounds are compared against each entry's `time` field.
  if (args.since) {
    stream = stream.filter(({value}) => (value.time >= args.since))
  }
  if (args.until) {
    stream = stream.filter(({value}) => (value.time <= args.until))
  }

  const fields = Object.keys(args.matchers)

  // Keep only entries where every requested field exists and satisfies its
  // matcher (`get` supports dotted paths into the entry).
  if (fields.length > 0) {
    stream = stream.filter(({value}) => {
      for (const field of fields) {
        const fieldValue = get(value, field)
        if (fieldValue === undefined || !args.matchers[field](fieldValue)) {
          return false
        }
      }
      return true
    })
  }

  stream = stream.take(args.limit)

  if (args.json) {
    // Newline-delimited JSON, for consumption by other programs.
    stream = highland(stream.pipe(ndjson.serialize()))
      .each(value => {
        process.stdout.write(value)
      })
  } else {
    stream = stream.each(value => {
      console.log(util.inspect(value, { depth: null }))
    })
  }

  return new Promise(resolve => {
    stream.done(resolve)
  })
}
// ===================================================================
// Prints the CLI usage/help text to stderr.
// (The template literal below is user-facing output — kept verbatim.)
function helper () {
  console.error(`
xo-server-logs --help, -h
Display this help message.
xo-server-logs [--json] [--limit=<limit>] [--since=<date>] [--until=<date>] [<pattern>...]
Prints the logs.
--json
Display the results as new line delimited JSON for consumption
by another program.
--limit=<limit>, -n <limit>
Limit the number of results to be displayed (default 100)
--since=<date>, --until=<date>
Start showing entries on or newer than the specified date, or on
or older than the specified date.
<date> should use the format \`YYYY-MM-DD\`.
<pattern>
Patterns can be used to filter the entries.
Patterns have the following format \`<field>=<value>\`/\`<field>\`.
xo-server-logs --repair
Repair/compact the database.
This is an advanced operation and should be used only when necessary and offline (xo-server should be stopped).
`)
}
// ===================================================================
// Parses and validates the CLI arguments. Returns the minimist result
// augmented with `matchers`: a map of field name → predicate built from the
// positional `<field>=<value>` / `<field>` patterns.
// Throws an Error with a human-readable message on invalid input.
function getArgs () {
  const stringArgs = ['since', 'until', 'limit']
  const args = parseArgs(process.argv.slice(2), {
    string: stringArgs,
    boolean: ['help', 'json', 'repair'],
    default: {
      limit: 100,
      json: false,
      help: false
    },
    alias: {
      limit: 'n',
      help: 'h'
    }
  })

  // Collect positional patterns: `<field>=<value>` accumulates values per
  // field, a bare `<field>` only requires the field to exist (null matcher).
  const patterns = {}
  for (let value of args._) {
    value = String(value)
    const i = value.indexOf('=')
    if (i !== -1) {
      const field = value.slice(0, i)
      const pattern = value.slice(i + 1)

      // FIXED: the previous test (`patterns[pattern]`) looked up the wrong
      // key, so a second `<field>=<value>` for the same field overwrote the
      // first array instead of appending to it.
      patterns[field]
        ? patterns[field].push(pattern)
        : patterns[field] = [ pattern ]
    } else if (!patterns[value]) {
      patterns[value] = null
    }
  }

  const trueFunction = () => true
  args.matchers = {}
  for (const field in patterns) {
    const values = patterns[field]
    args.matchers[field] = (values === null) ? trueFunction : globMatcher(values)
  }

  // Warning: minimist makes one array of values if the same option is used many times.
  // (But only for strings args, not boolean)
  forEach(stringArgs, arg => {
    if (args[arg] instanceof Array) {
      throw new Error(`error: too many values for ${arg} argument`)
    }
  })

  // `since`/`until` accept any `Date.parse()`-able string (e.g. YYYY-MM-DD)
  // and are converted to millisecond timestamps.
  ;['since', 'until'].forEach(arg => {
    if (args[arg] !== undefined) {
      args[arg] = Date.parse(args[arg])
      if (isNaN(args[arg])) {
        throw new Error(`error: bad ${arg} timestamp format`)
      }
    }
  })

  if (isNaN(args.limit = +args.limit)) {
    throw new Error('error: limit is not a valid number')
  }

  return args
}
// ===================================================================
// CLI entry point: parses arguments, then either prints the usage, repairs
// the LevelDB database, or streams matching log entries to stdout.
export default async function main () {
  const args = getArgs()

  if (args.help) {
    helper()
    return
  }

  const config = await appConf.load('xo-server', {
    ignoreUnknownFormats: true
  })

  if (args.repair) {
    // Offline maintenance: leveldown's repair() compacts/repairs the store.
    // NOTE(review): assumes xo-server is stopped (the DB must not be open),
    // as stated in the help text.
    await new Promise((resolve, reject) => {
      repairDb(`${config.datadir}/leveldb`, error => {
        if (error) {
          reject(error)
        } else {
          resolve()
        }
      })
    })
    return
  }

  // `level-party` allows sharing the database with a running xo-server;
  // log entries live in the `logs` sublevel.
  const db = sublevel(levelup(
    `${config.datadir}/leveldb`,
    { valueEncoding: 'json' }
  )).sublevel('logs')

  return printLogs(db, args)
}

View File

@@ -1,15 +1,18 @@
import assign from 'lodash.assign'
import forEach from 'lodash.foreach'
import isEmpty from 'lodash.isempty'
import isString from 'lodash.isstring'
import {EventEmitter} from 'events'
import {
forEach,
isEmpty
} from './utils'
// ===================================================================
export default class Model extends EventEmitter {
constructor (properties) {
super()
this.properties = assign({}, this.default)
this.properties = { ...this.default }
if (properties) {
this.set(properties)
@@ -39,7 +42,7 @@ export default class Model extends EventEmitter {
set (properties, value) {
// This method can also be used with two arguments to set a single
// property.
if (value !== undefined) {
if (isString(properties)) {
properties = { [properties]: value }
}

View File

@@ -1,9 +1,10 @@
import forEach from 'lodash.foreach'
import map from 'lodash.map'
import Collection from '../collection/redis'
import Model from '../model'
import {multiKeyHash} from '../utils'
import {
forEach,
mapToArray,
multiKeyHash
} from '../utils'
// ===================================================================
@@ -58,11 +59,11 @@ export class Acls extends Collection {
})
if (toUpdate.length) {
// Removes all existing entries.
await this.remove(map(toUpdate, 'id'))
await this.remove(mapToArray(toUpdate, 'id'))
// Compute the new ids (new hashes).
const {hash} = Acl
await Promise.all(map(
await Promise.all(mapToArray(
toUpdate,
(acl) => hash(acl.subject, acl.object, acl.action).then(id => {
acl.id = id

View File

@@ -1,8 +1,8 @@
import forEach from 'lodash.foreach'
import Collection from '../collection/redis'
import Model from '../model'
import { forEach } from '../utils'
// ===================================================================
export default class Group extends Model {}

View File

@@ -1,7 +1,6 @@
import forEach from 'lodash.foreach'
import Collection from '../collection/redis'
import Model from '../model'
import { forEach } from '../utils'
// ===================================================================

View File

@@ -1,8 +1,6 @@
import assign from 'lodash.assign'
import forEach from 'lodash.foreach'
import Collection from '../collection/redis'
import Model from '../model'
import { forEach } from '../utils'
// ===================================================================
@@ -33,9 +31,10 @@ export class PluginsMetadata extends Collection {
throw new Error('no such plugin metadata')
}
const {properties} = pluginMetadata
assign(properties, data)
return await this.save(properties)
return await this.save({
...pluginMetadata.properties,
...data
})
}
async get (properties) {

View File

@@ -1,6 +1,8 @@
import Collection from '../collection/redis'
import forEach from 'lodash.foreach'
import Model from '../model'
import {
forEach
} from '../utils'
// ===================================================================

View File

@@ -1,6 +1,6 @@
import Collection from '../collection/redis'
import forEach from 'lodash.foreach'
import Model from '../model'
import { forEach } from '../utils'
// ===================================================================
@@ -15,12 +15,13 @@ export class Schedules extends Collection {
return 'schedule:'
}
create (userId, job, cron, enabled) {
create (userId, job, cron, enabled, name = undefined) {
return this.add(new Schedule({
userId,
job,
cron,
enabled
enabled,
name
}))
}

View File

@@ -12,11 +12,11 @@ export class Servers extends Collection {
return Server
}
async create ({host, username, password}) {
async create ({host, username, password, readOnly}) {
if (await this.exists({host})) {
throw new Error('server already exists')
}
return await this.add({host, username, password})
return await this.add({host, username, password, readOnly})
}
}

View File

@@ -1,26 +1,10 @@
import Collection from '../collection/redis'
import Model from '../model'
import {generateToken} from '../utils'
// ===================================================================
export default class Token extends Model {
static generate (userId) {
return generateToken().then(token => new Token({
id: token,
user_id: userId
}))
}
}
export default class Token extends Model {}
// -------------------------------------------------------------------
export class Tokens extends Collection {
get Model () {
return Token
}
generate (userId) {
return Token.generate(userId).then(token => this.add(token))
}
}
export class Tokens extends Collection {}

View File

@@ -1,8 +1,8 @@
import forEach from 'lodash.foreach'
import {hash, needsRehash, verify} from 'hashy'
import { hash } from 'hashy'
import Collection from '../collection/redis'
import Model from '../model'
import { forEach } from '../utils'
// ===================================================================
@@ -16,22 +16,6 @@ const PERMISSIONS = {
// ===================================================================
export default class User extends Model {
async checkPassword (password) {
const hash = this.get('pw_hash')
if (!(hash && await verify(password, hash))) {
return false
}
// There might be no hash if the user authenticate with another
// method (e.g. LDAP).
if (needsRehash(hash)) {
await this.setPassword(password)
}
return true
}
hasPermission (permission) {
return PERMISSIONS[this.get('permission')] >= PERMISSIONS[permission]
}

View File

@@ -27,8 +27,10 @@ export default function proxyConsole (ws, vmConsole, sessionId) {
// TODO: check status code 200.
debug('connected')
})).on('data', data => {
// Encode to base 64.
ws.send(data.toString('base64'))
if (!closed) {
// Encode to base 64.
ws.send(data.toString('base64'))
}
}).on('end', () => {
if (!closed) {
closed = true
@@ -46,8 +48,10 @@ export default function proxyConsole (ws, vmConsole, sessionId) {
socket.close()
})
.on('message', data => {
// Decode from base 64.
socket.write(new Buffer(data, 'base64'))
if (!closed) {
// Decode from base 64.
socket.write(new Buffer(data, 'base64'))
}
})
.on('close', () => {
if (!closed) {

View File

@@ -1,139 +0,0 @@
import filter from 'lodash.filter'
import forEach from 'lodash.foreach'
import fs from 'fs-promise'
import {exec} from 'child_process'
import {promisify} from './utils'
const execAsync = promisify(exec)
const noop = () => {}
class NfsMounter {
async _loadRealMounts () {
let stdout
try {
[stdout] = await execAsync('findmnt -P -t nfs,nfs4 --output SOURCE,TARGET --noheadings')
} catch (exc) {
// When no mounts are found, the call pretends to fail...
}
const mounted = {}
if (stdout) {
const regex = /^SOURCE="([^:]*):(.*)" TARGET="(.*)"$/
forEach(stdout.split('\n'), m => {
if (m) {
const match = regex.exec(m)
mounted[match[3]] = {
host: match[1],
share: match[2]
}
}
})
}
this._realMounts = mounted
return mounted
}
_fullPath (path) {
return path
}
_matchesRealMount (mount) {
return this._fullPath(mount.path) in this._realMounts
}
async _mount (mount) {
const path = this._fullPath(mount.path)
await fs.ensureDir(path)
return await execAsync(`mount -t nfs ${mount.host}:${mount.share} ${path}`)
}
async forget (mount) {
try {
await this._umount(mount)
} catch (_) {
// We have to go on...
}
}
async _umount (mount) {
const path = this._fullPath(mount.path)
await execAsync(`umount ${path}`)
}
async sync (mount) {
await this._loadRealMounts()
if (this._matchesRealMount(mount) && !mount.enabled) {
try {
await this._umount(mount)
} catch (exc) {
mount.enabled = true
mount.error = exc.message
}
} else if (!this._matchesRealMount(mount) && mount.enabled) {
try {
await this._mount(mount)
} catch (exc) {
mount.enabled = false
mount.error = exc.message
}
}
return mount
}
async disableAll (mounts) {
await this._loadRealMounts()
forEach(mounts, async mount => {
if (this._matchesRealMount(mount)) {
try {
await this._umount(mount)
} catch (_) {
// We have to go on...
}
}
})
}
}
class LocalHandler {
constructor () {
this.forget = noop
this.disableAll = noop
}
async sync (local) {
if (local.enabled) {
try {
await fs.ensureDir(local.path)
await fs.access(local.path, fs.R_OK | fs.W_OK)
} catch (exc) {
local.enabled = false
local.error = exc.message
}
}
return local
}
}
export default class RemoteHandler {
constructor () {
this.handlers = {
nfs: new NfsMounter(),
local: new LocalHandler()
}
}
async sync (remote) {
return await this.handlers[remote.type].sync(remote)
}
async forget (remote) {
return await this.handlers[remote.type].forget(remote)
}
async disableAll (remotes) {
const promises = []
forEach(['local', 'nfs'], type => promises.push(this.handlers[type].disableAll(filter(remotes, remote => remote.type === type))))
await Promise.all(promises)
}
}

View File

@@ -0,0 +1,177 @@
import eventToPromise from 'event-to-promise'
import getStream from 'get-stream'
import through2 from 'through2'
import {
parse
} from 'xo-remote-parser'
import {
addChecksumToReadStream,
noop,
validChecksumOfReadStream
} from '../utils'
// Base class for remote (backup storage) handlers: subclasses implement the
// `_`-prefixed primitives for one transport (local FS, NFS, SMB…) while this
// class provides the public API plus optional checksum support on streams.
export default class RemoteHandlerAbstract {
  constructor (remote) {
    this._remote = parse({...remote})
    if (this._remote.type !== this.type) {
      throw new Error('Incorrect remote type')
    }
  }

  // Transport identifier (e.g. 'local', 'nfs', 'smb') — must be overridden.
  get type () {
    throw new Error('Not implemented')
  }

  /**
   * Asks the handler to sync the state of the effective remote with its' metadata
   */
  async sync () {
    return this._sync()
  }

  async _sync () {
    throw new Error('Not implemented')
  }

  /**
   * Free the resources possibly dedicated to put the remote at work, when it is no more needed
   */
  async forget () {
    return this._forget()
  }

  async _forget () {
    throw new Error('Not implemented')
  }

  // Writes `data` to `file`, replacing its content.
  async outputFile (file, data, options) {
    return this._outputFile(file, data, options)
  }

  // Default implementation built on top of createOutputStream().
  async _outputFile (file, data, options) {
    const stream = await this.createOutputStream(file)
    const promise = eventToPromise(stream, 'finish')
    stream.end(data)
    return promise
  }

  // Reads the whole content of `file`.
  async readFile (file, options) {
    return this._readFile(file, options)
  }

  // Default implementation built on top of createReadStream().
  async _readFile (file, options) {
    return getStream(await this.createReadStream(file, options))
  }

  async rename (oldPath, newPath) {
    return this._rename(oldPath, newPath)
  }

  async _rename (oldPath, newPath) {
    throw new Error('Not implemented')
  }

  // Lists the entries of directory `dir` (defaults to the remote's root).
  async list (dir = '.') {
    return this._list(dir)
  }

  async _list (dir) {
    throw new Error('Not implemented')
  }

  // Opens a read stream on `file`. With `checksum: true` the stream is
  // wrapped so its content is verified against the stored `<file>.checksum`;
  // a missing checksum file is an error unless `ignoreMissingChecksum` is set.
  async createReadStream (file, {
    checksum = false,
    ignoreMissingChecksum = false,
    ...options
  } = {}) {
    const streamP = this._createReadStream(file, options).then(async stream => {
      await eventToPromise(stream, 'readable')

      // Expose the stream's length when the transport can provide it
      // (failures to stat are ignored).
      if (stream.length === undefined) {
        stream.length = await this.getSize(file).catch(noop)
      }
      return stream
    })

    if (!checksum) {
      return streamP
    }

    try {
      checksum = await this.readFile(`${file}.checksum`)
    } catch (error) {
      if (error.code === 'ENOENT' && ignoreMissingChecksum) {
        return streamP
      }
      throw error
    }

    let stream = await streamP

    // The checksum wrapper does not preserve `length`: carry it over.
    const { length } = stream
    stream = validChecksumOfReadStream(stream, checksum.toString())
    stream.length = length

    return stream
  }

  async _createReadStream (file, options) {
    throw new Error('Not implemented')
  }

  // Opens a write stream on `file`. With `checksum: true` the returned
  // stream also computes a checksum of everything written through it and
  // stores it in `<file>.checksum` alongside the file.
  async createOutputStream (file, {
    checksum = false,
    ...options
  } = {}) {
    const streamP = this._createOutputStream(file, options)
    if (!checksum) {
      return streamP
    }

    // Writes go through a pass-through stream so checksum/storage errors
    // can be surfaced on the stream handed back to the caller.
    const connectorStream = through2()
    const forwardError = error => {
      connectorStream.emit('error', error)
    }
    const streamWithChecksum = addChecksumToReadStream(connectorStream)
    streamWithChecksum.pipe(await streamP)
    streamWithChecksum.on('error', forwardError)
    streamWithChecksum.checksum
      .then(value => this.outputFile(`${file}.checksum`, value))
      .catch(forwardError)
    return connectorStream
  }

  async _createOutputStream (file, options) {
    throw new Error('Not implemented')
  }

  // Deletes `file`; with `checksum: true` also deletes its checksum file
  // (best effort — failures there are ignored).
  async unlink (file, {
    checksum = false
  } = {}) {
    if (checksum) {
      this._unlink(`${file}.checksum`).catch(noop)
    }
    return this._unlink(file)
  }

  async _unlink (file) {
    throw new Error('Not implemented')
  }

  // Size of `file` in bytes.
  async getSize (file) {
    return this._getSize(file)
  }

  async _getSize (file) {
    throw new Error('Not implemented')
  }
}

View File

@@ -0,0 +1,84 @@
import fs from 'fs-promise'
import startsWith from 'lodash.startswith'
import {
dirname,
resolve
} from 'path'
import RemoteHandlerAbstract from './abstract'
import {
noop
} from '../utils'
// Remote handler storing backups on the local file system, rooted at the
// remote's configured path.
export default class LocalHandler extends RemoteHandlerAbstract {
  get type () {
    return 'local'
  }

  // Resolves `file` against the remote's root path and refuses results
  // escaping it.
  // NOTE(review): the plain prefix check accepts sibling paths such as
  // `/data-evil` for a root of `/data`; a separator-aware comparison would
  // be stricter — confirm intent before tightening.
  _getFilePath (file) {
    const parts = [this._remote.path]
    if (file) {
      parts.push(file)
    }
    const path = resolve(...parts)
    if (!startsWith(path, this._remote.path)) {
      throw new Error('Remote path is unavailable')
    }
    return path
  }

  // Ensures the configured directory exists and is readable/writable;
  // disables the remote and records the error otherwise.
  async _sync () {
    if (this._remote.enabled) {
      try {
        await fs.ensureDir(this._remote.path)
        await fs.access(this._remote.path, fs.R_OK | fs.W_OK)
      } catch (exc) {
        this._remote.enabled = false
        this._remote.error = exc.message
      }
    }
    return this._remote
  }

  // Nothing to release for a local remote.
  async _forget () {
    return noop()
  }

  async _outputFile (file, data, options) {
    const path = this._getFilePath(file)
    await fs.ensureDir(dirname(path))

    // FIXED: reuse the already computed path instead of resolving it twice.
    await fs.writeFile(path, data, options)
  }

  async _readFile (file, options) {
    return fs.readFile(this._getFilePath(file), options)
  }

  async _rename (oldPath, newPath) {
    return fs.rename(this._getFilePath(oldPath), this._getFilePath(newPath))
  }

  async _list (dir = '.') {
    return fs.readdir(this._getFilePath(dir))
  }

  async _createReadStream (file, options) {
    return fs.createReadStream(this._getFilePath(file), options)
  }

  async _createOutputStream (file, options) {
    const path = this._getFilePath(file)
    await fs.ensureDir(dirname(path))
    return fs.createWriteStream(path, options)
  }

  async _unlink (file) {
    return fs.unlink(this._getFilePath(file))
  }

  async _getSize (file) {
    const stats = await fs.stat(this._getFilePath(file))
    return stats.size
  }
}

View File

@@ -0,0 +1,77 @@
import execa from 'execa'
import fs from 'fs-promise'
import LocalHandler from './local'
import {
forEach
} from '../utils'
// Remote handler for NFS shares: a LocalHandler whose directory is
// mounted/unmounted from an NFS export as part of sync/forget.
export default class NfsHandler extends LocalHandler {
  get type () {
    return 'nfs'
  }

  // Builds a map of currently mounted NFS targets (target path →
  // { host, share }) by parsing `findmnt -P` output.
  async _loadRealMounts () {
    let stdout
    const mounted = {}
    try {
      [stdout] = await execa('findmnt', ['-P', '-t', 'nfs,nfs4', '--output', 'SOURCE,TARGET', '--noheadings'])
      const regex = /^SOURCE="([^:]*):(.*)" TARGET="(.*)"$/
      forEach(stdout.split('\n'), m => {
        if (m) {
          const match = regex.exec(m)
          mounted[match[3]] = {
            host: match[1],
            share: match[2]
          }
        }
      })
    } catch (exc) {
      // When no mounts are found, the call pretends to fail...
    }

    this._realMounts = mounted
    return mounted
  }

  // Whether this remote's path is currently an active NFS mount point.
  _matchesRealMount (remote) {
    return remote.path in this._realMounts
  }

  async _mount (remote) {
    await fs.ensureDir(remote.path)
    return execa('mount', ['-t', 'nfs', `${remote.host}:${remote.share}`, remote.path])
  }

  // Reconciles the actual mount state with the remote's `enabled` flag:
  // unmounts a disabled-but-mounted remote, mounts an enabled-but-unmounted
  // one; on failure the flag is reverted and the error message recorded.
  async _sync () {
    await this._loadRealMounts()
    if (this._matchesRealMount(this._remote) && !this._remote.enabled) {
      try {
        await this._umount(this._remote)
      } catch (exc) {
        this._remote.enabled = true
        this._remote.error = exc.message
      }
    } else if (!this._matchesRealMount(this._remote) && this._remote.enabled) {
      try {
        await this._mount(this._remote)
      } catch (exc) {
        this._remote.enabled = false
        this._remote.error = exc.message
      }
    }
    return this._remote
  }

  // Best-effort unmount when the remote is no longer needed.
  async _forget () {
    try {
      await this._umount(this._remote)
    } catch (_) {
      // We have to go on...
    }
  }

  async _umount (remote) {
    await execa('umount', [remote.path])
  }
}

144
src/remote-handlers/smb.js Normal file
View File

@@ -0,0 +1,144 @@
import Smb2 from '@marsaud/smb2-promise'
import RemoteHandlerAbstract from './abstract'
import {
noop
} from '../utils'
// Remote handler for SMB/CIFS shares, using @marsaud/smb2-promise. A new
// client is opened per operation and closed when the operation completes.
export default class SmbHandler extends RemoteHandlerAbstract {
  constructor (remote) {
    super(remote)
    this._forget = noop
  }

  get type () {
    return 'smb'
  }

  _getClient (remote) {
    return new Smb2({
      share: `\\\\${remote.host}`,
      domain: remote.domain,
      username: remote.username,
      password: remote.password,
      autoCloseTimeout: 0
    })
  }

  // Converts a POSIX-style relative path into a backslash-separated path
  // under the remote's base path.
  _getFilePath (file) {
    if (file === '.') {
      file = undefined
    }

    const parts = []
    if (this._remote.path !== '') {
      parts.push(this._remote.path)
    }
    if (file) {
      // FIXED: segments must be pushed individually — pushing the split
      // array itself made `join('\\')` stringify it with commas
      // (`a/b` became `a,b` instead of `a\b`).
      parts.push(...file.split('/'))
    }

    return parts.join('\\')
  }

  // Backslash-path equivalent of path.dirname().
  _dirname (file) {
    const parts = file.split('\\')
    parts.pop()
    return parts.join('\\')
  }

  // Probes the share (via a directory listing) and disables the remote,
  // recording the error, if it is unreachable.
  async _sync () {
    if (this._remote.enabled) {
      try {
        // Check access (smb2 does not expose connect in public so far...)
        await this.list()
      } catch (error) {
        this._remote.enabled = false
        this._remote.error = error.message
      }
    }
    return this._remote
  }

  async _outputFile (file, data, options) {
    const client = this._getClient(this._remote)
    const path = this._getFilePath(file)
    const dir = this._dirname(path)
    try {
      if (dir) {
        await client.ensureDir(dir)
      }
      return client.writeFile(path, data, options)
    } finally {
      client.close()
    }
  }

  async _readFile (file, options) {
    const client = this._getClient(this._remote)
    try {
      return client.readFile(this._getFilePath(file), options)
    } finally {
      client.close()
    }
  }

  async _rename (oldPath, newPath) {
    const client = this._getClient(this._remote)
    try {
      return client.rename(this._getFilePath(oldPath), this._getFilePath(newPath))
    } finally {
      client.close()
    }
  }

  async _list (dir = '.') {
    const client = this._getClient(this._remote)
    try {
      return client.readdir(this._getFilePath(dir))
    } finally {
      client.close()
    }
  }

  // The client is closed when the stream ends instead of in a finally
  // block, since the caller consumes the stream after this returns.
  async _createReadStream (file, options) {
    const client = this._getClient(this._remote)
    const stream = await client.createReadStream(this._getFilePath(file), options) // FIXME ensure that options are properly handled by @marsaud/smb2
    stream.on('end', () => client.close())
    return stream
  }

  async _createOutputStream (file, options) {
    const client = this._getClient(this._remote)
    const path = this._getFilePath(file)
    const dir = this._dirname(path)
    let stream
    try {
      if (dir) {
        await client.ensureDir(dir)
      }
      stream = await client.createWriteStream(path, options) // FIXME ensure that options are properly handled by @marsaud/smb2
    } catch (err) {
      client.close()
      throw err
    }
    stream.on('finish', () => client.close())
    return stream
  }

  async _unlink (file) {
    const client = this._getClient(this._remote)
    try {
      return client.unlink(this._getFilePath(file))
    } finally {
      client.close()
    }
  }

  async _getSize (file) {
    // FIXED: dropped a spurious `await` — _getClient() is synchronous.
    const client = this._getClient(this._remote)
    try {
      return client.getSize(this._getFilePath(file))
    } finally {
      client.close()
    }
  }
}

View File

@@ -1,7 +1,8 @@
import forEach from 'lodash.foreach'
import {BaseError} from 'make-error'
import {CronJob} from 'cron'
import { forEach } from './utils'
const _resolveId = scheduleOrId => scheduleOrId.id || scheduleOrId
export class SchedulerError extends BaseError {}
@@ -32,11 +33,11 @@ export class ScheduleJobNotFound extends SchedulerError {
}
export default class Scheduler {
constructor (xo, {executor}) {
this.executor = executor
constructor (xo) {
this.xo = xo
this._scheduleTable = undefined
this._loadSchedules()
this._runningSchedules = {}
}
async _loadSchedules () {
@@ -114,26 +115,29 @@ export default class Scheduler {
}
_enable (schedule) {
const running = this._runningSchedules
const { id } = schedule
const jobId = schedule.job
const cronJob = new CronJob(schedule.cron, async () => {
if (running[id]) {
return // Simply ignore.
}
try {
const job = await this._getJob(jobId, schedule.id)
this.executor.exec(job)
} catch (_) {
// FIXME What do we do ?
running[id] = true
await this.xo.runJobSequence([ jobId ])
} catch (error) {
// FIXME: better error handling
console.error(error && error.stack || error)
} finally {
delete running[id]
}
})
this._cronJobs[schedule.id] = cronJob
this._cronJobs[id] = cronJob
cronJob.start()
this._scheduleTable[schedule.id] = true
}
async _getJob (id, scheduleId) {
const job = await this.xo.getJob(id)
if (!job) {
throw new ScheduleJobNotFound(id, scheduleId)
}
return job
this._scheduleTable[id] = true
}
_disable (scheduleOrId) {

28
src/schemas/acl.js Normal file
View File

@@ -0,0 +1,28 @@
// JSON schema (draft-04) describing an ACL entry: a subject (user/group)
// granted an action (permission/role) on an object (item/set).
export default {
  $schema: 'http://json-schema.org/draft-04/schema#',
  type: 'object',
  properties: {
    id: {
      type: 'string',
      description: 'unique identifier for this ACL'
    },
    action: {
      type: 'string',
      description: 'permission (or role)'
    },
    object: {
      type: 'string',
      description: 'item (or set)'
    },
    subject: {
      type: 'string',
      description: 'user (or group)'
    }
  },
  required: [
    'id',
    'action',
    'object',
    'subject'
  ]
}

29
src/schemas/log.js Normal file
View File

@@ -0,0 +1,29 @@
// JSON schema (draft-04) describing a log entry as exposed by the API.
// NOTE(review): `time` is declared as a string here, but LevelDbLogger
// stores `Date.now()` (a number) — verify which representation callers see.
export default {
  $schema: 'http://json-schema.org/draft-04/schema#',
  type: 'object',
  properties: {
    id: {
      type: 'string',
      description: 'unique identifier for this log'
    },
    time: {
      type: 'string',
      description: 'timestamp (in milliseconds) of this log'
    },
    message: {
      type: 'string',
      description: 'human readable (short) description of this log'
    },
    namespace: {
      type: 'string',
      description: 'space to store logs'
    },
    data: {}
  },
  required: [
    'id',
    'time',
    'message',
    'namespace'
  ]
}

View File

@@ -0,0 +1,33 @@
// JSON schema (draft-04) for a `jobCall.end` log entry: the outcome of one
// API call made by a job run — exactly one of `error` / `returnedValue`.
export default {
  $schema: 'http://json-schema.org/draft-04/schema#',
  type: 'object',
  properties: {
    event: {
      enum: ['jobCall.end']
    },
    runJobId: {
      type: 'string',
      description: 'instance id of this job'
    },
    runCallId: {
      type: 'string',
      description: 'instance id of this call'
    },
    error: {
      type: 'object',
      description: 'describe one failure, exists if the call has failed'
    },
    returnedValue: {
      description: 'call\'s result, exists if the call is a success'
    }
  },
  required: [
    'event',
    'runJobId',
    'runCallId'
  ],
  oneOf: [
    { required: ['error'] },
    { required: ['returnedValue'] }
  ]
}

View File

@@ -0,0 +1,27 @@
// JSON schema (draft-04) for a `jobCall.start` log entry: one API call
// (method + params) initiated by a job run.
export default {
  $schema: 'http://json-schema.org/draft-04/schema#',
  type: 'object',
  properties: {
    event: {
      enum: ['jobCall.start']
    },
    runJobId: {
      type: 'string',
      description: 'instance id of this job'
    },
    method: {
      type: 'string',
      description: 'method linked to this call'
    },
    params: {
      type: 'object',
      description: 'params of the called method'
    }
  },
  required: [
    'event',
    'runJobId',
    'method',
    'params'
  ]
}

21
src/schemas/log/jobEnd.js Normal file
View File

@@ -0,0 +1,21 @@
// JSON schema (draft-04) for a `job.end` log entry: marks the end of a job
// run; `error` is only present when the run failed before making any call.
export default {
  $schema: 'http://json-schema.org/draft-04/schema#',
  type: 'object',
  properties: {
    event: {
      enum: ['job.end']
    },
    runJobId: {
      type: 'string',
      description: 'instance id of this job'
    },
    error: {
      type: 'object',
      description: 'describe one failure, exists if no call has been made'
    }
  },
  required: [
    'event',
    'runJobId'
  ]
}

View File

@@ -0,0 +1,26 @@
// JSON schema (draft-04) for a `job.start` log entry: marks the beginning
// of a job run, recording who launched which job.
export default {
  $schema: 'http://json-schema.org/draft-04/schema#',
  type: 'object',
  properties: {
    event: {
      enum: ['job.start']
    },
    userId: {
      type: 'string',
      description: 'user who executes this job'
    },
    jobId: {
      type: 'string',
      description: 'identifier of this job'
    },
    key: {
      type: 'string'
    }
  },
  required: [
    'event',
    'userId',
    'jobId',
    'key'
  ]
}

41
src/schemas/plugin.js Normal file
View File

@@ -0,0 +1,41 @@
// JSON schema of a xo-server plugin as exposed to clients.
const pluginSchema = {
  $schema: 'http://json-schema.org/draft-04/schema#',
  type: 'object',
  properties: {
    id: {
      type: 'string',
      description: 'unique identifier for this plugin'
    },
    name: {
      type: 'string',
      description: 'unique human readable name for this plugin'
    },
    autoload: {
      type: 'boolean',
      description: 'whether this plugin is loaded on startup'
    },
    loaded: {
      type: 'boolean',
      description: 'whether or not this plugin is currently loaded'
    },
    unloadable: {
      type: 'boolean',
      // Fix: the default of a boolean-typed property must be the
      // boolean `true`, not the string `'true'` (a string default
      // contradicts `type` and would not validate against this schema).
      default: true,
      description: 'whether or not this plugin can be unloaded'
    },
    configuration: {
      type: 'object',
      description: 'current configuration of this plugin (not present if none)'
    },
    configurationSchema: {
      // A plugin's configuration schema is itself a draft-04 schema.
      $ref: 'http://json-schema.org/draft-04/schema#',
      description: 'configuration schema for this plugin (not present if not configurable)'
    }
  },
  required: [
    'id',
    'name',
    'autoload',
    'loaded'
  ]
}

export default pluginSchema

View File

@@ -1,16 +1,46 @@
import base64url from 'base64url'
import eventToPromise from 'event-to-promise'
import forEach from 'lodash.foreach'
import has from 'lodash.has'
import humanFormat from 'human-format'
import invert from 'lodash.invert'
import isArray from 'lodash.isarray'
import isString from 'lodash.isstring'
import kindOf from 'kindof'
import multiKeyHashInt from 'multikey-hash'
import xml2js from 'xml2js'
import { defer } from 'promise-toolbox'
import {promisify} from 'bluebird'
import {randomBytes} from 'crypto'
import {
createHash,
randomBytes
} from 'crypto'
import { Readable } from 'stream'
import through2 from 'through2'
import {utcFormat as d3TimeFormat} from 'd3-time-format'
// ===================================================================
// Exposes a Buffer as a Readable stream.
//
// At most `size` bytes are pushed per _read() call; the stream ends
// once the whole buffer has been emitted.
export function bufferToStream (buf) {
  const stream = new Readable()
  const total = buf.length
  let offset = 0

  stream._read = size => {
    if (offset === total) {
      stream.push(null)
      return
    }

    const end = Math.min(offset + size, total)
    stream.push(buf.slice(offset, end))
    offset = end
  }

  return stream
}
// -------------------------------------------------------------------
export function camelToSnakeCase (string) {
return string.replace(
/([a-z])([A-Z])/g,
@@ -20,6 +50,97 @@ export function camelToSnakeCase (string) {
// -------------------------------------------------------------------
// Returns a fresh, empty dictionary object.
//
// When the runtime supports `Object.create`, the object has no
// prototype at all (safe against `__proto__`/`toString` key
// collisions); otherwise a plain object literal is used as fallback.
export const createRawObject = Object.create
  ? Object.create.bind(Object, null)
  : () => ({})
// -------------------------------------------------------------------
// Single-character ids used to tag a checksum string with the
// algorithm that produced it, crypt(3)-style ($1$ = MD5, $5$ = SHA-256,
// $6$ = SHA-512).
const ALGORITHM_TO_ID = {
  md5: '1',
  sha256: '5',
  sha512: '6'
}

// Reverse mapping, used to turn a checksum prefix back into the
// algorithm name when verifying.
const ID_TO_ALGORITHM = invert(ALGORITHM_TO_ID)
// Wrap a readable stream in a stream with a checksum promise
// attribute which is resolved at the end of an input stream.
// (Finally .checksum contains the checksum of the input stream)
//
// Example:
// const sourceStream = ...
// const targetStream = ...
// const checksumStream = addChecksumToReadStream(sourceStream)
// await Promise.all([
// eventToPromise(checksumStream.pipe(targetStream), 'finish'),
// checksumStream.checksum.then(console.log)
// ])
// Wraps `stream` in a pass-through stream whose `checksum` property is
// a promise resolving, once the input ends, to the stream's digest in
// the form `$<algorithm id>$$<hex digest>`.
//
// Throws synchronously when `algorithm` is not one of ALGORITHM_TO_ID.
export const addChecksumToReadStream = (stream, algorithm = 'md5') => {
  const algorithmId = ALGORITHM_TO_ID[algorithm]

  if (!algorithmId) {
    throw new Error(`unknown algorithm: ${algorithm}`)
  }

  const hash = createHash(algorithm)
  const { promise, resolve } = defer()

  // Every chunk is fed to the hash and forwarded unchanged; the flush
  // callback runs when the input ends and settles the digest promise.
  const wrapper = stream.pipe(through2(
    (chunk, enc, callback) => {
      hash.update(chunk)
      callback(null, chunk)
    },
    callback => {
      resolve(hash.digest('hex'))
      callback()
    }
  ))

  // pipe() does not forward error events: re-emit them on the wrapper
  // so consumers only need to listen on one stream.
  stream.on('error', error => wrapper.emit('error', error))

  // `$${algorithmId}$$${hash}` yields `$<id>$$<digest>` — the extra `$`
  // is intentional (crypt(3)-like format with an empty salt) and is
  // what validChecksumOfReadStream() expects.
  wrapper.checksum = promise.then(hash => `$${algorithmId}$$${hash}`)

  return wrapper
}
// Check if the checksum of a readable stream is equal to an expected
// checksum of the form `$<algorithm id>$$<hex digest>` (as produced by
// addChecksumToReadStream()).
//
// The given stream is wrapped in a stream which emits an error event
// if the computed checksum is not equal to the expected checksum, and
// which exposes a `checksumVerified` promise resolving once the whole
// stream has been read (and therefore verified).
//
// Throws synchronously when the expected checksum has an unknown or
// malformed algorithm prefix.
export const validChecksumOfReadStream = (stream, expectedChecksum) => {
  const algorithmId = expectedChecksum.slice(1, expectedChecksum.indexOf('$', 1))

  // Reject unknown/garbled prefixes up front: previously an unknown id
  // reached createHash(ID_TO_ALGORITHM[id]) with undefined and threw an
  // opaque TypeError, and the guard's message interpolated the (empty)
  // id instead of anything useful.
  const algorithm = ID_TO_ALGORITHM[algorithmId]
  if (!algorithm) {
    throw new Error(`unknown algorithm: ${algorithmId || expectedChecksum}`)
  }

  const hash = createHash(algorithm)

  const wrapper = stream.pipe(through2(
    // NOTE(review): highWaterMark 0 presumably avoids buffering data
    // ahead of the verification — confirm before changing.
    { highWaterMark: 0 },
    (chunk, enc, callback) => {
      hash.update(chunk)
      callback(null, chunk)
    },
    callback => {
      const checksum = `$${algorithmId}$$${hash.digest('hex')}`

      callback(
        checksum !== expectedChecksum
          ? new Error(`Bad checksum (${checksum}), expected: ${expectedChecksum}`)
          : null
      )
    }
  ))

  // pipe() does not forward error events: re-emit them on the wrapper.
  stream.on('error', error => wrapper.emit('error', error))

  wrapper.checksumVerified = eventToPromise(wrapper, 'end')

  return wrapper
}
// -------------------------------------------------------------------
// Ensure the value is an array, wrap it if necessary.
export function ensureArray (value) {
if (value === undefined) {
@@ -82,24 +203,162 @@ export const parseXml = (function () {
// This function does nothing and returns undefined.
//
// It is often used to swallow promise's errors.
export function noop () {}
export const noop = () => {}
// -------------------------------------------------------------------
// Duck-types a thenable: anything non-nullish with a `then` method is
// treated as a promise.
export const isPromise = value => {
  if (value == null) {
    return false
  }
  return typeof value.then === 'function'
}
// Awaits every item of `promises` (array or object), one after the
// other, and resolves with a same-shaped collection of the values.
// Rejects with the first rejection reason encountered.
const _pAll = (promises, mapFn) => {
  if (mapFn) {
    promises = map(promises, mapFn)
  }

  // When a mapFn was given the mapped collection itself is reused as
  // the results container; otherwise allocate one of the right shape.
  const results = mapFn
    ? promises
    : 'length' in promises
      ? new Array(promises.length)
      : {}

  let chain = Promise.resolve()
  forEach(promises, (promise, key) => {
    chain = chain
      .then(() => promise)
      .then(value => {
        results[key] = value
      })
  })

  return chain.then(() => results)
}
// Waits for all the promises of a collection (array or object) to
// resolve, fulfilling with a same-shaped collection of the values, or
// rejects with the reason of the first promise that rejects.
//
// Optionally a function can be provided to map all items in the
// collection before waiting for completion.
//
// Usage: pAll(promises, [ mapFn ]) or promises::pAll([ mapFn ])
export function pAll (promises, mapFn) {
  if (this) {
    // Bound form: the collection is `this`, the first argument is the
    // optional map function.
    mapFn = promises
    promises = this
  }

  return Promise.resolve(promises)
    .then(collection => _pAll(collection, mapFn))
}
// Logs the settlement of a promise (with the kind of its value or
// reason) without altering the promise itself.
//
// Usage: pDebug(promise, name) or promise::pDebug(name)
export function pDebug (promise, name) {
  if (arguments.length === 1) {
    // Bound form: only the name was passed.
    name = promise
    promise = this
  }

  const log = (state, subject) => console.log(
    '%s',
    `Promise ${name} ${state}${subject !== undefined ? ` with ${kindOf(subject)}` : ''}`
  )

  Promise.resolve(promise).then(
    value => log('resolved', value),
    reason => log('rejected', reason)
  )

  return promise
}
// Ponyfill for Promise.finally(cb)
export const pFinally = (promise, cb) => {
return promise.then(
(value) => constructor.resolve(cb()).then(() => value),
(reason) => constructor.resolve(cb()).then(() => {
//
// Usage: promise::pFinally(cb)
export function pFinally (cb) {
  // Resolve through this promise's own constructor (keeps subclasses
  // intact) and wait for cb()'s possibly-async completion before
  // re-propagating the original outcome.
  const runCallback = () => this.constructor.resolve(cb())

  return this.then(
    value => runCallback().then(() => value),
    reason => runCallback().then(() => {
      throw reason
    })
  )
}
// Promisifies a single Node-style callback invocation.
//
// Usage:
//
//     pFromCallback(cb => fs.readFile('foo.txt', cb))
//       .then(content => {
//         console.log(content)
//       })
export const pFromCallback = fn => new Promise((resolve, reject) => {
  fn((error, result) => {
    if (error) {
      reject(error)
    } else {
      resolve(result)
    }
  })
})
// Shared prototype of fulfilled PromiseInspection objects.
const _FULFILLED_INSPECTION_PROTO = {
  isFulfilled: () => true,
  isRejected: () => false,
  reason: () => {
    throw new Error('no reason, the promise has resolved')
  }
}

// Wraps a fulfillment value in a PromiseInspection object.
const _pReflectResolution = value => ({
  __proto__: _FULFILLED_INSPECTION_PROTO,
  value: () => value
})
// Shared prototype of rejected PromiseInspection objects.
const _REJECTED_INSPECTION_PROTO = {
  isFulfilled: () => false,
  isRejected: () => true,
  value: () => {
    throw new Error('no value, the promise has rejected')
  }
}

// Wraps a rejection reason in a PromiseInspection object.
const _pReflectRejection = reason => ({
  __proto__: _REJECTED_INSPECTION_PROTO,
  reason: () => reason
})
// Returns a promise which always fulfills once this promise settles;
// its fulfillment value implements the PromiseInspection interface
// (isFulfilled()/isRejected()/value()/reason()) and reflects the
// resolution of this promise.
//
// Usage: pReflect(promise) or promise::pReflect()
export function pReflect (promise) {
  const subject = this || promise
  return Promise.resolve(subject).then(
    _pReflectResolution,
    _pReflectRejection
  )
}
// Given a collection (array or object) of promises, returns a promise
// fulfilled — never rejected — once every item has settled, with a
// same-shaped collection of PromiseInspection objects.
//
// Usage: pSettle(promises) or promises::pSettle()
export function pSettle (promises) {
  const collection = this || promises
  return pAll(collection, pReflect)
}
// -------------------------------------------------------------------
export {promisify}
export {promisifyAll} from 'bluebird'
export {
// Create a function which returns promises instead of taking a
// callback.
promisify,
// For all enumerable methods of an object, create a new method
// which name is suffixed with `Async` which return promises instead
// of taking a callback.
promisifyAll
} from 'bluebird'
// -------------------------------------------------------------------
@@ -121,30 +380,78 @@ export function parseSize (size) {
// -------------------------------------------------------------------
// Cached own-property test, safe even for prototype-less objects.
const _has = Object.prototype.hasOwnProperty

// Removes the first own enumerable property found on `obj` and
// returns its value (undefined when the object has none).
export const popProperty = obj => {
  for (const key in obj) {
    if (_has.call(obj, key)) {
      return extractProperty(obj, key)
    }
  }
}
// -------------------------------------------------------------------
// Format a date in ISO 8601 in a safe way to be used in filenames
// (even on Windows).
export const safeDateFormat = d3TimeFormat('%Y%m%dT%H%M%SZ')
// -------------------------------------------------------------------
// This functions are often used throughout xo-server.
//
// Exports them from here to avoid direct dependencies on lodash.
export { default as forEach } from 'lodash.foreach'
export { default as isEmpty } from 'lodash.isempty'
export { default as mapToArray } from 'lodash.map'
// -------------------------------------------------------------------
// Special value which can be returned to stop an iteration in map()
// and mapInPlace().
export const done = {}
export const DONE = {}
// Similar to `lodash.map()` for array and `lodash.mapValues()` for
// objects.
// Fill `target` by running each element in `collection` through
// `iteratee`.
//
// Note: can be interrupted by returning the special value `done`
// provided as the forth argument.
export function map (col, iterator, thisArg = this) {
const result = has(col, 'length') ? [] : {}
forEach(col, (item, i) => {
const value = iterator.call(thisArg, item, i, done)
if (value === done) {
// If `target` is undefined, it defaults to a new array if
// `collection` is array-like (has a `length` property), otherwise an
// object.
//
// The context of `iteratee` can be specified via `thisArg`.
//
// Note: the Mapping can be interrupted by returning the special value
// `DONE` provided as the fourth argument.
//
// Usage: map(collection, item => item + 1)
// Fills `target` by running each element of `collection` through
// `iteratee` (see the full contract in the comments above); the
// mapping stops early when `iteratee` returns the sentinel `DONE`.
export function map (
  collection,
  iteratee,
  thisArg,
  target = has(collection, 'length') ? [] : {}
) {
  forEach(collection, (item, i) => {
    const value = iteratee.call(thisArg, item, i, collection, DONE)
    if (value === DONE) {
      // Returning false stops lodash's forEach early.
      return false
    }
    // Fix: merge residue assigned to the undefined `result` and
    // returned it; only `target` exists in this version.
    target[i] = value
  })
  return target
}
// Helper to `map()` to update the current collection.
//
// Passes `collection` itself as `map()`'s `target`, so every element
// is replaced in place by the result of `iteratee`; returns the same
// (mutated) collection.
export function mapInPlace (collection, iteratee, thisArg) {
  return map(collection, iteratee, thisArg, collection)
}
// -------------------------------------------------------------------
// Create a hash from multiple values.
export const multiKeyHash = (...args) => new Promise((resolve, reject) => {
export const multiKeyHash = (...args) => new Promise(resolve => {
const hash = multiKeyHashInt(...args)
const buf = new Buffer(4)
@@ -153,22 +460,7 @@ export const multiKeyHash = (...args) => new Promise((resolve, reject) => {
resolve(base64url(buf))
})
// Similar to `map()` but change the current collection.
//
// Note: can be interrupted by returning the special value `done`
// provided as the forth argument.
export function mapInPlace (col, iterator, thisArg = this) {
forEach(col, (item, i) => {
const value = iterator.call(thisArg, item, i, done)
if (value === done) {
return false
}
col[i] = value
})
return col
}
// -------------------------------------------------------------------
// Wrap a value in a function.
// Returns a nullary function which always yields `value`.
// Fix: the diff residue declared `wrap` twice in a row (old and new
// form), which is a SyntaxError (duplicate const binding); keep one.
export const wrap = value => () => value

View File

@@ -1,16 +1,20 @@
/* eslint-env mocha */
import expect from 'must'
import sinon from 'sinon'
// ===================================================================
import {
camelToSnakeCase,
createRawObject,
ensureArray,
extractProperty,
formatXml,
generateToken,
parseSize
parseSize,
pFinally,
pSettle
} from './utils'
// ===================================================================
@@ -31,6 +35,28 @@ describe('camelToSnakeCase()', function () {
// -------------------------------------------------------------------
describe('createRawObject()', () => {
it('returns an object', () => {
expect(createRawObject()).to.be.an.object()
})
it('returns an empty object', () => {
expect(createRawObject()).to.be.empty()
})
it('creates a new object each time', () => {
expect(createRawObject()).to.not.equal(createRawObject())
})
if (Object.getPrototypeOf) {
it('creates an object without a prototype', () => {
expect(Object.getPrototypeOf(createRawObject())).to.be.null()
})
}
})
// -------------------------------------------------------------------
describe('ensureArray()', function () {
it('wrap the value in an array', function () {
const value = 'foo'
@@ -115,3 +141,93 @@ describe('parseSize()', function () {
expect(parseSize('3MB')).to.equal(3e6)
})
})
// -------------------------------------------------------------------
describe('pFinally()', () => {
it('calls a callback on resolution', async () => {
const value = {}
const spy = sinon.spy()
await expect(
Promise.resolve(value)::pFinally(spy)
).to.resolve.to.equal(
value
)
expect(spy.callCount).to.equal(1)
})
it('calls a callback on rejection', async () => {
const reason = {}
const spy = sinon.spy()
await expect(
Promise.reject(reason)::pFinally(spy)
).to.reject.to.equal(
reason
)
expect(spy.callCount).to.equal(1)
})
})
// -------------------------------------------------------------------
describe('pSettle()', () => {
it('works with arrays', async () => {
const [
status1,
status2,
status3
] = await pSettle([
Promise.resolve(42),
Math.PI,
Promise.reject('fatality')
])
expect(status1.isRejected()).to.equal(false)
expect(status2.isRejected()).to.equal(false)
expect(status3.isRejected()).to.equal(true)
expect(status1.isFulfilled()).to.equal(true)
expect(status2.isFulfilled()).to.equal(true)
expect(status3.isFulfilled()).to.equal(false)
expect(status1.value()).to.equal(42)
expect(status2.value()).to.equal(Math.PI)
expect(::status3.value).to.throw()
expect(::status1.reason).to.throw()
expect(::status2.reason).to.throw()
expect(status3.reason()).to.equal('fatality')
})
it('works with objects', async () => {
const {
a: status1,
b: status2,
c: status3
} = await pSettle({
a: Promise.resolve(42),
b: Math.PI,
c: Promise.reject('fatality')
})
expect(status1.isRejected()).to.equal(false)
expect(status2.isRejected()).to.equal(false)
expect(status3.isRejected()).to.equal(true)
expect(status1.isFulfilled()).to.equal(true)
expect(status2.isFulfilled()).to.equal(true)
expect(status3.isFulfilled()).to.equal(false)
expect(status1.value()).to.equal(42)
expect(status2.value()).to.equal(Math.PI)
expect(::status3.value).to.throw()
expect(::status1.reason).to.throw()
expect(::status2.reason).to.throw()
expect(status3.reason()).to.equal('fatality')
})
})

View File

@@ -1,4 +1,3 @@
import assign from 'lodash.assign'
import createDebug from 'debug'
import WebSocket from 'ws'
@@ -12,9 +11,11 @@ const defaults = {
// Proxy a WebSocket `client` to a remote server which has `url` as
// address.
export default function wsProxy (client, url, opts) {
opts = assign({}, defaults, {
protocol: client.protocol
}, opts)
opts = {
...defaults,
protocol: client.protocol,
...opts
}
const autoClose = !!opts.autoClose
delete opts.autoClose

611
src/xapi-object-to-xo.js Normal file
View File

@@ -0,0 +1,611 @@
import isArray from 'lodash.isarray'
import {
ensureArray,
extractProperty,
forEach,
mapToArray,
parseXml
} from './utils'
import {
isHostRunning,
isVmHvm,
isVmRunning,
parseDateTime
} from './xapi'
// ===================================================================
const {
defineProperties,
freeze
} = Object
// Resolves the dynamic `$<prop>` reference(s) of a XAPI object into the
// plain identifier(s) (`idField`, `$id` by default) used to link XO
// objects together.
function link (obj, prop, idField = '$id') {
  const value = obj[`$${prop}`]

  if (value == null) {
    // Preserves the distinction between null and undefined.
    return value
  }

  return isArray(value)
    ? mapToArray(value, idField)
    : value[idField]
}
// Parse a string date time to a Unix timestamp (in seconds).
//
// Returns null when there is no date or when the parsed timestamp is 0
// (a zero timestamp is treated as "no value").
function toTimestamp (date) {
  if (!date) {
    return null
  }

  const ms = parseDateTime(date).getTime()
  return ms
    ? Math.round(ms / 1000)
    : null
}
// ===================================================================
const TRANSFORMS = {
pool (obj) {
return {
default_SR: link(obj, 'default_SR'),
HA_enabled: Boolean(obj.ha_enabled),
master: link(obj, 'master'),
tags: obj.tags,
name_description: obj.name_description,
name_label: obj.name_label || obj.$master.name_label
// TODO
// - ? networks = networksByPool.items[pool.id] (network.$pool.id)
// - hosts = hostsByPool.items[pool.id] (host.$pool.$id)
// - patches = poolPatchesByPool.items[pool.id] (poolPatch.$pool.id)
// - SRs = srsByContainer.items[pool.id] (sr.$container.id)
// - templates = vmTemplatesByContainer.items[pool.id] (vmTemplate.$container.$id)
// - VMs = vmsByContainer.items[pool.id] (vm.$container.id)
// - $running_hosts = runningHostsByPool.items[pool.id] (runningHost.$pool.id)
// - $running_VMs = runningVmsByPool.items[pool.id] (runningHost.$pool.id)
// - $VMs = vmsByPool.items[pool.id] (vm.$pool.id)
}
},
// -----------------------------------------------------------------
host (obj) {
const {
$metrics: metrics,
other_config: otherConfig
} = obj
const isRunning = isHostRunning(obj)
return {
address: obj.address,
bios_strings: obj.bios_strings,
build: obj.software_version.build_number,
CPUs: obj.cpu_info,
enabled: Boolean(obj.enabled),
current_operations: obj.current_operations,
hostname: obj.hostname,
iSCSI_name: otherConfig.iscsi_iqn || null,
name_description: obj.name_description,
name_label: obj.name_label,
memory: (function () {
if (metrics) {
const free = +metrics.memory_free
const total = +metrics.memory_total
return {
usage: total - free,
size: total
}
}
return {
usage: 0,
total: 0
}
})(),
patches: link(obj, 'patches'),
powerOnMode: obj.power_on_mode,
power_state: isRunning ? 'Running' : 'Halted',
tags: obj.tags,
version: obj.software_version.product_version,
// TODO: dedupe.
PIFs: link(obj, 'PIFs'),
$PIFs: link(obj, 'PIFs'),
PCIs: link(obj, 'PCIs'),
$PCIs: link(obj, 'PCIs'),
PGPUs: link(obj, 'PGPUs'),
$PGPUs: link(obj, 'PGPUs'),
$PBDs: link(obj, 'PBDs')
// TODO:
// - controller = vmControllersByContainer.items[host.id]
// - SRs = srsByContainer.items[host.id]
// - tasks = tasksByHost.items[host.id]
// - templates = vmTemplatesByContainer.items[host.id]
// - VMs = vmsByContainer.items[host.id]
// - $vCPUs = sum(host.VMs, vm => host.CPUs.number)
}
},
// -----------------------------------------------------------------
vm (obj) {
const {
$guest_metrics: guestMetrics,
$metrics: metrics,
other_config: otherConfig
} = obj
const isHvm = isVmHvm(obj)
const isRunning = isVmRunning(obj)
const vm = {
// type is redefined after for controllers/, templates &
// snapshots.
type: 'VM',
addresses: guestMetrics && guestMetrics.networks || null,
auto_poweron: Boolean(otherConfig.auto_poweron),
boot: obj.HVM_boot_params,
CPUs: {
max: +obj.VCPUs_max,
number: (
isRunning && metrics
? +metrics.VCPUs_number
: +obj.VCPUs_at_startup
)
},
current_operations: obj.current_operations,
docker: (function () {
const monitor = otherConfig['xscontainer-monitor']
if (!monitor) {
return
}
if (monitor === 'False') {
return {
enabled: false
}
}
const {
docker_ps: process,
docker_info: info,
docker_version: version
} = otherConfig
return {
enabled: true,
info: info && parseXml(info).docker_info,
process: process && parseXml(process).docker_ps,
version: version && parseXml(version).docker_version
}
})(),
// TODO: there is two possible value: "best-effort" and "restart"
high_availability: Boolean(obj.ha_restart_priority),
memory: (function () {
const dynamicMin = +obj.memory_dynamic_min
const dynamicMax = +obj.memory_dynamic_max
const staticMin = +obj.memory_static_min
const staticMax = +obj.memory_static_max
const memory = {
dynamic: [ dynamicMin, dynamicMax ],
static: [ staticMin, staticMax ]
}
const gmMemory = guestMetrics && guestMetrics.memory
if (!isRunning) {
memory.size = dynamicMax
} else if (gmMemory && gmMemory.used) {
memory.usage = +gmMemory.used
memory.size = +gmMemory.total
} else if (metrics) {
memory.size = +metrics.memory_actual
} else {
memory.size = dynamicMax
}
return memory
})(),
name_description: obj.name_description,
name_label: obj.name_label,
other: otherConfig,
os_version: guestMetrics && guestMetrics.os_version || null,
power_state: obj.power_state,
snapshots: link(obj, 'snapshots'),
tags: obj.tags,
VIFs: link(obj, 'VIFs'),
virtualizationMode: isHvm ? 'hvm' : 'pv',
// <=> Are the Xen Server tools installed?
//
// - undefined: unknown status
// - false: not optimized
// - 'out of date': optimized but drivers should be updated
// - 'up to date': optimized
xenTools: (() => {
if (!isRunning || !metrics) {
// Unknown status, returns nothing.
return
}
if (!guestMetrics) {
return false
}
const { PV_drivers_version: { major, minor } } = guestMetrics
if (major === undefined || minor === undefined) {
return false
}
return guestMetrics.PV_drivers_up_to_date
? 'up to date'
: 'out of date'
})(),
$container: (
isRunning
? link(obj, 'resident_on')
: link(obj, 'pool') // TODO: handle local VMs (`VM.get_possible_hosts()`).
),
$VBDs: link(obj, 'VBDs'),
// TODO: dedupe
VGPUs: link(obj, 'VGPUs'),
$VGPUs: link(obj, 'VGPUs')
}
if (obj.is_control_domain) {
vm.type += '-controller'
} else if (obj.is_a_snapshot) {
vm.type += '-snapshot'
vm.snapshot_time = toTimestamp(obj.snapshot_time)
vm.$snapshot_of = link(obj, 'snapshot_of')
} else if (obj.is_a_template) {
vm.type += '-template'
vm.CPUs.number = +obj.VCPUs_at_startup
vm.template_info = {
arch: otherConfig['install-arch'],
disks: (function () {
const {disks: xml} = otherConfig
let data
if (!xml || !(data = parseXml(xml)).provision) {
return []
}
const disks = ensureArray(data.provision.disk)
forEach(disks, function normalize (disk) {
disk.bootable = disk.bootable === 'true'
disk.size = +disk.size
disk.SR = extractProperty(disk, 'sr')
})
return disks
})(),
install_methods: (function () {
const {['install-methods']: methods} = otherConfig
return methods ? methods.split(',') : []
})(),
install_repository: otherConfig['install-repository']
}
}
if (obj.VCPUs_params && obj.VCPUs_params.weight) {
vm.cpuWeight = obj.VCPUs_params.weight
}
if (!isHvm) {
vm.PV_args = obj.PV_args
}
return vm
},
// -----------------------------------------------------------------
sr (obj) {
return {
type: 'SR',
content_type: obj.content_type,
// TODO: Should it replace usage?
physical_usage: +obj.physical_utilisation,
name_description: obj.name_description,
name_label: obj.name_label,
size: +obj.physical_size,
SR_type: obj.type,
tags: obj.tags,
usage: +obj.virtual_allocation,
VDIs: link(obj, 'VDIs'),
$container: (
obj.shared
? link(obj, 'pool')
: obj.$PBDs[0] && link(obj.$PBDs[0], 'host')
),
$PBDs: link(obj, 'PBDs')
}
},
// -----------------------------------------------------------------
pbd (obj) {
return {
type: 'PBD',
attached: obj.currently_attached,
host: link(obj, 'host'),
SR: link(obj, 'SR')
}
},
// -----------------------------------------------------------------
pif (obj) {
return {
type: 'PIF',
attached: Boolean(obj.currently_attached),
device: obj.device,
IP: obj.IP,
MAC: obj.MAC,
management: Boolean(obj.management), // TODO: find a better name.
mode: obj.ip_configuration_mode,
MTU: +obj.MTU,
netmask: obj.netmask,
vlan: +obj.VLAN,
// TODO: What is it?
//
// Could it mean “is this a physical interface?”.
// How could a PIF not be physical?
// physical: obj.physical,
$host: link(obj, 'host'),
$network: link(obj, 'network')
}
},
// -----------------------------------------------------------------
vdi (obj) {
if (!obj.managed) {
return
}
const vdi = {
type: 'VDI',
name_description: obj.name_description,
name_label: obj.name_label,
size: +obj.virtual_size,
snapshots: link(obj, 'snapshots'),
tags: obj.tags,
usage: +obj.physical_utilisation,
$SR: link(obj, 'SR'),
$VBDs: link(obj, 'VBDs')
}
if (obj.is_a_snapshot) {
vdi.type += '-snapshot'
vdi.snapshot_time = toTimestamp(obj.snapshot_time)
vdi.$snapshot_of = link(obj, 'snapshot_of')
}
return vdi
},
// -----------------------------------------------------------------
vbd (obj) {
return {
type: 'VBD',
attached: Boolean(obj.currently_attached),
bootable: Boolean(obj.bootable),
is_cd_drive: obj.type === 'CD',
position: obj.userdevice,
read_only: obj.mode === 'RO',
VDI: link(obj, 'VDI'),
VM: link(obj, 'VM')
}
},
// -----------------------------------------------------------------
vif (obj) {
return {
type: 'VIF',
attached: Boolean(obj.currently_attached),
device: obj.device, // TODO: should it be cast to a number?
MAC: obj.MAC,
MTU: +obj.MTU,
$network: link(obj, 'network'),
$VM: link(obj, 'VM')
}
},
// -----------------------------------------------------------------
network (obj) {
return {
bridge: obj.bridge,
MTU: +obj.MTU,
name_description: obj.name_description,
name_label: obj.name_label,
tags: obj.tags,
PIFs: link(obj, 'PIFs'),
VIFs: link(obj, 'VIFs')
}
},
// -----------------------------------------------------------------
message (obj) {
return {
body: obj.body,
name: obj.name,
time: toTimestamp(obj.timestamp),
$object: obj.obj_uuid // Special link as it is already an UUID.
}
},
// -----------------------------------------------------------------
task (obj) {
return {
created: toTimestamp(obj.created),
current_operations: obj.current_operations,
finished: toTimestamp(obj.finished),
name_description: obj.name_description,
name_label: obj.name_label,
progress: +obj.progress,
result: obj.result,
status: obj.status,
$host: link(obj, 'resident_on')
}
},
// -----------------------------------------------------------------
host_patch (obj) {
return {
applied: Boolean(obj.applied),
time: toTimestamp(obj.timestamp_applied),
pool_patch: link(obj, 'pool_patch', '$ref'),
$host: link(obj, 'host')
}
},
// -----------------------------------------------------------------
pool_patch (obj) {
return {
id: obj.$ref,
applied: Boolean(obj.pool_applied),
description: obj.name_description,
guidance: obj.after_apply_guidance,
name: obj.name_label,
size: +obj.size,
uuid: obj.uuid,
// TODO: what does it mean, should we handle it?
// version: obj.version,
// TODO: host.[$]pool_patches ←→ pool.[$]host_patches
$host_patches: link(obj, 'host_patches')
}
},
// -----------------------------------------------------------------
pci (obj) {
return {
type: 'PCI',
class_name: obj.class_name,
device_name: obj.device_name,
pci_id: obj.pci_id,
$host: link(obj, 'host')
}
},
// -----------------------------------------------------------------
pgpu (obj) {
return {
type: 'PGPU',
pci: link(obj, 'PCI'),
// TODO: dedupe.
host: link(obj, 'host'),
$host: link(obj, 'host'),
vgpus: link(obj, 'resident_VGPUs'),
$vgpus: link(obj, 'resident_VGPUs')
}
},
// -----------------------------------------------------------------
vgpu (obj) {
return {
type: 'VGPU',
currentlyAttached: Boolean(obj.currently_attached),
device: obj.device,
resident_on: link(obj, 'resident_on'),
vm: link(obj, 'VM')
}
}
}
// ===================================================================
// Converts a XAPI object into its XO counterpart via the TRANSFORMS
// table above, then decorates it with the common id/type/uuid/$pool
// properties and freezes it.
//
// Returns undefined when the object's type is not handled or when the
// transform itself discards the object (e.g. unmanaged VDIs).
export default xapiObj => {
  const transform = TRANSFORMS[xapiObj.$type.toLowerCase()]
  if (!transform) {
    return
  }
  const xoObj = transform(xapiObj)
  if (!xoObj) {
    return
  }
  // The transform may have set its own id/type/uuid (e.g. pool_patch
  // sets id); only fill in the defaults when missing.
  if (!('id' in xoObj)) {
    xoObj.id = xapiObj.$id
  }
  if (!('type' in xoObj)) {
    xoObj.type = xapiObj.$type
  }
  if (
    'uuid' in xapiObj &&
    !('uuid' in xoObj)
  ) {
    xoObj.uuid = xapiObj.uuid
  }
  xoObj.$pool = xapiObj.$pool.$id
  xoObj.$poolId = xoObj.$pool // TODO: deprecated, remove when no longer used in xo-web
  // Internal properties: defined non-enumerable (defineProperties
  // defaults) — presumably to keep them out of what is sent to
  // clients; confirm against the serialization path.
  defineProperties(xoObj, {
    _xapiId: {
      value: xapiObj.$id
    },
    _xapiRef: {
      value: xapiObj.$ref
    }
  })
  // Freezes and returns the new object.
  return freeze(xoObj)
}

View File

@@ -1,517 +0,0 @@
import forEach from 'lodash.foreach'
import isArray from 'lodash.isarray'
import map from 'lodash.map'
import {
ensureArray,
extractProperty,
parseXml
} from './utils'
import {
isHostRunning,
isVmRunning
} from './xapi'
// ===================================================================
function link (obj, prop) {
const dynamicValue = obj[`$${prop}`]
if (dynamicValue == null) {
return dynamicValue // Properly handles null and undefined.
}
if (isArray(dynamicValue)) {
return map(dynamicValue, '$id')
}
return dynamicValue.$id
}
// The JSON interface of XAPI formats dates without the ISO 8601 date
// separators (e.g. `20160204T17:26:27Z`); re-insert the dashes so
// `Date.parse()` understands them. Dates already containing separators
// are returned untouched.
const JSON_DATE_RE = /^(\d{4})(\d{2})(\d{2})T(.+)$/
function fixJsonDate (date) {
  const parts = JSON_DATE_RE.exec(date)
  if (parts === null) {
    return date
  }

  const [, year, month, day, time] = parts
  return `${year}-${month}-${day}T${time}`
}

// Converts such a date to a Unix timestamp in seconds; returns null
// when there is no date.
function toTimestamp (date) {
  if (!date) {
    return null
  }

  const fixed = fixJsonDate(date)
  return Math.round(Date.parse(fixed) / 1000)
}
// ===================================================================
export function pool (obj) {
return {
default_SR: link(obj, 'default_SR'),
HA_enabled: Boolean(obj.ha_enabled),
master: link(obj, 'master'),
tags: obj.tags,
name_description: obj.name_description,
name_label: obj.name_label || obj.$master.name_label
// TODO
// - ? networks = networksByPool.items[pool.id] (network.$pool.id)
// - hosts = hostsByPool.items[pool.id] (host.$pool.$id)
// - patches = poolPatchesByPool.items[pool.id] (poolPatch.$pool.id)
// - SRs = srsByContainer.items[pool.id] (sr.$container.id)
// - templates = vmTemplatesByContainer.items[pool.id] (vmTemplate.$container.$id)
// - VMs = vmsByContainer.items[pool.id] (vm.$container.id)
// - $running_hosts = runningHostsByPool.items[pool.id] (runningHost.$pool.id)
// - $running_VMs = runningVmsByPool.items[pool.id] (runningHost.$pool.id)
// - $VMs = vmsByPool.items[pool.id] (vm.$pool.id)
}
}
// -------------------------------------------------------------------
export function host (obj) {
const {
$metrics: metrics,
other_config: otherConfig
} = obj
const isRunning = isHostRunning(obj)
return {
address: obj.address,
bios_strings: obj.bios_strings,
build: obj.software_version.build_number,
CPUs: obj.cpu_info,
enabled: Boolean(obj.enabled),
current_operations: obj.current_operations,
hostname: obj.hostname,
iSCSI_name: otherConfig.iscsi_iqn || null,
name_description: obj.name_description,
name_label: obj.name_label,
memory: (function () {
if (metrics) {
const free = +metrics.memory_free
const total = +metrics.memory_total
return {
usage: total - free,
size: total
}
}
return {
usage: 0,
total: 0
}
})(),
patches: link(obj, 'patches'),
powerOnMode: obj.power_on_mode,
power_state: isRunning ? 'Running' : 'Halted',
tags: obj.tags,
version: obj.software_version.product_version,
// TODO: dedupe.
PIFs: link(obj, 'PIFs'),
$PIFs: link(obj, 'PIFs'),
PCIs: link(obj, 'PCIs'),
$PCIs: link(obj, 'PCIs'),
PGPUs: link(obj, 'PGPUs'),
$PGPUs: link(obj, 'PGPUs'),
$PBDs: link(obj, 'PBDs')
// TODO:
// - controller = vmControllersByContainer.items[host.id]
// - SRs = srsByContainer.items[host.id]
// - tasks = tasksByHost.items[host.id]
// - templates = vmTemplatesByContainer.items[host.id]
// - VMs = vmsByContainer.items[host.id]
// - $vCPUs = sum(host.VMs, vm => host.CPUs.number)
}
}
// -------------------------------------------------------------------
/**
 * Transforms a XAPI VM record into its XO representation.
 *
 * The returned `type` starts as 'VM' and is refined below for control
 * domains ('VM-controller'), snapshots ('VM-snapshot') and templates
 * ('VM-template').
 *
 * @param {Object} obj - XAPI VM object (with `$guest_metrics` and
 *   `$metrics` links resolved).
 * @returns {Object} XO VM view.
 */
export function vm (obj) {
  const {
    $guest_metrics: guestMetrics,
    $metrics: metrics,
    other_config: otherConfig
  } = obj

  const isRunning = isVmRunning(obj)

  const vm = {
    // type is redefined after for controllers, templates &
    // snapshots.
    type: 'VM',

    // Network addresses reported by the guest tools, if any.
    addresses: guestMetrics && guestMetrics.networks || null,
    auto_poweron: Boolean(otherConfig.auto_poweron),
    boot: obj.HVM_boot_params,
    CPUs: {
      max: +obj.VCPUs_max,
      // Live value when running and metrics are available, otherwise
      // the configured startup value.
      number: (
        isRunning && metrics
          ? +metrics.VCPUs_number
          : +obj.VCPUs_at_startup
      )
    },
    current_operations: obj.current_operations,
    // Docker (xscontainer plugin) state, parsed from other_config.
    docker: (function () {
      const monitor = otherConfig['xscontainer-monitor']
      if (!monitor) {
        // Monitoring not set up: no docker info at all.
        return
      }

      if (monitor === 'False') {
        return {
          enabled: false
        }
      }

      const {
        docker_ps: process,
        docker_info: info,
        docker_version: version
      } = otherConfig

      return {
        enabled: true,
        info: info && parseXml(info).docker_info,
        process: process && parseXml(process).docker_ps,
        version: version && parseXml(version).docker_version
      }
    })(),

    // TODO: there is two possible value: "best-effort" and "restart"
    high_availability: Boolean(obj.ha_restart_priority),

    memory: (function () {
      const dynamicMin = +obj.memory_dynamic_min
      const dynamicMax = +obj.memory_dynamic_max
      const staticMin = +obj.memory_static_min
      const staticMax = +obj.memory_static_max

      const memory = {
        dynamic: [ dynamicMin, dynamicMax ],
        static: [ staticMin, staticMax ]
      }

      const gmMemory = guestMetrics && guestMetrics.memory

      // Size source, by preference: guest metrics (usage + size),
      // then metrics (actual size), then the configured dynamic max.
      if (!isRunning) {
        memory.size = dynamicMax
      } else if (gmMemory && gmMemory.used) {
        memory.usage = +gmMemory.used
        memory.size = +gmMemory.total
      } else if (metrics) {
        memory.size = +metrics.memory_actual
      } else {
        memory.size = dynamicMax
      }

      return memory
    })(),

    name_description: obj.name_description,
    name_label: obj.name_label,
    other: otherConfig,
    os_version: guestMetrics && guestMetrics.os_version || null,
    power_state: obj.power_state,
    PV_args: obj.PV_args,
    // The guest metrics object only exists when the PV drivers are
    // installed and reporting.
    PV_drivers: Boolean(guestMetrics),
    PV_drivers_up_to_date: Boolean(guestMetrics && guestMetrics.PV_drivers_up_to_date),
    snapshot_time: toTimestamp(obj.snapshot_time),
    snapshots: link(obj, 'snapshots'),
    tags: obj.tags,
    VIFs: link(obj, 'VIFs'),

    $container: (
      isRunning
        ? link(obj, 'resident_on')
        : link(obj, 'pool') // TODO: handle local VMs (`VM.get_possible_hosts()`).
    ),
    $VBDs: link(obj, 'VBDs'),

    // TODO: dedupe
    VGPUs: link(obj, 'VGPUs'),
    $VGPUs: link(obj, 'VGPUs')
  }

  if (obj.is_control_domain) {
    vm.type += '-controller'
  } else if (obj.is_a_snapshot) {
    vm.type += '-snapshot'

    vm.$snapshot_of = link(obj, 'snapshot_of')
  } else if (obj.is_a_template) {
    vm.type += '-template'

    // Templates have no live metrics: always use the startup value.
    vm.CPUs.number = +obj.VCPUs_at_startup
    // Provisioning info stored by XenServer in other_config.
    vm.template_info = {
      arch: otherConfig['install-arch'],
      disks: (function () {
        const {disks: xml} = otherConfig
        let data
        if (!xml || !(data = parseXml(xml)).provision) {
          return []
        }

        const disks = ensureArray(data.provision.disk)
        forEach(disks, function normalize (disk) {
          disk.bootable = disk.bootable === 'true'
          disk.size = +disk.size
          disk.SR = extractProperty(disk, 'sr')
        })

        return disks
      })(),
      install_methods: (function () {
        const {['install-methods']: methods} = otherConfig

        return methods ? methods.split(',') : []
      })()
    }
  }

  return vm
}
// -------------------------------------------------------------------
/**
 * Transforms a XAPI SR record into its XO representation.
 */
export function sr (obj) {
  // A shared SR belongs to the pool; a local one belongs to the host
  // of its first PBD (if any).
  let container
  if (obj.shared) {
    container = link(obj, 'pool')
  } else {
    const firstPbd = obj.$PBDs[0]
    container = firstPbd && link(firstPbd, 'host')
  }

  return {
    type: 'SR',
    content_type: obj.content_type,
    name_description: obj.name_description,
    name_label: obj.name_label,
    physical_usage: +obj.physical_utilisation,
    size: +obj.physical_size,
    SR_type: obj.type,
    tags: obj.tags,
    usage: +obj.virtual_allocation,
    VDIs: link(obj, 'VDIs'),
    $container: container,
    $PBDs: link(obj, 'PBDs')
  }
}
// -------------------------------------------------------------------
/**
 * Transforms a XAPI PBD record into its XO representation.
 */
export function pbd (obj) {
  return {
    type: 'PBD',

    // Cast to a boolean for consistency with the `attached` property
    // of the other transforms (PIF, VBD, VIF…).
    attached: Boolean(obj.currently_attached),
    host: link(obj, 'host'),
    SR: link(obj, 'SR')
  }
}
// -------------------------------------------------------------------
/**
 * Transforms a XAPI PIF record into its XO representation.
 */
export function pif (obj) {
  const { device, IP, MAC, netmask } = obj

  return {
    type: 'PIF',

    attached: Boolean(obj.currently_attached),
    device,
    IP,
    MAC,
    management: Boolean(obj.management), // TODO: find a better name.
    mode: obj.ip_configuration_mode,
    MTU: +obj.MTU,
    netmask,
    vlan: +obj.VLAN,

    // TODO: What is it?
    //
    // Could it mean “is this a physical interface?”.
    // How could a PIF not be physical?
    // physical: obj.physical,

    $host: link(obj, 'host'),
    $network: link(obj, 'network')
  }
}
// -------------------------------------------------------------------
// TODO: should we have a VDI-snapshot type like we have with VMs?
/**
 * Transforms a XAPI VDI record into its XO representation.
 *
 * Unmanaged VDIs are skipped: `undefined` is returned for them.
 *
 * @param {Object} obj - XAPI VDI object.
 * @returns {Object|undefined} XO VDI view, or undefined if unmanaged.
 */
export function vdi (obj) {
  if (!obj.managed) {
    return
  }

  return {
    type: 'VDI',
    name_description: obj.name_description,
    name_label: obj.name_label,
    size: +obj.virtual_size,
    snapshots: link(obj, 'snapshots'),
    snapshot_time: toTimestamp(obj.snapshot_time),
    tags: obj.tags,
    usage: +obj.physical_utilisation,
    $snapshot_of: link(obj, 'snapshot_of'),
    $SR: link(obj, 'SR'),
    $VBDs: link(obj, 'VBDs')
  }
}
// -------------------------------------------------------------------
/**
 * Transforms a XAPI VBD record into its XO representation.
 */
export function vbd (obj) {
  const attached = Boolean(obj.currently_attached)
  const bootable = Boolean(obj.bootable)

  return {
    type: 'VBD',

    attached,
    bootable,
    is_cd_drive: obj.type === 'CD',
    position: obj.userdevice,
    read_only: obj.mode === 'RO',
    VDI: link(obj, 'VDI'),
    VM: link(obj, 'VM')
  }
}
// -------------------------------------------------------------------
/**
 * Transforms a XAPI VIF record into its XO representation.
 */
export function vif (obj) {
  const { device, MAC } = obj

  return {
    type: 'VIF',

    attached: Boolean(obj.currently_attached),
    device, // TODO: should it be cast to a number?
    MAC,
    MTU: +obj.MTU,

    $network: link(obj, 'network'),
    $VM: link(obj, 'VM')
  }
}
// -------------------------------------------------------------------
/**
 * Transforms a XAPI network record into its XO representation.
 *
 * NOTE(review): unlike most other transforms here, no `type` property
 * is set — confirm whether it is added elsewhere or simply missing.
 */
export function network (obj) {
  return {
    bridge: obj.bridge,
    MTU: +obj.MTU,
    name_description: obj.name_description,
    name_label: obj.name_label,
    tags: obj.tags,
    PIFs: link(obj, 'PIFs'),
    VIFs: link(obj, 'VIFs')
  }
}
// -------------------------------------------------------------------
/**
 * Transforms a XAPI message record into its XO representation.
 */
export function message (obj) {
  return {
    body: obj.body,
    name: obj.name,
    time: toTimestamp(obj.timestamp),
    $object: obj.obj_uuid // Special link as it is already an UUID.
  }
}
// -------------------------------------------------------------------
/**
 * Transforms a XAPI task record into its XO representation.
 */
export function task (obj) {
  return {
    created: toTimestamp(obj.created),
    current_operations: obj.current_operations,
    finished: toTimestamp(obj.finished),
    name_description: obj.name_description,
    name_label: obj.name_label,
    progress: +obj.progress,
    result: obj.result,
    status: obj.status,

    $host: link(obj, 'resident_on')
  }
}
// -------------------------------------------------------------------
/**
 * Transforms a XAPI host_patch record into its XO representation.
 */
export function host_patch (obj) {
  const applied = Boolean(obj.applied)

  return {
    applied,
    time: toTimestamp(obj.timestamp_applied),
    pool_patch: link(obj, 'pool_patch'),

    $host: link(obj, 'host')
  }
}
// -------------------------------------------------------------------
/**
 * Transforms a XAPI pool_patch record into its XO representation.
 */
export function pool_patch (obj) {
  return {
    applied: Boolean(obj.pool_applied),
    name_description: obj.name_description,
    name_label: obj.name_label,
    size: +obj.size,
    version: obj.version,

    // TODO: host.[$]pool_patches ←→ pool.[$]host_patches
    $host_patches: link(obj, 'host_patches')
  }
}
// -------------------------------------------------------------------
/**
 * Transforms a XAPI PCI record into its XO representation.
 */
export function pci (obj) {
  const {
    class_name: className,
    device_name: deviceName,
    pci_id: pciId
  } = obj

  return {
    type: 'PCI',

    class_name: className,
    device_name: deviceName,
    pci_id: pciId,

    $host: link(obj, 'host')
  }
}
// -------------------------------------------------------------------
/**
 * Transforms a XAPI PGPU record into its XO representation.
 */
export function pgpu (obj) {
  return {
    type: 'PGPU',

    pci: link(obj, 'PCI'),

    // TODO: dedupe.
    host: link(obj, 'host'),
    $host: link(obj, 'host'),
    vgpus: link(obj, 'resident_VGPUs'),
    $vgpus: link(obj, 'resident_VGPUs')
  }
}
// -------------------------------------------------------------------
/**
 * Transforms a XAPI VGPU record into its XO representation.
 */
export function vgpu (obj) {
  const currentlyAttached = Boolean(obj.currently_attached)

  return {
    type: 'VGPU',

    currentlyAttached,
    device: obj.device,
    resident_on: link(obj, 'resident_on'),
    vm: link(obj, 'VM')
  }
}

533
src/xapi-stats.js Normal file
View File

@@ -0,0 +1,533 @@
import endsWith from 'lodash.endswith'
import JSON5 from 'json5'
import { BaseError } from 'make-error'
import httpRequest from './http-request'
import { parseDateTime } from './xapi'
// Available RRD steps (granularities), in seconds.
const RRD_STEP_SECONDS = 5
const RRD_STEP_MINUTES = 60
const RRD_STEP_HOURS = 3600
const RRD_STEP_DAYS = 86400

// Maps the granularity names accepted by the API to their step.
const RRD_STEP_FROM_STRING = {
  'seconds': RRD_STEP_SECONDS,
  'minutes': RRD_STEP_MINUTES,
  'hours': RRD_STEP_HOURS,
  'days': RRD_STEP_DAYS
}

// Maximum number of points kept per step, i.e. the depth of the
// history window for each granularity.
const RRD_POINTS_PER_STEP = {
  [RRD_STEP_SECONDS]: 120,
  [RRD_STEP_MINUTES]: 120,
  [RRD_STEP_HOURS]: 168,
  [RRD_STEP_DAYS]: 366
}
// Base class of all errors thrown by this module.
export class XapiStatsError extends BaseError {}

// Thrown when an RRD legend entry does not match the expected
// `AVERAGE:<host|vm>:<uuid>:<type>` format.
export class UnknownLegendFormat extends XapiStatsError {
  constructor (line) {
    super('Unknown legend line: ' + line)
  }
}

// Thrown when the RRD endpoint does not honor the requested step.
// (The explicit constructor that only forwarded its argument to
// `super()` was redundant and has been removed.)
export class FaultyGranularity extends XapiStatsError {}
// -------------------------------------------------------------------
// Utils
// -------------------------------------------------------------------
// Builds the URL of the `rrd_updates` endpoint of the given host,
// requesting AVERAGE-consolidated, JSON-formatted host stats starting
// at `timestamp`.
function makeUrl (hostname, sessionId, timestamp) {
  const query = [
    `session_id=${sessionId}`,
    `start=${timestamp}`,
    'cf=AVERAGE',
    'host=true',
    'json=true'
  ].join('&')

  return `https://${hostname}/rrd_updates?${query}`
}
// Return the current local time as a Unix timestamp in seconds.
// Note: the value is fractional (it is not floored).
function getCurrentTimestamp () {
  const milliseconds = Date.now()
  return milliseconds / 1000
}
// Replaces NaN (and anything the coercing global `isNaN()` considers
// not a number, such as `undefined`) with `null`, so the value
// serializes cleanly to JSON.
function convertNanToNull (value) {
  if (isNaN(value)) {
    return null
  }

  return value
}
// Returns the host's own clock as a Unix timestamp in seconds
// (floored). Using the server clock avoids drift between XO and the
// XenServer host when querying RRDs.
async function getServerTimestamp (xapi, host) {
  const serverLocalTime = await xapi.call('host.get_servertime', host.$ref)
  const milliseconds = parseDateTime(serverLocalTime).getTime()

  return Math.floor(milliseconds / 1000)
}
// -------------------------------------------------------------------
// Stats
// -------------------------------------------------------------------
// Fresh, empty accumulator for the stats series of one host.
function getNewHostStats () {
  const stats = {
    cpus: [],
    pifs: { rx: [], tx: [] },
    load: [],
    memory: [],
    memoryFree: [],
    memoryUsed: []
  }

  return stats
}
// Fresh, empty accumulator for the stats series of one VM.
// Note: xvd series are keyed by disk letter, hence objects.
function getNewVmStats () {
  const stats = {
    cpus: [],
    vifs: { rx: [], tx: [] },
    xvds: { r: {}, w: {} },
    memory: [],
    memoryFree: [],
    memoryUsed: []
  }

  return stats
}
// -------------------------------------------------------------------
// Stats legends
// -------------------------------------------------------------------
// Fresh, empty legend (series name → RRD column index) for one host.
function getNewHostLegends () {
  const legends = {
    cpus: [],
    pifs: { rx: [], tx: [] },
    load: null,
    memoryFree: null,
    memory: null
  }

  return legends
}
// Fresh, empty legend (series name → RRD column index) for one VM.
function getNewVmLegends () {
  const legends = {
    cpus: [],
    vifs: { rx: [], tx: [] },
    xvds: { r: [], w: [] },
    memoryFree: null,
    memory: null
  }

  return legends
}
// Records, in `hostLegend`, the RRD column `index` of one host legend
// entry of the given `type` (mutates `hostLegend` in place).
function parseOneHostLegend (hostLegend, type, index) {
  let match

  match = /^cpu([0-9]+)$/.exec(type)
  if (match !== null) {
    hostLegend.cpus[match[1]] = index
    return
  }

  match = /^pif_eth([0-9]+)_(rx|tx)$/.exec(type)
  if (match !== null) {
    const series = match[2] === 'rx' ? hostLegend.pifs.rx : hostLegend.pifs.tx
    series[match[1]] = index
    return
  }

  if (type === 'loadavg') {
    hostLegend.load = index
  } else if (type === 'memory_free_kib') {
    hostLegend.memoryFree = index
  } else if (type === 'memory_total_kib') {
    hostLegend.memory = index
  }
}
// Records, in `vmLegend`, the RRD column `index` of one vm legend
// entry of the given `type` (mutates `vmLegend` in place).
function parseOneVmLegend (vmLegend, type, index) {
  let match

  match = /^cpu([0-9]+)$/.exec(type)
  if (match !== null) {
    vmLegend.cpus[match[1]] = index
    return
  }

  match = /^vif_([0-9]+)_(rx|tx)$/.exec(type)
  if (match !== null) {
    const series = match[2] === 'rx' ? vmLegend.vifs.rx : vmLegend.vifs.tx
    series[match[1]] = index
    return
  }

  match = /^vbd_xvd(.)_(read|write)$/.exec(type)
  if (match) {
    const series = match[2] === 'read' ? vmLegend.xvds.r : vmLegend.xvds.w
    series[match[1]] = index
    return
  }

  if (type === 'memory_internal_free') {
    vmLegend.memoryFree = index
  } else if (endsWith(type, 'memory')) {
    vmLegend.memory = index
  }
}
// Computes the stats legends for the host and its vms from an RRD
// update, by dispatching each legend line to the matching parser.
// Returns `[hostLegends, vmsLegends]` where `vmsLegends` is keyed by
// vm UUID.
function parseLegends (json) {
  const hostLegends = getNewHostLegends()
  const vmsLegends = {}

  json.meta.legend.forEach((line, index) => {
    const match = /^AVERAGE:(host|vm):(.+):(.+)$/.exec(line)
    if (match === null) {
      throw new UnknownLegendFormat(line)
    }

    const [, kind, uuid, type] = match

    if (kind === 'vm') {
      let vmLegend = vmsLegends[uuid]
      if (vmLegend === undefined) {
        vmLegend = vmsLegends[uuid] = getNewVmLegends()
      }

      parseOneVmLegend(vmLegend, type, index)
    } else {
      parseOneHostLegend(hostLegends, type, index)
    }
  })

  return [hostLegends, vmsLegends]
}
/**
 * Fetches and caches host/vm performance stats from the XenServer
 * `rrd_updates` endpoint.
 *
 * Cache layout:
 * - this._hosts[hostname][step] = { endTimestamp, localTimestamp, stats }
 * - this._vms[hostname][step][vmUuid] = stats
 */
export default class XapiStats {
  constructor () {
    this._vms = {}
    this._hosts = {}
  }

  // -------------------------------------------------------------------
  // Remove stats (Helper)
  // -------------------------------------------------------------------

  // Drops the oldest points of `dest` so that at most `pointsPerStep`
  // points are kept per series; `source` (the freshly parsed legends)
  // tells which series currently exist.
  _removeOlderStats (source, dest, pointsPerStep) {
    for (const key in source) {
      if (key === 'cpus') {
        for (const cpuIndex in source.cpus) {
          dest.cpus[cpuIndex].splice(0, dest.cpus[cpuIndex].length - pointsPerStep)
        }

        // If the number of cpus has been decreased, remove the extra
        // series.
        let offset

        if ((offset = dest.cpus.length - source.cpus.length) > 0) {
          dest.cpus.splice(-offset)
        }
      } else if (endsWith(key, 'ifs')) {
        // For each pif or vif.
        for (const ifType in source[key]) {
          for (const pifIndex in source[key][ifType]) {
            dest[key][ifType][pifIndex].splice(0, dest[key][ifType][pifIndex].length - pointsPerStep)
          }

          // If the number of pifs has been decreased, remove the
          // extra series.
          let offset

          if ((offset = dest[key][ifType].length - source[key][ifType].length) > 0) {
            dest[key][ifType].splice(-offset)
          }
        }
      } else if (key === 'xvds') {
        for (const xvdType in source.xvds) {
          for (const xvdLetter in source.xvds[xvdType]) {
            dest.xvds[xvdType][xvdLetter].splice(0, dest.xvds[xvdType][xvdLetter].length - pointsPerStep)
          }

          // If the number of xvds has been decreased, remove !
          // FIXME
        }
      } else if (key === 'load') {
        dest.load.splice(0, dest[key].length - pointsPerStep)
      } else if (key === 'memory') {
        // memory, memoryFree and memoryUsed always have the same
        // length: trim them together.
        const length = dest.memory.length - pointsPerStep
        dest.memory.splice(0, length)
        dest.memoryFree.splice(0, length)
        dest.memoryUsed.splice(0, length)
      }
    }
  }

  // -------------------------------------------------------------------
  // HOST: Computation and stats update
  // -------------------------------------------------------------------

  // Appends one RRD data row (`values`) to the host stats series,
  // using `hostLegends` to map series to column indexes.
  _parseRowHostStats (hostLegends, hostStats, values) {
    // Cpus (values are fractions of one cpu, scaled to percent).
    hostLegends.cpus.forEach((cpuIndex, index) => {
      if (hostStats.cpus[index] === undefined) {
        hostStats.cpus[index] = []
      }

      hostStats.cpus[index].push(values[cpuIndex] * 100)
    })

    // Pifs
    for (const pifType in hostLegends.pifs) {
      hostLegends.pifs[pifType].forEach((pifIndex, index) => {
        if (hostStats.pifs[pifType][index] === undefined) {
          hostStats.pifs[pifType][index] = []
        }

        hostStats.pifs[pifType][index].push(convertNanToNull(values[pifIndex]))
      })
    }

    // Load
    hostStats.load.push(convertNanToNull(values[hostLegends.load]))

    // Memory
    const memory = values[hostLegends.memory]
    const memoryFree = values[hostLegends.memoryFree]

    hostStats.memory.push(memory)

    // memoryUsed can only be derived when the free series exists.
    if (hostLegends.memoryFree !== undefined) {
      hostStats.memoryFree.push(memoryFree)
      hostStats.memoryUsed.push(memory - memoryFree)
    }
  }

  // Computes and accumulates the host stats from a whole RRD update.
  _parseHostStats (json, hostname, hostLegends, step) {
    const host = this._hosts[hostname][step]

    if (host.stats === undefined) {
      host.stats = getNewHostStats()
    }

    for (const row of json.data) {
      this._parseRowHostStats(hostLegends, host.stats, row.values)
    }
  }

  // -------------------------------------------------------------------
  // VM: Computation and stats update
  // -------------------------------------------------------------------

  // Appends one RRD data row (`values`) to the stats series of one vm.
  _parseRowVmStats (vmLegends, vmStats, values) {
    // Cpus (scaled to percent).
    vmLegends.cpus.forEach((cpuIndex, index) => {
      if (vmStats.cpus[index] === undefined) {
        vmStats.cpus[index] = []
      }

      vmStats.cpus[index].push(values[cpuIndex] * 100)
    })

    // Vifs
    for (const vifType in vmLegends.vifs) {
      vmLegends.vifs[vifType].forEach((vifIndex, index) => {
        if (vmStats.vifs[vifType][index] === undefined) {
          vmStats.vifs[vifType][index] = []
        }

        vmStats.vifs[vifType][index].push(convertNanToNull(values[vifIndex]))
      })
    }

    // Xvds
    for (const xvdType in vmLegends.xvds) {
      for (const index in vmLegends.xvds[xvdType]) {
        if (vmStats.xvds[xvdType][index] === undefined) {
          vmStats.xvds[xvdType][index] = []
        }

        vmStats.xvds[xvdType][index].push(convertNanToNull(values[vmLegends.xvds[xvdType][index]]))
      }
    }

    // Memory
    // WARNING! memoryFree is in Kb not in b, memory is in b
    const memory = values[vmLegends.memory]
    const memoryFree = values[vmLegends.memoryFree] * 1024

    vmStats.memory.push(memory)

    if (vmLegends.memoryFree !== undefined) {
      vmStats.memoryFree.push(memoryFree)
      vmStats.memoryUsed.push(memory - memoryFree)
    }
  }

  // Computes and accumulates the stats of every vm present in
  // `vmsLegends` from a whole RRD update.
  _parseVmsStats (json, hostname, vmsLegends, step) {
    if (this._vms[hostname][step] === undefined) {
      this._vms[hostname][step] = {}
    }

    const vms = this._vms[hostname][step]

    for (const uuid in vmsLegends) {
      if (vms[uuid] === undefined) {
        vms[uuid] = getNewVmStats()
      }
    }

    for (const row of json.data) {
      for (const uuid in vmsLegends) {
        this._parseRowVmStats(vmsLegends[uuid], vms[uuid], row.values)
      }
    }
  }

  // -------------------------------------------------------------------
  // -------------------------------------------------------------------

  // Executes one http request on a XenServer to get the stats.
  // Returns the stats (JSON format) or throws the request's error.
  // Note: certificate validation is disabled because XenServer hosts
  // commonly use self-signed certificates.
  async _getJson (url) {
    const body = await httpRequest(url, { rejectUnauthorized: false }).readAll()
    return JSON5.parse(body)
  }

  // Returns the timestamp to request stats from: the end of the
  // cached window if we have one, otherwise far enough in the past to
  // fill a whole window.
  async _getLastTimestamp (xapi, host, step) {
    if (this._hosts[host.address][step] === undefined) {
      const serverTimeStamp = await getServerTimestamp(xapi, host)
      return serverTimeStamp - step * RRD_POINTS_PER_STEP[step] + step
    }

    return this._hosts[host.address][step].endTimestamp
  }

  // Returns the cached points for the host, or for one vm when
  // `vmId` is given.
  _getPoints (hostname, step, vmId) {
    // Return host points
    if (vmId === undefined) {
      return this._hosts[hostname][step]
    }

    // Return vm points
    const points = { endTimestamp: this._hosts[hostname][step].endTimestamp }

    if (this._vms[hostname][step] !== undefined) {
      points.stats = this._vms[hostname][step][vmId]
    }

    return points
  }

  // Fetches new RRD data if the cache is stale, merges it into the
  // cached series, and returns the requested points.
  async _getAndUpdatePoints (xapi, host, vmId, granularity) {
    // Get granularity to use
    const step = (granularity === undefined || granularity === 0)
      ? RRD_STEP_SECONDS : RRD_STEP_FROM_STRING[granularity]

    if (step === undefined) {
      throw new FaultyGranularity(`Unknown granularity: '${granularity}'. Use 'seconds', 'minutes', 'hours', or 'days'.`)
    }

    // Limit the number of http requests: serve from cache while the
    // cached window is younger than one step.
    const hostname = host.address

    if (this._hosts[hostname] === undefined) {
      this._hosts[hostname] = {}
      this._vms[hostname] = {}
    }

    if (this._hosts[hostname][step] !== undefined &&
        this._hosts[hostname][step].localTimestamp + step > getCurrentTimestamp()) {
      return this._getPoints(hostname, step, vmId)
    }

    // Check if we are in the good interval, use this._hosts[hostname][step].localTimestamp
    // for avoid bad requests
    // TODO

    // Get json
    const timestamp = await this._getLastTimestamp(xapi, host, step)
    let json = await this._getJson(makeUrl(hostname, xapi.sessionId, timestamp))

    // Check if the granularity matches the requested 'step'.
    // If it does not, retry with another url built from the server
    // timestamp.
    if (json.meta.step !== step) {
      console.log(`RRD call: Expected step: ${step}, received step: ${json.meta.step}. Retry with other timestamp`)
      const serverTimestamp = await getServerTimestamp(xapi, host)

      // Approximately: half points are asked
      // FIXME: Not the best solution
      json = await this._getJson(makeUrl(hostname, xapi.sessionId, serverTimestamp - step * (RRD_POINTS_PER_STEP[step] / 2) + step))

      if (json.meta.step !== step) {
        throw new FaultyGranularity(`Unable to get the true granularity: ${json.meta.step}`)
      }
    }

    // Make a new cache slot if necessary.
    if (this._hosts[hostname][step] === undefined) {
      this._hosts[hostname][step] = {
        endTimestamp: 0,
        localTimestamp: 0
      }
    }

    // If there is data…
    if (json.data.length !== 0) {
      // Warning: Sometimes, the json.xport.meta.start value does not match with the
      // timestamp of the oldest data value
      // So, we use the timestamp of the oldest data value !
      const startTimestamp = json.data[json.meta.rows - 1].t

      // Remove the data already cached and reorder.
      // Note: Older values are at end of json.data.row
      const parseOffset = (this._hosts[hostname][step].endTimestamp - startTimestamp + step) / step

      json.data.splice(json.data.length - parseOffset)
      json.data.reverse()

      // If there is useful (new) data…
      if (json.data.length > 0) {
        const [hostLegends, vmsLegends] = parseLegends(json)

        // Compute and update host/vms stats
        this._parseVmsStats(json, hostname, vmsLegends, step)
        this._parseHostStats(json, hostname, hostLegends, step)

        // Remove older stats
        this._removeOlderStats(hostLegends, this._hosts[hostname][step].stats, RRD_POINTS_PER_STEP[step])

        for (const uuid in vmsLegends) {
          this._removeOlderStats(vmsLegends[uuid], this._vms[hostname][step][uuid], RRD_POINTS_PER_STEP[step])
        }
      }
    }

    // Update timestamps so the next call knows where to resume.
    this._hosts[hostname][step].endTimestamp = json.meta.end
    this._hosts[hostname][step].localTimestamp = getCurrentTimestamp()

    return this._getPoints(hostname, step, vmId)
  }

  // -------------------------------------------------------------------
  // -------------------------------------------------------------------

  // Warning: These functions return a reference on internal data.
  // So, data can be changed by a parallel call on these functions.
  // It is forbidden to modify the returned data.

  // Return host stats
  async getHostPoints (xapi, hostId, granularity) {
    const host = xapi.getObject(hostId)
    return this._getAndUpdatePoints(xapi, host, undefined, granularity)
  }

  // Return vms stats
  async getVmPoints (xapi, vmId, granularity) {
    const vm = xapi.getObject(vmId)
    const host = vm.$resident_on
    return this._getAndUpdatePoints(xapi, host, vm.uuid, granularity)
  }
}

File diff suppressed because it is too large Load Diff

739
src/xo-mixins/backups.js Normal file
View File

@@ -0,0 +1,739 @@
import endsWith from 'lodash.endswith'
import escapeStringRegexp from 'escape-string-regexp'
import eventToPromise from 'event-to-promise'
import execa from 'execa'
import filter from 'lodash.filter'
import findIndex from 'lodash.findindex'
import sortBy from 'lodash.sortby'
import startsWith from 'lodash.startswith'
import {
basename,
dirname
} from 'path'
import { satisfies as versionSatisfies } from 'semver'
import xapiObjectToXo from '../xapi-object-to-xo'
import {
deferrable
} from '../decorators'
import {
forEach,
mapToArray,
noop,
pSettle,
safeDateFormat
} from '../utils'
import {
VDI_FORMAT_VHD
} from '../xapi'
// ===================================================================
const DELTA_BACKUP_EXT = '.json'
const DELTA_BACKUP_EXT_LENGTH = DELTA_BACKUP_EXT.length

// Test if a file is a vdi backup. (full or delta)
const isVdiBackup = name => /^\d+T\d+Z_(?:full|delta)\.vhd$/.test(name)

// Test if a file is a delta vdi backup.
const isDeltaVdiBackup = name => /^\d+T\d+Z_delta\.vhd$/.test(name)

// Get the timestamp of a vdi backup (full or delta), or null when the
// name is not a vdi backup.
//
// FIX: previously this threw a TypeError (`arr` is null) on
// non-matching names; callers such as `_listDeltaVdiDependencies()`
// pass arbitrary filenames to it.
const getVdiTimestamp = name => {
  const arr = /^(\d+T\d+Z)_(?:full|delta)\.vhd$/.exec(name)
  return arr && arr[1]
}

// Strips the `.json` extension from a delta vm backup name.
const getDeltaBackupNameWithoutExt = name => name.slice(0, -DELTA_BACKUP_EXT_LENGTH)

// Test if a file is a delta vm backup. (JSON metadata file)
const isDeltaBackup = name => endsWith(name, DELTA_BACKUP_EXT)
// Reads `name` entirely through a checksum-verifying stream: the
// stream errors (rejecting the awaited promise) when the stored
// checksum does not match the content.
//
// A missing file (ENOENT) is silently ignored — presumably so that
// backups without a checksum file are still accepted; TODO confirm.
//
// NOTE(review): this waits for a 'finish' event on a read stream —
// confirm the checksum wrapper emits it (plain read streams end with
// 'end').
async function checkFileIntegrity (handler, name) {
  let stream

  try {
    stream = await handler.createReadStream(name, { checksum: true })
  } catch (error) {
    if (error.code === 'ENOENT') {
      return
    }

    throw error
  }

  stream.resume()
  await eventToPromise(stream, 'finish')
}
// ===================================================================
export default class {
constructor (xo) {
  // Reference to the main Xo instance, used to reach remotes and
  // XAPI connections.
  this._xo = xo
}
// Lists the backups stored on a remote: plain `.xva` full backups at
// the root, plus delta backups found in `vm_delta_*` directories
// (returned as `<dir>/<name>` without the `.json` extension).
async listRemoteBackups (remoteId) {
  const handler = await this._xo.getRemoteHandler(remoteId)

  // List backups. (No delta)
  const backupFilter = file => endsWith(file, '.xva')

  const files = await handler.list()
  const backups = filter(files, backupFilter)

  // List delta backups.
  const deltaDirs = filter(files, file => startsWith(file, 'vm_delta_'))

  for (const deltaDir of deltaDirs) {
    const files = await handler.list(deltaDir)
    const deltaBackups = filter(files, isDeltaBackup)

    backups.push(...mapToArray(
      deltaBackups,
      deltaBackup => {
        return `${deltaDir}/${getDeltaBackupNameWithoutExt(deltaBackup)}`
      }
    ))
  }

  return backups
}
// Imports a full VM backup (XVA `file`) from a remote into the given
// SR, streaming it straight from the remote into XAPI.
async importVmBackup (remoteId, file, sr) {
  const handler = await this._xo.getRemoteHandler(remoteId)
  const stream = await handler.createReadStream(file)
  const xapi = this._xo.getXapi(sr)

  await xapi.importVm(stream, { srId: sr._xapiId })
}
// -----------------------------------------------------------------
// Efficiently copies a VM to another SR (possibly another pool) using
// a delta export: only the changes since the previous copy — tracked
// through the `xo:base_delta:<target SR uuid>` other_config tag on
// the source VM — are transferred.
//
// `$onFailure` (injected by the decorator) registers cleanup actions
// run if the copy fails.
@deferrable.onFailure
async deltaCopyVm ($onFailure, srcVm, targetSr) {
  const srcXapi = this._xo.getXapi(srcVm)
  const targetXapi = this._xo.getXapi(targetSr)

  // Get Xen objects from XO objects.
  srcVm = srcXapi.getObject(srcVm._xapiId)
  targetSr = targetXapi.getObject(targetSr._xapiId)

  // 1. Find the local base for this SR (if any).
  const TAG_LAST_BASE_DELTA = `xo:base_delta:${targetSr.uuid}`
  const localBaseUuid = (id => {
    if (id != null) {
      // `null` as default: do not throw if the base has disappeared.
      const base = srcXapi.getObject(id, null)
      return base && base.uuid
    }
  })(srcVm.other_config[TAG_LAST_BASE_DELTA])

  // 2. Copy.
  const dstVm = await (async () => {
    const delta = await srcXapi.exportDeltaVm(srcVm.$id, localBaseUuid, {
      snapshotNameLabel: `XO_DELTA_EXPORT: ${targetSr.name_label} (${targetSr.uuid})`
    })

    // On failure: cancel the export streams and delete the export
    // snapshot.
    $onFailure(async () => {
      await Promise.all(mapToArray(
        delta.streams,
        stream => stream.cancel()
      ))

      return srcXapi.deleteVm(delta.vm.$id, true)
    })

    const promise = targetXapi.importDeltaVm(
      delta,
      {
        deleteBase: true, // Remove the remote base.
        srId: targetSr.$id
      }
    )

    // Once done, (asynchronously) remove the (now obsolete) local
    // base.
    if (localBaseUuid) {
      promise.then(() => srcXapi.deleteVm(localBaseUuid, true)).catch(noop)
    }

    // (Asynchronously) Identify snapshot as future base.
    promise.then(() => {
      return srcXapi._updateObjectMapProperty(srcVm, 'other_config', {
        [TAG_LAST_BASE_DELTA]: delta.vm.uuid
      })
    }).catch(noop)

    return promise
  })()

  // 5. Return the identifier of the new XO VM object.
  return xapiObjectToXo(dstVm).id
}
// -----------------------------------------------------------------
// TODO: The other backup methods must use this function !
// Prerequisite: The backups array must be ordered. (old to new backups)
async _removeOldBackups (backups, handler, dir, n) {
if (n <= 0) {
return
}
const getPath = (file, dir) => dir ? `${dir}/${file}` : file
await Promise.all(
mapToArray(backups.slice(0, n), async backup => await handler.unlink(getPath(backup, dir)))
)
}
// -----------------------------------------------------------------
// Restores a legacy delta vdi backup chain into a freshly created
// VDI: the base full backup is imported first, then each delta up to
// the requested one. Returns the id of the new VDI.
async _legacyImportDeltaVdiBackup (xapi, { vmId, handler, dir, vdiInfo }) {
  const vdi = await xapi.createVdi(vdiInfo.virtual_size, vdiInfo)
  const vdiId = vdi.$id

  // dir = vm_delta_xxx
  // xoPath = vdi_xxx/timestamp_(full|delta).vhd
  // vdiDir = vdi_xxx
  const { xoPath } = vdiInfo
  const filePath = `${dir}/${xoPath}`
  const vdiDir = dirname(xoPath)

  // Full backup plus the deltas it depends on, oldest first.
  const backups = await this._listDeltaVdiDependencies(handler, filePath)

  for (const backup of backups) {
    const stream = await handler.createReadStream(`${dir}/${vdiDir}/${backup}`)

    await xapi.importVdiContent(vdiId, stream, {
      format: VDI_FORMAT_VHD
    })
  }

  return vdiId
}
// Restores a legacy delta vm backup: imports the vm metadata (XVA),
// then recreates its VDIs from the delta vdi backup chains and
// reattaches them. Returns the imported XAPI VM object.
async _legacyImportDeltaVmBackup (xapi, { remoteId, handler, filePath, info, sr }) {
  // Import vm metadata.
  const vm = await (async () => {
    const stream = await handler.createReadStream(`${filePath}.xva`)
    return await xapi.importVm(stream, { onlyMetadata: true })
  })()

  const vmName = vm.name_label
  const dir = dirname(filePath)

  // Disable start and change the VM name label during import.
  await Promise.all([
    xapi.addForbiddenOperationToVm(vm.$id, 'start', 'Delta backup import...'),
    xapi._setObjectProperties(vm, { name_label: `[Importing...] ${vmName}` })
  ])

  // Destroy vbds if necessary. Why ?
  // Because XenServer creates Vbds linked to the vdis of the backup vm if it exists.
  await xapi.destroyVbdsFromVm(vm.uuid)

  // Import VDIs.
  const vdiIds = {}
  await Promise.all(
    mapToArray(
      info.vdis,
      async vdiInfo => {
        vdiInfo.sr = sr._xapiId

        const vdiId = await this._legacyImportDeltaVdiBackup(xapi, { vmId: vm.$id, handler, dir, vdiInfo })
        vdiIds[vdiInfo.uuid] = vdiId
      }
    )
  )

  // Recreate the VBDs linking the new VDIs to the vm.
  await Promise.all(
    mapToArray(
      info.vbds,
      vbdInfo => {
        xapi.attachVdiToVm(vdiIds[vbdInfo.xoVdi], vm.$id, vbdInfo)
      }
    )
  )

  // Import done, reenable start and set real vm name.
  await Promise.all([
    xapi.removeForbiddenOperationFromVm(vm.$id, 'start'),
    xapi._setObjectProperties(vm, { name_label: vmName })
  ])

  return vm
}
// -----------------------------------------------------------------
async _listVdiBackups (handler, dir) {
let files
try {
files = await handler.list(dir)
} catch (error) {
if (error.code === 'ENOENT') {
files = []
} else {
throw error
}
}
const backups = sortBy(filter(files, fileName => isVdiBackup(fileName)))
let i
// Avoid unstable state: No full vdi found to the beginning of array. (base)
for (i = 0; i < backups.length && isDeltaVdiBackup(backups[i]); i++);
await this._removeOldBackups(backups, handler, dir, i)
return backups.slice(i)
}
// Exports one vdi backup (full or delta) into
// `<dir>/vdi_<uuid>/<timestamp>_(full|delta).vhd`.
//
// A snapshot named XO_DELTA_BASE_VDI_SNAPSHOT serves as the base for
// the next delta; the previous base (if any) is exported against and
// then replaced. Returns the relative paths plus old/new base ids.
async _deltaVdiBackup (xapi, {vdi, handler, dir, depth}) {
  const backupDirectory = `vdi_${vdi.uuid}`
  dir = `${dir}/${backupDirectory}`

  const backups = await this._listVdiBackups(handler, dir)

  // Make snapshot.
  const date = safeDateFormat(new Date())
  const currentSnapshot = await xapi.snapshotVdi(vdi.$id, 'XO_DELTA_BASE_VDI_SNAPSHOT')

  // The most recent base snapshot, if any.
  const bases = sortBy(
    filter(vdi.$snapshots, { name_label: 'XO_DELTA_BASE_VDI_SNAPSHOT' }),
    base => base.snapshot_time
  )
  const base = bases.pop()

  // Remove old bases if exists. (Best effort, not awaited.)
  Promise.all(mapToArray(bases, base => xapi.deleteVdi(base.$id))).catch(noop)

  // It is strange to have no base but a full backup !
  // A full is necessary if no backups exist or the base is missing.
  const isFull = (!backups.length || !base)

  // Export full or delta backup.
  const vdiFilename = `${date}_${isFull ? 'full' : 'delta'}.vhd`
  const backupFullPath = `${dir}/${vdiFilename}`

  try {
    const sourceStream = await xapi.exportVdi(currentSnapshot.$id, {
      baseId: isFull ? undefined : base.$id,
      format: VDI_FORMAT_VHD
    })

    const targetStream = await handler.createOutputStream(backupFullPath, {
      // FIXME: Checksum is not computed for full vdi backups.
      // The problem is in the merge case, a delta merged in a full vdi
      // backup forces us to browse the resulting file =>
      // Significant transfer time on the network !
      checksum: !isFull,
      flags: 'wx'
    })

    // Propagate source errors so the write fails too.
    sourceStream.on('error', error => targetStream.emit('error', error))

    await Promise.all([
      eventToPromise(sourceStream.pipe(targetStream), 'finish'),
      sourceStream.task
    ])
  } catch (error) {
    // Remove new backup. (corrupt) and delete new vdi base.
    xapi.deleteVdi(currentSnapshot.$id).catch(noop)
    await handler.unlink(backupFullPath, { checksum: true }).catch(noop)

    throw error
  }

  // Returns relative path. (subdir and vdi filename), old/new base.
  return {
    backupDirectory,
    vdiFilename,
    oldBaseId: base && base.$id, // Base can be undefined. (full backup)
    newBaseId: currentSnapshot.$id
  }
}
// Merges the delta vdi backups exceeding `depth` into a single full
// backup, using the bundled `vhd-util` binary, then renames the
// result to `<timestamp>_full.vhd`.
//
// Each file's integrity is checked before it is touched.
async _mergeDeltaVdiBackups ({handler, dir, depth}) {
  if (handler.type === 'smb') {
    throw new Error('VDI merging is not available through SMB')
  }

  const backups = await this._listVdiBackups(handler, dir)
  let i = backups.length - depth

  // No merge.
  if (i <= 0) {
    return
  }

  const vhdUtil = `${__dirname}/../../bin/vhd-util`

  const timestamp = getVdiTimestamp(backups[i])
  const newFullBackup = `${dir}/${timestamp}_full.vhd`

  await checkFileIntegrity(handler, `${dir}/${backups[i]}`)

  // Coalesce each delta (newest kept, walking back) into its parent.
  for (; i > 0 && isDeltaVdiBackup(backups[i]); i--) {
    const backup = `${dir}/${backups[i]}`
    const parent = `${dir}/${backups[i - 1]}`

    const path = handler._remote.path // FIXME, private attribute !

    try {
      await checkFileIntegrity(handler, `${dir}/${backups[i - 1]}`)
      await execa(vhdUtil, ['modify', '-n', `${path}/${backup}`, '-p', `${path}/${parent}`]) // FIXME not ok at least with smb remotes
      await execa(vhdUtil, ['coalesce', '-n', `${path}/${backup}`]) // FIXME not ok at least with smb remotes
    } catch (e) {
      console.error('Unable to use vhd-util.', e)
      throw e
    }

    await handler.unlink(backup, { checksum: true })
  }

  // The base was removed: two or more full backups remain?
  // => Remove old backups before the most recent full.
  if (i > 0) {
    for (i--; i >= 0; i--) {
      await handler.unlink(`${dir}/${backups[i]}`, { checksum: true })
    }

    return
  }

  // Rename the first old full backup to the new full backup.
  await handler.rename(`${dir}/${backups[0]}`, newFullBackup)
}
async _listDeltaVdiDependencies (handler, filePath) {
const dir = dirname(filePath)
const filename = basename(filePath)
const backups = await this._listVdiBackups(handler, dir)
// Search file. (delta or full backup)
const i = findIndex(backups, backup =>
getVdiTimestamp(backup) === getVdiTimestamp(filename)
)
if (i === -1) {
throw new Error('VDI to import not found in this remote.')
}
// Search full backup.
let j
for (j = i; j >= 0 && isDeltaVdiBackup(backups[j]); j--);
if (j === -1) {
throw new Error(`Unable to found full vdi backup of: ${filePath}`)
}
return backups.slice(j, i + 1)
}
// -----------------------------------------------------------------
async _listDeltaVmBackups (handler, dir) {
const files = await handler.list(dir)
return await sortBy(filter(files, isDeltaBackup))
}
async _failedRollingDeltaVmBackup (xapi, handler, dir, fulFilledVdiBackups) {
await Promise.all(
mapToArray(fulFilledVdiBackups, async vdiBackup => {
const { newBaseId, backupDirectory, vdiFilename } = vdiBackup.value()
await xapi.deleteVdi(newBaseId)
await handler.unlink(`${dir}/${backupDirectory}/${vdiFilename}`, { checksum: true }).catch(noop)
})
)
}
// Performs a rolling delta backup of a VM on a remote:
// - exports each disk as a delta VDI backup (in parallel),
// - writes a JSON metadata file describing the VM, VBDs, VDIs and VIFs,
// - merges/removes old backups so that at most `depth` remain.
//
// Returns the relative path of the new backup (without extension).
//
// Not supported on SMB remotes (VDI merging requires a local mount path).
async rollingDeltaVmBackup ({vm, remoteId, tag, depth}) {
  const remote = await this._xo.getRemote(remoteId)

  if (!remote) {
    throw new Error(`No such Remote ${remoteId}`)
  }
  if (!remote.enabled) {
    throw new Error(`Remote ${remoteId} is disabled`)
  }

  const handler = await this._xo.getRemoteHandler(remote)
  if (handler.type === 'smb') {
    throw new Error('Delta Backup is not supported for smb remotes')
  }

  const dir = `vm_delta_${tag}_${vm.uuid}`

  // Metadata describing this backup (version 1.0.0 format).
  const info = {
    version: '1.0.0',
    vbds: {},
    vdis: {},
    vifs: {}
  }

  const promises = []

  const xapi = this._xo.getXapi(vm)
  vm = xapi.getObject(vm._xapiId)

  // Start one delta VDI backup per disk of the VM.
  for (const vbd of vm.$VBDs) {
    const vdiId = vbd.VDI

    // Skip CD drives and VBDs without a VDI.
    if (!vdiId || vbd.type !== 'Disk') {
      continue
    }

    info.vbds[vbd.$ref] = vbd

    // Warning: There may be the same VDI id for a VBD set.
    if (info.vdis[vdiId]) {
      continue
    }

    const vdi = vbd.$VDI
    info.vdis[vdiId] = { ...vdi }

    promises.push(
      this._deltaVdiBackup(xapi, {vdi, handler, dir, depth}).then(
        vdiBackup => {
          const { backupDirectory, vdiFilename } = vdiBackup
          info.vdis[vdiId].xoPath = `${backupDirectory}/${vdiFilename}`

          return vdiBackup
        }
      )
    )
  }

  // Wait for every VDI backup to settle, successes and failures alike.
  const vdiBackups = await pSettle(promises)
  const fulFilledVdiBackups = []
  let fail = false

  // One or many vdi backups have failed.
  for (const vdiBackup of vdiBackups) {
    if (vdiBackup.isFulfilled()) {
      fulFilledVdiBackups.push(vdiBackup)
    } else {
      console.error(`Rejected backup: ${vdiBackup.reason()}`)
      fail = true
    }
  }

  if (fail) {
    // Roll back the successful VDI backups before giving up.
    console.error(`Remove successful backups in ${dir}`, fulFilledVdiBackups)
    await this._failedRollingDeltaVmBackup(xapi, handler, dir, fulFilledVdiBackups)

    throw new Error('Rolling delta vm backup failed.')
  }

  const date = safeDateFormat(new Date())
  const backupFormat = `${date}_${vm.name_label}`
  const infoPath = `${dir}/${backupFormat}${DELTA_BACKUP_EXT}`

  try {
    for (const vif of vm.$VIFs) {
      info.vifs[vif.$ref] = vif
    }
    info.vm = vm

    // 'wx' flag: fail instead of silently overwriting an existing backup.
    await handler.outputFile(infoPath, JSON.stringify(info, null, 2), {flag: 'wx'})
  } catch (e) {
    // Writing the metadata failed: remove it and roll back the VDI backups.
    await Promise.all([
      handler.unlink(infoPath).catch(noop),
      this._failedRollingDeltaVmBackup(xapi, handler, dir, fulFilledVdiBackups)
    ])

    throw e
  }

  // Here we have a completed backup. We can merge old vdis.
  await Promise.all(
    mapToArray(vdiBackups, vdiBackup => {
      const { backupDirectory } = vdiBackup.value()
      return this._mergeDeltaVdiBackups({handler, dir: `${dir}/${backupDirectory}`, depth})
    })
  )

  // Remove old vm backups.
  const backups = await this._listDeltaVmBackups(handler, dir)
  const nOldBackups = backups.length - depth
  if (nOldBackups > 0) {
    await Promise.all(
      mapToArray(backups.slice(0, nOldBackups), async backup => {
        // Remove json file.
        await handler.unlink(`${dir}/${backup}`)

        // Remove xva file.
        // Version 0.0.0 (Legacy) Delta Backup.
        handler.unlink(`${dir}/${getDeltaBackupNameWithoutExt(backup)}.xva`).catch(noop)
      })
    )
  }

  // Remove old vdi bases.
  // NOTE(review): deliberately not awaited — best-effort cleanup that must
  // not delay or fail the backup.
  Promise.all(
    mapToArray(vdiBackups, async vdiBackup => {
      const { oldBaseId } = vdiBackup.value()

      if (oldBaseId) {
        await xapi.deleteVdi(oldBaseId)
      }
    })
  ).catch(noop)

  // Returns relative path.
  return `${dir}/${backupFormat}`
}
// Imports a delta VM backup designated by `filePath` (path on the remote,
// without extension) into the SR `sr`.
//
// Supports both the legacy (0.0.0, XVA based) and the 1.x (VHD based)
// backup formats.
//
// Returns the XO id of the imported VM.
async importDeltaVmBackup ({sr, remoteId, filePath}) {
  const handler = await this._xo.getRemoteHandler(remoteId)
  const xapi = this._xo.getXapi(sr)

  const delta = JSON.parse(await handler.readFile(`${filePath}${DELTA_BACKUP_EXT}`))
  let vm
  const { version } = delta

  if (!version) {
    // Legacy import. (Version 0.0.0)
    vm = await this._legacyImportDeltaVmBackup(xapi, {
      remoteId, handler, filePath, info: delta, sr
    })
  } else if (versionSatisfies(delta.version, '^1')) {
    const basePath = dirname(filePath)
    const streams = delta.streams = {}

    // For each VDI, open a read stream on every VHD of its backup chain
    // (full backup first, then each delta).
    await Promise.all(
      mapToArray(
        delta.vdis,
        async (vdi, id) => {
          const vdisFolder = `${basePath}/${dirname(vdi.xoPath)}`
          const backups = await this._listDeltaVdiDependencies(handler, `${basePath}/${vdi.xoPath}`)

          streams[`${id}.vhd`] = await Promise.all(mapToArray(backups, async backup =>
            handler.createReadStream(`${vdisFolder}/${backup}`, { checksum: true, ignoreMissingChecksum: true })
          ))
        }
      )
    )

    vm = await xapi.importDeltaVm(delta, {
      srId: sr._xapiId,
      disableStartAfterImport: false
    })
  } else {
    throw new Error(`Unsupported delta backup version: ${version}`)
  }

  return xapiObjectToXo(vm).id
}
// -----------------------------------------------------------------
async backupVm ({vm, remoteId, file, compress, onlyMetadata}) {
const remote = await this._xo.getRemote(remoteId)
if (!remote) {
throw new Error(`No such Remote ${remoteId}`)
}
if (!remote.enabled) {
throw new Error(`Backup remote ${remoteId} is disabled`)
}
const handler = await this._xo.getRemoteHandler(remote)
return this._backupVm(vm, handler, file, {compress, onlyMetadata})
}
async _backupVm (vm, handler, file, {compress, onlyMetadata}) {
const targetStream = await handler.createOutputStream(file, { flags: 'wx' })
const promise = eventToPromise(targetStream, 'finish')
const sourceStream = await this._xo.getXapi(vm).exportVm(vm._xapiId, {
compress,
onlyMetadata: onlyMetadata || false
})
sourceStream.pipe(targetStream)
await promise
}
async rollingBackupVm ({vm, remoteId, tag, depth, compress, onlyMetadata}) {
const remote = await this._xo.getRemote(remoteId)
if (!remote) {
throw new Error(`No such Remote ${remoteId}`)
}
if (!remote.enabled) {
throw new Error(`Backup remote ${remoteId} is disabled`)
}
const handler = await this._xo.getRemoteHandler(remote)
const files = await handler.list()
const reg = new RegExp('^[^_]+_' + escapeStringRegexp(`${tag}_${vm.name_label}.xva`))
const backups = sortBy(filter(files, (fileName) => reg.test(fileName)))
const date = safeDateFormat(new Date())
const file = `${date}_${tag}_${vm.name_label}.xva`
await this._backupVm(vm, handler, file, {compress, onlyMetadata})
await this._removeOldBackups(backups, handler, undefined, backups.length - (depth - 1))
}
async rollingSnapshotVm (vm, tag, depth) {
const xapi = this._xo.getXapi(vm)
vm = xapi.getObject(vm._xapiId)
const reg = new RegExp('^rollingSnapshot_[^_]+_' + escapeStringRegexp(tag) + '_')
const snapshots = sortBy(filter(vm.$snapshots, snapshot => reg.test(snapshot.name_label)), 'name_label')
const date = safeDateFormat(new Date())
await xapi.snapshotVm(vm.$id, `rollingSnapshot_${date}_${tag}_${vm.name_label}`)
const promises = []
for (let surplus = snapshots.length - (depth - 1); surplus > 0; surplus--) {
const oldSnap = snapshots.shift()
promises.push(xapi.deleteVm(oldSnap.uuid, true))
}
await Promise.all(promises)
}
async rollingDrCopyVm ({vm, sr, tag, depth}) {
tag = 'DR_' + tag
const reg = new RegExp('^' + escapeStringRegexp(`${vm.name_label}_${tag}_`) + '[0-9]{8}T[0-9]{6}Z$')
const targetXapi = this._xo.getXapi(sr)
sr = targetXapi.getObject(sr._xapiId)
const sourceXapi = this._xo.getXapi(vm)
vm = sourceXapi.getObject(vm._xapiId)
const vms = []
forEach(sr.$VDIs, vdi => {
const vbds = vdi.$VBDs
const vm = vbds && vbds[0] && vbds[0].$VM
if (vm && reg.test(vm.name_label)) {
vms.push(vm)
}
})
const olderCopies = sortBy(vms, 'name_label')
const copyName = `${vm.name_label}_${tag}_${safeDateFormat(new Date())}`
const drCopy = await sourceXapi.remoteCopyVm(vm.$id, targetXapi, sr.$id, {
nameLabel: copyName
})
await targetXapi.addTag(drCopy.$id, 'Disaster Recovery')
const promises = []
for (let surplus = olderCopies.length - (depth - 1); surplus > 0; surplus--) {
const oldDRVm = olderCopies.shift()
promises.push(targetXapi.deleteVm(oldDRVm.$id, true))
}
await Promise.all(promises)
}
}

76
src/xo-mixins/jobs.js Normal file
View File

@@ -0,0 +1,76 @@
import JobExecutor from '../job-executor'
import { Jobs } from '../models/job'
import {
JsonRpcError,
NoSuchObject
} from '../api-errors'
// ===================================================================
// Thrown when a job id does not match any stored job.
class NoSuchJob extends NoSuchObject {
  constructor (id) {
    super(id, 'job')
  }
}
// ===================================================================
// Mixin handling the persistence (Redis) and execution of jobs.
export default class {
  constructor (xo) {
    this._executor = new JobExecutor(xo)
    this._jobs = new Jobs({
      connection: xo._redis,
      prefix: 'xo:job',
      indexes: ['user_id', 'key']
    })
  }

  // Returns all stored jobs.
  async getAllJobs () {
    const jobs = await this._jobs.get()
    return jobs
  }

  // Returns the properties of the job `id`.
  //
  // Throws `NoSuchJob` if it does not exist.
  async getJob (id) {
    const job = await this._jobs.first(id)

    if (!job) {
      throw new NoSuchJob(id)
    }

    return job.properties
  }

  // TODO: use plain objects
  async createJob (userId, job) {
    return (await this._jobs.create(userId, job)).properties
  }

  async updateJob (job) {
    const saved = await this._jobs.save(job)
    return saved
  }

  async removeJob (id) {
    const removed = await this._jobs.remove(id)
    return removed
  }

  // Runs the jobs of `idSequence` one after the other.
  //
  // Missing jobs are skipped and reported in a single error at the end.
  async runJobSequence (idSequence) {
    const notFound = []

    for (const id of idSequence) {
      let job
      try {
        job = await this.getJob(id)
      } catch (error) {
        if (!(error instanceof NoSuchJob)) {
          throw error
        }
        notFound.push(id)
      }

      if (job) {
        await this._executor.exec(job)
      }
    }

    if (notFound.length > 0) {
      throw new JsonRpcError(`The following jobs were not found: ${notFound.join()}`)
    }
  }
}

205
src/xo-mixins/plugins.js Normal file
View File

@@ -0,0 +1,205 @@
import createJsonSchemaValidator from 'is-my-json-valid'
import isFunction from 'lodash.isfunction'
import { PluginsMetadata } from '../models/plugin-metadata'
import {
InvalidParameters,
NoSuchObject
} from '../api-errors'
import {
createRawObject,
mapToArray
} from '../utils'
// ===================================================================
// Thrown when a plugin id does not match any registered plugin.
class NoSuchPlugin extends NoSuchObject {
  constructor (id) {
    super(id, 'plugin')
  }
}
// ===================================================================
// Mixin handling the registration, configuration and (un)loading of
// xo-server plugins.
//
// Plugin instances live in memory (`this._plugins`); their metadata
// (autoload flag and configuration) is persisted in Redis.
export default class {
  constructor (xo) {
    this._plugins = createRawObject()

    this._pluginsMetadata = new PluginsMetadata({
      connection: xo._redis,
      prefix: 'xo:plugin-metadata'
    })
  }

  // Returns the in-memory plugin record for `id`.
  //
  // Throws `NoSuchPlugin` if the plugin is not registered.
  _getRawPlugin (id) {
    const plugin = this._plugins[id]
    if (!plugin) {
      throw new NoSuchPlugin(id)
    }
    return plugin
  }

  // Returns the persisted metadata (autoload, configuration) of plugin
  // `id`, or null if none has been saved yet.
  async _getPluginMetadata (id) {
    const metadata = await this._pluginsMetadata.first(id)
    return metadata
      ? metadata.properties
      : null
  }

  // Registers a plugin instance under `name`, then asynchronously
  // configures and loads it according to its persisted metadata.
  async registerPlugin (
    name,
    instance,
    configurationSchema
  ) {
    const id = name
    const plugin = this._plugins[id] = {
      // A plugin without a configuration schema is considered configured.
      configured: !configurationSchema,
      configurationSchema,
      id,
      instance,
      name,
      unloadable: isFunction(instance.unload)
    }

    const metadata = await this._getPluginMetadata(id)
    let autoload = true
    let configuration
    if (metadata) {
      ({
        autoload,
        configuration
      } = metadata)
    } else {
      console.log(`[NOTICE] register plugin ${name} for the first time`)
      await this._pluginsMetadata.save({
        id,
        autoload
      })
    }

    // Configure plugin if necessary. (i.e. configurationSchema)
    // Load plugin.
    // Ignore configuration and loading errors.
    Promise.resolve()
      .then(() => {
        if (!plugin.configured) {
          return this._configurePlugin(plugin, configuration)
        }
      })
      .then(() => {
        if (autoload) {
          return this.loadPlugin(id)
        }
      })
      .catch(error => {
        console.error('register plugin %s: %s', name, error && error.stack || error)
      })
  }

  // Returns the public view of the plugin `id`, merging in-memory state
  // and persisted metadata.
  async _getPlugin (id) {
    const {
      configurationSchema,
      loaded,
      name,
      unloadable
    } = this._getRawPlugin(id)
    const {
      autoload,
      configuration
    } = (await this._getPluginMetadata(id)) || {}

    return {
      id,
      name,
      autoload,
      loaded,
      unloadable,
      configuration,
      configurationSchema
    }
  }

  // Returns the public view of every registered plugin.
  async getPlugins () {
    return await Promise.all(
      mapToArray(this._plugins, ({ id }) => this._getPlugin(id))
    )
  }

  // Validate the configuration and configure the plugin instance.
  async _configurePlugin (plugin, configuration) {
    if (!plugin.configurationSchema) {
      throw new InvalidParameters('plugin not configurable')
    }

    const validate = createJsonSchemaValidator(plugin.configurationSchema)
    if (!validate(configuration)) {
      throw new InvalidParameters(validate.errors)
    }

    // Sets the plugin configuration.
    await plugin.instance.configure({
      // Shallow copy of the configuration object to avoid most of the
      // errors when the plugin is altering the configuration object
      // which is handed over to it.
      ...configuration
    })
    plugin.configured = true
  }

  // Validate the configuration, configure the plugin instance and
  // save the new configuration.
  async configurePlugin (id, configuration) {
    const plugin = this._getRawPlugin(id)

    await this._configurePlugin(plugin, configuration)

    // Saves the configuration.
    await this._pluginsMetadata.merge(id, { configuration })
  }

  async disablePluginAutoload (id) {
    // TODO: handle case where autoload is already disabled.
    await this._pluginsMetadata.merge(id, { autoload: false })
  }

  async enablePluginAutoload (id) {
    // TODO: handle case where autoload is already enabled.
    await this._pluginsMetadata.merge(id, { autoload: true })
  }

  // Loads the plugin `id`.
  //
  // Throws if the plugin is already loaded or not yet configured.
  async loadPlugin (id) {
    const plugin = this._getRawPlugin(id)
    if (plugin.loaded) {
      throw new InvalidParameters('plugin already loaded')
    }
    if (!plugin.configured) {
      throw new InvalidParameters('plugin not configured')
    }

    await plugin.instance.load()
    plugin.loaded = true
  }

  // Unloads the plugin `id`.
  //
  // Throws if the plugin is not loaded or does not support unloading.
  async unloadPlugin (id) {
    const plugin = this._getRawPlugin(id)
    if (!plugin.loaded) {
      throw new InvalidParameters('plugin already unloaded')
    }
    if (plugin.unloadable === false) {
      throw new InvalidParameters('plugin cannot be unloaded')
    }

    await plugin.instance.unload()
    plugin.loaded = false
  }

  // Removes the persisted configuration of the plugin `id`.
  async purgePluginConfiguration (id) {
    await this._pluginsMetadata.merge(id, { configuration: undefined })
  }
}

128
src/xo-mixins/remotes.js Normal file
View File

@@ -0,0 +1,128 @@
import RemoteHandlerLocal from '../remote-handlers/local'
import RemoteHandlerNfs from '../remote-handlers/nfs'
import RemoteHandlerSmb from '../remote-handlers/smb'
import {
forEach
} from '../utils'
import {
NoSuchObject
} from '../api-errors'
import {
Remotes
} from '../models/remote'
// ===================================================================
// Thrown when a remote id does not match any stored remote.
class NoSuchRemote extends NoSuchObject {
  constructor (id) {
    super(id, 'remote')
  }
}
// ===================================================================
// Mixin handling backup remotes (local/NFS/SMB storages): persistence,
// handler instantiation and lifecycle (sync on start, forget on stop).
export default class {
  constructor (xo) {
    this._remotes = new Remotes({
      connection: xo._redis,
      prefix: 'xo:remote',
      indexes: ['enabled']
    })

    xo.on('start', async () => {
      await this.initRemotes()
      await this.syncAllRemotes()
    })
    xo.on('stop', () => this.forgetAllRemotes())
  }

  // Returns a handler for `remote` (a remote object or a remote id),
  // chosen from the protocol part of its URL (file://, nfs:// or smb://).
  async getRemoteHandler (remote) {
    if (typeof remote === 'string') {
      remote = await this.getRemote(remote)
    }

    const Handler = {
      file: RemoteHandlerLocal,
      smb: RemoteHandlerSmb,
      nfs: RemoteHandlerNfs
    }

    const type = remote.url.split('://')[0]
    if (!Handler[type]) {
      throw new Error('Unhandled remote type')
    }

    return new Handler[type](remote)
  }

  async getAllRemotes () {
    return this._remotes.get()
  }

  // Returns the remote model for `id`.
  //
  // Throws `NoSuchRemote` if it does not exist.
  async _getRemote (id) {
    const remote = await this._remotes.first(id)
    if (!remote) {
      throw new NoSuchRemote(id)
    }
    return remote
  }

  async getRemote (id) {
    return (await this._getRemote(id)).properties
  }

  async createRemote ({name, url}) {
    const remote = await this._remotes.create(name, url)
    return await this.updateRemote(remote.get('id'), {enabled: true})
  }

  // Updates a remote, synchronizes it with its handler (mount state,
  // error reporting) and persists the result.
  async updateRemote (id, {name, url, enabled, error}) {
    const remote = await this._getRemote(id)
    this._updateRemote(remote, {name, url, enabled, error})
    const handler = await this.getRemoteHandler(remote.properties)
    const props = await handler.sync()
    this._updateRemote(remote, props)
    return (await this._remotes.save(remote)).properties
  }

  // Applies the defined fields to the remote model; `error` is cleared
  // when falsy.
  _updateRemote (remote, {name, url, enabled, error}) {
    if (name) remote.set('name', name)
    if (url) remote.set('url', url)
    if (enabled !== undefined) remote.set('enabled', enabled)
    if (error) {
      remote.set('error', error)
    } else {
      remote.set('error', '')
    }
  }

  async removeRemote (id) {
    const handler = await this.getRemoteHandler(id)
    await handler.forget()
    await this._remotes.remove(id)
  }

  // TODO: Should it be private?
  async syncAllRemotes () {
    const remotes = await this.getAllRemotes()
    forEach(remotes, remote => {
      // NOTE(review): fire-and-forget — a rejected sync is not handled
      // here; confirm whether failures should be awaited/reported.
      this.updateRemote(remote.id, {})
    })
  }

  // TODO: Should it be private?
  async forgetAllRemotes () {
    const remotes = await this.getAllRemotes()
    for (const remote of remotes) {
      try {
        // Await forget(): previously its promise was left floating, so a
        // rejection escaped this try/catch as an unhandled rejection.
        await (await this.getRemoteHandler(remote)).forget()
      } catch (_) {}
    }
  }

  // TODO: Should it be private?
  async initRemotes () {
    const remotes = await this.getAllRemotes()
    if (!remotes || !remotes.length) {
      await this.createRemote({name: 'default', url: 'file://var/lib/xoa-backups'})
    }
  }
}

View File

@@ -0,0 +1,80 @@
import Scheduler from '../scheduler'
import { Schedules } from '../models/schedule'
import {
NoSuchObject
} from '../api-errors'
// ===================================================================
// Thrown when a schedule id does not match any stored schedule.
class NoSuchSchedule extends NoSuchObject {
  constructor (id) {
    super(id, 'schedule')
  }
}
// ===================================================================
// Mixin handling the persistence of schedules and their registration in
// the scheduler.
export default class {
  constructor (xo) {
    this._schedules = new Schedules({
      connection: xo._redis,
      prefix: 'xo:schedule',
      indexes: ['user_id', 'job']
    })

    const scheduler = this._scheduler = new Scheduler(xo)
    xo.on('start', () => scheduler._loadSchedules())
    xo.on('stop', () => scheduler.disableAll())
  }

  get scheduler () {
    return this._scheduler
  }

  // Returns the schedule model for `id`.
  //
  // Throws `NoSuchSchedule` if it does not exist.
  async _getSchedule (id) {
    const schedule = await this._schedules.first(id)

    if (!schedule) {
      throw new NoSuchSchedule(id)
    }

    return schedule
  }

  async getSchedule (id) {
    const schedule = await this._getSchedule(id)
    return schedule.properties
  }

  async getAllSchedules () {
    const schedules = await this._schedules.get()
    return schedules
  }

  // Persists a new schedule and registers it in the scheduler.
  async createSchedule (userId, {job, cron, enabled, name}) {
    const model = await this._schedules.create(userId, job, cron, enabled, name)
    const schedule = model.properties

    const { scheduler } = this
    if (scheduler) {
      scheduler.add(schedule)
    }

    return schedule
  }

  // Updates the defined fields of a schedule, then refreshes it in the
  // scheduler.
  async updateSchedule (id, {job, cron, enabled, name}) {
    const schedule = await this._getSchedule(id)

    if (job) schedule.set('job', job)
    if (cron) schedule.set('cron', cron)
    if (enabled !== undefined) schedule.set('enabled', enabled)
    if (name !== undefined) schedule.set('name', name)

    await this._schedules.save(schedule)

    const { scheduler } = this
    if (scheduler) {
      scheduler.update(schedule.properties)
    }
  }

  // Removes a schedule from storage and from the scheduler.
  async removeSchedule (id) {
    await this._schedules.remove(id)

    const { scheduler } = this
    if (scheduler) {
      scheduler.remove(id)
    }
  }
}

1225
src/xo.js

File diff suppressed because it is too large Load Diff

View File

@@ -1,11 +1,8 @@
#!/usr/bin/env sh
# TODO: this generation should probably be automated and integrated
# into the build system.
set -e -u
cd "$(dirname "$(which "$0")")"
cd "${1:-$(dirname "$(which "$0")")}"
{
printf %s '//
@@ -14,13 +11,22 @@ cd "$(dirname "$(which "$0")")"
// It MUST be re-generated each time an API namespace (read file) is
// added or removed.
//
const defaults = {}
export default defaults
'
for f in *.js *.coffee
for f in *.js *.coffee */
do
[ -f "$f" ] || continue
base=${f%.*}
[ "$base" != index ] || continue
printf '%s\n' "export * as $base from './$base'"
printf '%s; %s\n%s\n' \
"import $base from './$base'" \
"defaults['$base'] = $base" \
"export * as $base from './$base'"
done | sort
} > index.js