Compare commits

..

185 Commits

Author SHA1 Message Date
Julien Fontanet
b42b3d1b01 feat(xo-server): 5.40.0 2019-04-26 16:27:15 +02:00
Julien Fontanet
a40d6f772a feat(complex-matcher): 0.6.0 2019-04-26 16:25:47 +02:00
Julien Fontanet
6e9bfd18d9 feat(xo-server-backup-reports): 0.16.0 2019-04-26 16:24:22 +02:00
Julien Fontanet
3b92dd0139 feat(scripts): bump-pkg 2019-04-26 16:17:28 +02:00
HamadaBrest
564d53610a fix(xo-web/editable): notify user when undo fails (#4157)
Fixes #3799
2019-04-26 11:25:23 +02:00
Pierre Donias
b4c7b8ac7f fix(xo-web/new-vm): typos (#4158)
Introduced by 7acd90307b
2019-04-25 14:44:25 +02:00
HamadaBrest
7acd90307b feat(xo-web/new-vm): network config box for cloud-init (#4150)
Fixes #3872
2019-04-24 17:04:54 +02:00
Julien Fontanet
d3ec76c19f feat(lint): add eslint-comments plugin 2019-04-19 16:27:11 +02:00
HamadaBrest
688cb20674 feat(xo-web/self): remove ID from end user resource sets and add it to Self UI (#4151)
Fixes #4100
2019-04-18 16:45:52 +02:00
HamadaBrest
c63be20bea fix(xo-web/home): J/K navigation loop (#4152)
Fixes #2793
2019-04-18 16:15:25 +02:00
Rajaa.BARHTAOUI
df36633223 feat(xo-web/vm/attach disk): warning if VDI is on another local SR (#4117)
See #3911

Show a warning message if the VM has a VDI sitting on a local SR and the user
select a VDI sitting on a local SR on a different host since the VM won't be
able to start
2019-04-18 16:00:24 +02:00
badrAZ
3597621d88 feat(xo-server-backup-reports): metadata report implementation (#4084) 2019-04-18 09:49:18 +02:00
Pierre Donias
8387684839 fix(xo-web/migrateVm): don't pass SR if same-pool migration (#4146)
Fixes #4145

Introduced by f581e93b88
2019-04-17 16:04:05 +02:00
Pierre Donias
f261f395f1 fix(xo-web/migrateVm): typo (#4147) 2019-04-17 15:31:20 +02:00
Rajaa.BARHTAOUI
f27170ff0e feat(xo-web/vm/disk): notify user before breaking action (#4035)
See #3911

- New disk: warning if the selected SR is local to another host than another VDI
- Migrate VDI (row action only): warning if the selected SR is local to another host than another VDI
2019-04-16 11:04:12 +02:00
Enishowk
d82c951db6 feat(home): use regexp for tags filtering (#4112)
Avoid substring false positives.

Fixes #4087
2019-04-16 10:31:39 +02:00
Rajaa.BARHTAOUI
41ca853e03 feat(xo-web/new-vm): warning on high resource consumption (#4127)
Fixes #4044
2019-04-15 14:26:17 +02:00
Julien Fontanet
a08d098265 chore: update dependencies 2019-04-15 09:54:58 +02:00
Rajaa.BARHTAOUI
875681b8ce fix(xo-web/New VM): template selector won't load (#3565)
Fixes #3265
2019-04-12 14:51:13 +02:00
Julien Fontanet
a03dcbbf55 feat(xo-server): make Helmet configurable (#4141) 2019-04-12 13:49:51 +02:00
badrAZ
97cabbbc69 chore(CHANGELOG): update next 2019-04-11 17:42:52 +02:00
badrAZ
13725a9e21 feat(xo-web): v5.39.1 2019-04-11 17:22:34 +02:00
badrAZ
f47df961f7 fix(xo-web/backup-ng): transfer/merge tasks not displayed in the logs (#4140)
Introduced by 865d2df124
2019-04-11 17:12:47 +02:00
badrAZ
2f644d5eeb chore(CHANGELOG): update next 2019-04-11 16:19:29 +02:00
badrAZ
4b292bb78c feat(xo-web): v5.39.0 2019-04-11 16:02:57 +02:00
badrAZ
804891cc81 feat(xo-server): v5.39.0 2019-04-11 16:00:36 +02:00
badrAZ
d335e06371 feat(vhd-lib): v0.6.1 2019-04-11 15:54:48 +02:00
badrAZ
477058ad23 feat(xo-vmdk-to-vhd): v0.1.7 2019-04-11 15:35:15 +02:00
badrAZ
eb3b68401d feat(xo-server/metadata-backups): reportWhen implementation (#4135) 2019-04-11 15:24:15 +02:00
badrAZ
865d2df124 feat(xo-web/metadata-backups): metadata logs implementation (#4014)
Fixes #4005
2019-04-11 12:00:33 +02:00
badrAZ
88160bae1d fix(xo-server,xo-web/metadata-backups): handle null retentions (#4133)
Introduced by fea5117ed8
2019-04-11 11:00:04 +02:00
Rajaa.BARHTAOUI
f581e93b88 feat(xo-web/vm): migrate modal improvements (#4121)
Fixes #3326

- auto-select default SR as main SR
- hide optional per-VDI SR selection
2019-04-11 10:13:40 +02:00
Rajaa.BARHTAOUI
21a7cf7158 fix(xo-web/menu/xoa): display icon when no notifications nor updates (#4068)
Fixes #4012
2019-04-10 14:35:58 +02:00
Rajaa.BARHTAOUI
5edee4bae0 feat(xo-web/dashboard/overview): display 'Report' for admin only (#4126)
Fixes #4123
2019-04-09 17:13:28 +02:00
Julien Fontanet
916ca5576a feat(xen-api/cli): everything in context
Example: `xapi.pool` → `pool`
2019-04-09 17:09:03 +02:00
Julien Fontanet
6c861bfd1f feat(xen-api): name record classes with types 2019-04-09 16:28:46 +02:00
Rajaa.BARHTAOUI
56961b55bd fix(xo-web/dashboard/health): fix 'an error has occurred' (#4132)
Fixes  #4128
2019-04-09 15:17:34 +02:00
badrAZ
cdcd7154ba fix(xo-web/backup-ng): only display full interval in case of delta (#4125) 2019-04-09 15:12:52 +02:00
badrAZ
654a2ee870 feat(xo-web/backup-ng): make backup list title clearer (#4129)
Fixes #4111
2019-04-09 09:44:09 +02:00
Julien Fontanet
903634073a chore: update dependencies 2019-04-09 08:56:15 +02:00
badrAZ
0d4818feb6 fix(xo-server/metadata-backups): various changes (#4114)
- fix uncompleted log if one of the backup fails
- fix the case of a backup with xo mode and pool mode, if one fails the other will not be executed.
- log xo/pool by remotes
- log a warning task if a pool or a remote is missed
- log a warning task if a backup is not properly deleted
2019-04-08 17:24:02 +02:00
Julien Fontanet
d6aa40679b feat(xo-server/_assertHealthyVdiChains): attach info to error 2019-04-05 15:48:12 +02:00
Jon Sands
b7cc31c94d feat(docs/metadata backup): add restore instructions (#4116) 2019-04-05 11:21:21 +02:00
Julien Fontanet
6860156b6f chore(CHANGELOG): v5.33.1 2019-04-04 14:39:32 +02:00
Julien Fontanet
29486c9ce2 feat(xo-server): 5.38.2 2019-04-04 14:20:46 +02:00
Julien Fontanet
7cfa6a5da4 feat(xen-api): v0.25.1 2019-04-04 14:01:59 +02:00
Julien Fontanet
2563be472b fix(xen-api/_interruptOnDisconnect): dont use Promise.race
`Promise.race()` leads to memory leaks if some promises are never resolved.

See nodejs/node#17469
2019-04-04 13:42:45 +02:00
Julien Fontanet
7289e856d9 chore(xen-api/_sessionCall): dont use _interruptOnDisconnect 2019-04-04 13:39:45 +02:00
Nicolas Raynaud
975de1954e feat(xo-web/vm-import): don't block the UI when dropping a big OVA file (#4018) 2019-04-04 10:59:44 +02:00
Julien Fontanet
95bcf0c080 fix(xo-web/vms/import): various fixes (#4118)
- dont swallow `importVm` error
- `importVms`: display errors on console
- dont redirect on failure
2019-04-04 10:10:45 +02:00
Enishowk
f900a5ef4f feat(xo-web/backup): add warning regarding DST (#4056)
Fixes #4042
2019-04-03 11:42:24 +02:00
badrAZ
7f1ab529ae feat(xo-server/metadata-backups): logs implementation (#4108)
See #4014
2019-04-02 15:53:12 +02:00
Julien Fontanet
49fc86e4b1 chore(xen-api): rewrite inject-event test CLI 2019-04-02 15:24:25 +02:00
Julien Fontanet
924aef84f1 chore: drop Node 4 support 2019-04-02 11:40:27 +02:00
Rajaa.BARHTAOUI
96e6e2b72a feat(xo-web/xoa): registration panel enhancements (#4104)
Fixes #4043

- Remove useless "Trial" title
- Make the "Start trial" button bigger
2019-04-02 11:39:27 +02:00
Enishowk
71997d4e65 feat(xo-web/remotes): expose mount options field for SMB (#4067)
Fixes #4063
2019-04-02 10:49:45 +02:00
Nicolas Raynaud
447f2f9506 fix(vhd-lib/createVhdStreamWithLength): handle empty VHD (#4107)
Fixes #4105
2019-04-01 16:53:02 +02:00
Julien Fontanet
79aef9024b chore(CHANGELOG): move packages after fixes 2019-03-29 16:45:51 +01:00
Julien Fontanet
fdf6f4fdf3 chore(CHANGELOG): add missing packages list 2019-03-29 16:38:59 +01:00
Julien Fontanet
4d1eaaaade feat(xo-server): 5.38.1 2019-03-29 16:38:06 +01:00
Julien Fontanet
bdad6c0f6d feat(xen-api): v0.25.0 2019-03-29 16:35:19 +01:00
Julien Fontanet
ff1ca5d933 feat(xen-api/call): 1 hour timeout 2019-03-29 16:26:36 +01:00
Julien Fontanet
2cf4c494a4 feat(xen-api/connect): handle disconnect 2019-03-29 16:21:19 +01:00
Julien Fontanet
95ac0a861a chore(xen-api/getObjectByUuid): explicit test 2019-03-29 16:13:10 +01:00
Julien Fontanet
746c301f39 feat(xen-api): expose objectsFetched signal 2019-03-29 16:12:39 +01:00
Julien Fontanet
6455b12b58 chore(xen-api): real status state 2019-03-29 16:10:04 +01:00
Julien Fontanet
485b8fe993 chore(xen-api): rework events watching (#4103) 2019-03-29 15:59:51 +01:00
Julien Fontanet
d7527f280c chore(xen-api): rework call methods (#4102) 2019-03-29 15:39:31 +01:00
Julien Fontanet
d57fa4375d chore(xen-api/signals): not disconnected when connecting 2019-03-29 15:27:37 +01:00
Julien Fontanet
d9e42c6625 chore(xen-api): remove unused property 2019-03-29 15:08:57 +01:00
badrAZ
28293d3fce chore(CHANGELOG): v5.33.0 2019-03-29 15:04:27 +01:00
badrAZ
d505401446 feat(xo-web): v5.38.0 2019-03-29 14:37:25 +01:00
badrAZ
fafc24aeae feat(xo-server): v5.38.0 2019-03-29 14:35:48 +01:00
badrAZ
f78ef0d208 feat(xo-server-usage-report): v0.7.2 2019-03-29 14:33:08 +01:00
badrAZ
8384cc3652 feat(@xen-orchestra/fs): v0.8.0 2019-03-29 14:27:25 +01:00
badrAZ
60aa18a229 feat(vhd-lib): v0.6.0 2019-03-29 14:11:09 +01:00
badrAZ
3d64b42a89 feat(xen-api): v0.24.6 2019-03-29 14:05:14 +01:00
badrAZ
b301997d4b feat(xo-web): ability to restore a metadata backup (#4023)
Fixes #4004
2019-03-29 13:54:54 +01:00
Enishowk
ab34743250 feat(xo-web/hosts): suggest XCP-ng as alternative to XS Free (#4094)
Fixes #4091
2019-03-29 11:59:52 +01:00
badrAZ
bc14a1d167 feat(xo-web/backup-ng): ability to set the full backup interval (#4099)
Fixes #1783
2019-03-29 11:43:37 +01:00
badrAZ
2886ec116f feat(xo-server/metadata-backups): ability to restore metadata backup (#4096)
See #4004
2019-03-29 11:21:03 +01:00
Julien Fontanet
c2beb2a5fa chore(server/backup-ng-logs): initial documentation 2019-03-29 11:03:34 +01:00
Nicolas Raynaud
d6ac10f527 feat(xo-web/vm-import): improve VM import wording (#4020) 2019-03-29 09:23:39 +01:00
Julien Fontanet
9dcd8a707a feat(xen-api): add connected/disconnected signals 2019-03-28 18:39:33 +01:00
Julien Fontanet
e1e97ef158 chore(xen-api): set empty sessionId to undefined instead of null 2019-03-28 18:39:28 +01:00
Julien Fontanet
5d6b37f81a fix(xen-api/connect): dont stay disconnecting on failure 2019-03-28 18:19:50 +01:00
Julien Fontanet
e1da08ba38 chore(xen-api/connect): assert initially disconnected 2019-03-28 18:19:18 +01:00
Julien Fontanet
1dfb50fefd feat(xo-server/backup): fullInterval setting (#4086)
See #4083
2019-03-28 18:10:05 +01:00
Julien Fontanet
5c06ebc9c8 feat(xen-api/{,dis}connect): dont fail if already in expected state 2019-03-28 17:38:12 +01:00
Julien Fontanet
52a9270fb0 feat(xen-api): coalesce connect calls 2019-03-28 17:30:26 +01:00
Julien Fontanet
82247d7422 chore(xen-api): various changes 2019-03-28 17:30:25 +01:00
Julien Fontanet
b34688043f chore(xen-api): rewrite barrier and createTask 2019-03-28 17:30:24 +01:00
Julien Fontanet
ce4bcbd19d chore(xen-api): move more methods 2019-03-28 17:30:24 +01:00
Pierre Donias
cde9a02c32 fix(xo-server,xo-web,xo-server-usage-report): patches (#4077)
See #2565
See #3655
Fixes #2188
Fixes #3777
Fixes #3783
Fixes #3934
Fixes support#1228
Fixes support#1338
Fixes support#1362

- mergeInto: fix auto-patching on XS < 7.2
- mergeInto: homogenize both the host and pool's patches
- correctly install specific patches
- XCP-ng: fix "xcp-ng-updater not installed" bug
2019-03-28 17:05:04 +01:00
Julien Fontanet
fe1da4ea12 chore(xen-api): _addObject → _addRecordToCache, _removeObject → _removeRecordFromCache 2019-03-28 16:17:53 +01:00
Julien Fontanet
a73306817b chore(xen-api): move more methods 2019-03-28 16:15:09 +01:00
Julien Fontanet
54e683d3d4 chore(xen-api): move getField to object handling helpers section 2019-03-28 16:01:10 +01:00
Enishowk
f49910ca82 feat(xo-web, xo-server): display link to pool (#4045)
Fixes #4041
2019-03-28 15:42:37 +01:00
Julien Fontanet
4052f7f736 chore(xen-api): regroup HTTP requests 2019-03-28 13:58:23 +01:00
Julien Fontanet
b47e097983 feat(xen-api/{get,put}Resource): add inactivity detection (#4090) 2019-03-28 13:55:56 +01:00
Julien Fontanet
e44dbfb2a4 fix(xen-api/examples): use isOpaqueRef private module 2019-03-28 13:30:08 +01:00
Julien Fontanet
7d69dd9400 fix(xen-api): add missing Babel plugin 2019-03-28 12:21:55 +01:00
Julien Fontanet
e6aae8fcfa chore(xen-api): regroup object handling helpers 2019-03-28 12:19:08 +01:00
Julien Fontanet
da800b3391 chore(xo-collection): minor improvements (#4089) 2019-03-28 12:15:04 +01:00
Julien Fontanet
3a574bcecc chore(xen-api): clean call/callAsync code 2019-03-28 12:14:03 +01:00
Julien Fontanet
1bb0e234e7 chore(xen-api): modularize (#4088) 2019-03-28 11:17:25 +01:00
Julien Fontanet
b7e14ebf2a fix(xo-server/snapshotVm): dont retry and unconditionaly clean (#4075)
Fixes #4074
2019-03-28 10:54:50 +01:00
Nicolas Raynaud
2af1207702 feat(vhd-lib,xo-server): guess VHD size on import (#3726) 2019-03-28 10:16:28 +01:00
Julien Fontanet
ecfed30e6e fix(xo-web/JSON schema object input): clear when un-use (#4076) 2019-03-28 10:05:15 +01:00
Enishowk
d06c3e3dd8 fix(xo-web/smart-backup): StringNode → RegExpNode to anchor strings (#4085)
Fixes #4078
2019-03-27 22:11:23 +01:00
Julien Fontanet
16b3fbeb16 fix(scripts/travis-tests): integration tests on branches 2019-03-27 15:45:16 +01:00
Julien Fontanet
0938804947 fix(xo-server/worker): forget remote after use (#4079)
Fixes xoa-support#1378
Fixes xoa-support#1384
Fixes xoa-support#1399
2019-03-27 10:52:42 +01:00
Julien Fontanet
851bcf9816 feat(xo-server/api): close connection when session expires (#4071)
See xoa-support#1389
2019-03-27 10:36:15 +01:00
Julien Fontanet
9f6fc785bc fix(xo-server/remotes): dont sync unnecessarily 2019-03-27 10:04:01 +01:00
Julien Fontanet
56636bf5d4 chore(xo-server/MultiKeyMap): better implementation (#4070)
No longer unnecessarily use a map per entry and avoid creating Node as much as possible.
2019-03-26 17:50:22 +01:00
Julien Fontanet
3899a65167 fix(xo-server/plugin.configure): properly merge previous config 2019-03-26 17:29:33 +01:00
Rajaa.BARHTAOUI
628e53c1c3 feat(xo-web/settings/plugins): display plugin count (#4050)
Fixes #4008
2019-03-26 16:55:55 +01:00
Enishowk
9fa424dd8d fix(xo-web/remotes): test if writeRate or readRate are defined (#4073)
Fixes #4072
2019-03-26 15:35:01 +01:00
Julien Fontanet
3e6f2eecfa chore(xo-server/index): allow console 2019-03-26 09:38:43 +01:00
Julien Fontanet
cc655c8ba8 feat(xo-server): pDebounceWithKey (#4066) 2019-03-25 17:36:41 +01:00
Julien Fontanet
78aa0474ee chore(ESLint): ban console logs 2019-03-25 17:02:40 +01:00
Enishowk
9caefa2f49 fix(xo-web/remotes): remove console.log (#4065) 2019-03-25 17:02:21 +01:00
Julien Fontanet
478726fa3b chore(xo-server/ensureArray): move into own module 2019-03-25 16:48:15 +01:00
Julien Fontanet
f64917ec52 feat(xo-server): configurable session validity (#4059)
See xoa-support#1389
2019-03-25 16:24:27 +01:00
badrAZ
2bc25f91c4 chore(xo-server/XapiStats): improve the cache implementation (#3859) 2019-03-25 10:45:54 +01:00
Julien Fontanet
623d7ffe2f feat(cloud config): Config Drive datasource as a fallback (#4053)
Fixes xoa-support#1179
2019-03-25 09:34:21 +01:00
Julien Fontanet
07510b5099 chore(xo-server/authenticateUser): better doc and explicit tests 2019-03-22 17:58:00 +01:00
Julien Fontanet
9f21f9a7bc chore(xo-server/authenticateUser): throws invalidCredentials instead of returning false 2019-03-22 17:56:58 +01:00
Julien Fontanet
93da70709e fix(xo-web): dont try reconnecting websocket on sign out 2019-03-22 17:34:46 +01:00
Julien Fontanet
00436e744a fix(xo-server/_parseDuration): add missing file
Related to 6baef2450
2019-03-22 16:19:09 +01:00
Julien Fontanet
1e642fc512 chore(xo-server/store): dont access xo._config 2019-03-22 15:45:09 +01:00
Julien Fontanet
6baef2450c feat(xo-server/authentication): configurable token validity 2019-03-22 15:29:11 +01:00
Julien Fontanet
600f34f85a fix(xo-server/console proxy): close socket on authentication error 2019-03-22 11:43:52 +01:00
Julien Fontanet
6c0c6bc5c4 fix(xo-server-recover-account): remove OTP (#4055)
Otherwise it's not possible to recover if the OTP setting has been lost.
2019-03-22 11:10:16 +01:00
Enishowk
fcd62ed3cd feat(remote): add read/write speeds on remote (#4015)
Fixes #3991
2019-03-21 18:25:03 +01:00
Julien Fontanet
785f2e3a6d chore: update Babel
Fixes #4052
2019-03-21 12:33:18 +01:00
Julien Fontanet
c2925f7c1e chore(xo-import-servers-csv): remove csv-parser types
Already available in csv-parser@2.2.0.
2019-03-21 12:31:05 +01:00
Jon Sands
60814d8b58 fix(docs): add pictures to manual seeding instructions (#4019) 2019-03-21 08:38:18 +01:00
Julien Fontanet
2dec448f2c chore: update dependencies 2019-03-20 11:26:05 +01:00
Pierre Donias
b71f4f6800 fix(xo-web/home): always sort by name_label as a secondary sort (#4047)
Fixes #3983
2019-03-20 09:43:06 +01:00
badrAZ
558083a916 chore(CHANGELOG): update next 2019-03-19 15:30:24 +01:00
badrAZ
d507ed9dff feat(xo-web): v5.37.0 2019-03-19 15:05:41 +01:00
badrAZ
7ed0242662 feat(xo-server): v5.37.0 2019-03-19 14:59:59 +01:00
badrAZ
d7b3d989d7 feat(xo-server-auth-google): v0.2.1 2019-03-19 14:51:25 +01:00
badrAZ
707b2f77f0 fix(xo-web/backup-ng): display compression only in full mode (#4021)
Fixes xoa-support#1346
2019-03-19 11:58:11 +01:00
Rajaa.BARHTAOUI
5ddbb76979 feat(xo-web/vm/disk): warning when SRs on 2 different hosts (#3969)
See #3911

Show a warning message when at least 2 VDIs attached to the VM are on 2 local SRs on 2 different hosts because the VM won't be able to start (NO_HOSTS_AVAILABLE)
2019-03-19 11:14:30 +01:00
Enishowk
97b0fe62d4 feat(xo-server/vm.delete): ensure suspend VDI is destroyed (#4038)
Fixes #4027
2019-03-18 10:29:54 +01:00
badrAZ
8ac9b2cdc7 fix(xo-server/xapi-stats): synchronize requests to improve caching (#4028)
Fixes #4017
2019-03-15 11:46:21 +01:00
badrAZ
bc4c1a13e6 chore(xo-server): remove deprecated syntax for decorator-synchronized (#4037) 2019-03-14 18:13:39 +01:00
Julien Fontanet
d3ec303ade feat(xo-server): properly streams NDJSON (#4030) 2019-03-14 11:21:09 +01:00
Rajaa.BARHTAOUI
6cfc2a1ba6 fix(CHANGELOG.unreleased): remove duplicate entry (#4034) 2019-03-13 10:54:40 +01:00
Enishowk
e15cadc863 feat(xo-web/home): add current page in url (#3999)
Fixes #3993
2019-03-13 08:54:30 +01:00
Julien Fontanet
2f9284c263 fix(xo-server/sample.config.toml): datadir is not in redis section 2019-03-12 22:39:37 +01:00
badrAZ
2465852fd6 chore(xo-web): rename ret. to retention (#4029) 2019-03-12 15:18:03 +01:00
badrAZ
a9f48a0d50 fix(xo-web/migrateVms): VM disks migrated to the wrong SR (#3987)
Fixes #3986
2019-03-12 14:36:56 +01:00
badrAZ
4ed0035c67 chore(xo-server/xapi-stats): add documentation (#4031) 2019-03-12 13:24:34 +01:00
Rajaa.BARHTAOUI
b66f2dfb80 feat(xo-web/vm/disks): same-pool SRs first in migrate selector (#3996)
Fixes #3945
2019-03-12 11:44:38 +01:00
Julien Fontanet
3cb155b129 feat(CHANGELOG.unreleased): add compression 2019-03-12 11:04:12 +01:00
Julien Fontanet
df7efc04e2 feat(backup NG logs): use NDJSON (#4026)
Fixes #4025
2019-03-12 11:02:15 +01:00
Rajaa.BARHTAOUI
a21a8457a4 feat(xo-web/new/vm): warning when SRs not on same host (#3967)
See #3911
2019-03-12 10:20:40 +01:00
Julien Fontanet
020955f535 chore(yarn.lock): refresh 2019-03-11 21:05:48 +01:00
Julien Fontanet
51f23a5f03 chore(xo-web): update otplib to 11.0.0 2019-03-11 19:07:51 +01:00
Julien Fontanet
d024319441 fix(xo-server-auth-google): update passport-google-oauth20 to 2.0.0 2019-03-11 19:07:34 +01:00
Julien Fontanet
f8f35938c0 feat(xo-server): set NODE_ENV to production 2019-03-11 18:52:25 +01:00
Julien Fontanet
2573ace368 feat(xo-server): enable HTTP compression 2019-03-11 18:27:05 +01:00
Rajaa.BARHTAOUI
6bf7269814 feat(xo-server,xo-web/VM): start delay (#4002)
Fixes #3909
2019-03-11 15:39:10 +01:00
Julien Fontanet
6695c7bf5e feat(CHANGELOG): 5.32.1 and 5.32.2 2019-03-11 15:23:51 +01:00
Julien Fontanet
44a83fd817 fix(docs/cr/seed): fix CLI package name 2019-03-06 19:23:33 +01:00
Enishowk
08ddfe0649 feat(VM creation): support automatic networks (#3958)
Fixes #3916
2019-03-06 14:46:22 +01:00
Enishowk
5ba170bf1f feat(xo-web/SR/disks): disable actions on unmanaged VDIs (#4000)
Fixes #3988
2019-03-06 09:40:07 +01:00
Julien Fontanet
8150d3110c fix(vhd-cli/repl): various fixes 2019-03-05 11:46:14 +01:00
Pierre Donias
312b33ae85 fix(xo-web/new-network): PIF should not be required (#4010)
Introduced by 7a2a88b7ad

Requiring a PIF prevented from creating private networks
2019-03-04 17:45:48 +01:00
Julien Fontanet
008eb995ed feat(vhd-cli): 0.3.0 2019-03-01 20:07:58 +01:00
Julien Fontanet
6d8848043c feat(vhd-cli): repl command 2019-03-01 20:00:18 +01:00
Julien Fontanet
cf572c0cc5 feat(xo-server): 5.36.3 2019-03-01 17:21:09 +01:00
Julien Fontanet
18cfa7dd29 feat(xen-api): 0.24.5 2019-03-01 17:20:19 +01:00
Julien Fontanet
72cac2bbd6 chore(xen-api/json-rpc): link to XenCenter code 2019-03-01 16:41:15 +01:00
Julien Fontanet
48ffa28e0b fix(xen-api/_watchEvents): timeout must be a float
Required by XML-RPC transport (XenServer < 7.3).
2019-03-01 16:39:49 +01:00
Julien Fontanet
2e6baeb95a feat(xo-server): 5.36.2 2019-03-01 13:53:28 +01:00
Julien Fontanet
3b5650dc1e feat(xen-api): 0.24.4 2019-03-01 13:52:26 +01:00
Julien Fontanet
3279728e4b chore(xen-api/events): prints errors 2019-03-01 13:42:13 +01:00
Julien Fontanet
fe0dcbacc5 fix(xen-api/_watchEvents): pTimeout expects milliseconds 2019-03-01 13:40:03 +01:00
Julien Fontanet
7c5d90fe40 feat(xo-server/createCloudInit): support network config (#3997)
* feat(xo-server/createCloudInit): support network config

See #3872

* Update index.js
2019-03-01 09:50:37 +01:00
marcpezin
944dad6e36 feat(docs): metadata backups (#4001) 2019-03-01 09:49:25 +01:00
Julien Fontanet
6713d3ec66 chore: update dependencies 2019-03-01 09:44:12 +01:00
183 changed files with 8486 additions and 4785 deletions

View File

@@ -1,5 +1,7 @@
module.exports = {
extends: [
'plugin:eslint-comments/recommended',
'standard',
'standard-jsx',
'prettier',
@@ -16,6 +18,16 @@ module.exports = {
$PropertyType: true,
$Shape: true,
},
overrides: [
{
files: ['cli.js', '*-cli.js', 'packages/*cli*/**/*.js'],
rules: {
'no-console': 'off',
},
},
],
parser: 'babel-eslint',
parserOptions: {
ecmaFeatures: {
@@ -23,6 +35,7 @@ module.exports = {
},
},
rules: {
'no-console': ['error', { allow: ['warn', 'error'] }],
'no-var': 'error',
'node/no-extraneous-import': 'error',
'node/no-extraneous-require': 'error',

View File

@@ -46,6 +46,12 @@ const getConfig = (key, ...args) => {
: config
}
// some plugins must be used in a specific order
const pluginsOrder = [
'@babel/plugin-proposal-decorators',
'@babel/plugin-proposal-class-properties',
]
module.exports = function(pkg, plugins, presets) {
plugins === undefined && (plugins = {})
presets === undefined && (presets = {})
@@ -61,7 +67,13 @@ module.exports = function(pkg, plugins, presets) {
return {
comments: !__PROD__,
ignore: __TEST__ ? undefined : [/\.spec\.js$/],
plugins: Object.keys(plugins).map(plugin => [plugin, plugins[plugin]]),
plugins: Object.keys(plugins)
.map(plugin => [plugin, plugins[plugin]])
.sort(([a], [b]) => {
const oA = pluginsOrder.indexOf(a)
const oB = pluginsOrder.indexOf(b)
return oA !== -1 && oB !== -1 ? oA - oB : a < b ? -1 : 1
}),
presets: Object.keys(presets).map(preset => [preset, presets[preset]]),
}
}

View File

@@ -16,6 +16,6 @@
},
"dependencies": {
"golike-defer": "^0.4.1",
"xen-api": "^0.24.3"
"xen-api": "^0.25.1"
}
}

View File

@@ -1,6 +1,6 @@
{
"name": "@xen-orchestra/fs",
"version": "0.7.1",
"version": "0.8.0",
"license": "AGPL-3.0",
"description": "The File System for Xen Orchestra backups.",
"keywords": [],
@@ -24,15 +24,15 @@
"@marsaud/smb2": "^0.13.0",
"@sindresorhus/df": "^2.1.0",
"@xen-orchestra/async-map": "^0.0.0",
"decorator-synchronized": "^0.3.0",
"decorator-synchronized": "^0.5.0",
"execa": "^1.0.0",
"fs-extra": "^7.0.0",
"get-stream": "^4.0.0",
"lodash": "^4.17.4",
"promise-toolbox": "^0.11.0",
"promise-toolbox": "^0.12.1",
"readable-stream": "^3.0.6",
"through2": "^3.0.0",
"tmp": "^0.0.33",
"tmp": "^0.1.0",
"xo-remote-parser": "^0.5.0"
},
"devDependencies": {
@@ -45,7 +45,7 @@
"async-iterator-to-stream": "^1.1.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^5.1.3",
"dotenv": "^6.1.0",
"dotenv": "^7.0.0",
"index-modules": "^0.3.0",
"rimraf": "^2.6.2"
},

View File

@@ -25,6 +25,10 @@ type RemoteInfo = { used?: number, size?: number }
type File = FileDescriptor | string
const checksumFile = file => file + '.checksum'
const computeRate = (hrtime: number[], size: number) => {
const seconds = hrtime[0] + hrtime[1] / 1e9
return size / seconds
}
const DEFAULT_TIMEOUT = 6e5 // 10 min
@@ -362,18 +366,27 @@ export default class RemoteHandlerAbstract {
}
async test(): Promise<Object> {
const SIZE = 1024 * 1024 * 10
const testFileName = normalizePath(`${Date.now()}.test`)
const data = await fromCallback(cb => randomBytes(1024 * 1024, cb))
const data = await fromCallback(cb => randomBytes(SIZE, cb))
let step = 'write'
try {
const writeStart = process.hrtime()
await this._outputFile(testFileName, data, { flags: 'wx' })
const writeDuration = process.hrtime(writeStart)
step = 'read'
const readStart = process.hrtime()
const read = await this._readFile(testFileName, { flags: 'r' })
const readDuration = process.hrtime(readStart)
if (!data.equals(read)) {
throw new Error('output and input did not match')
}
return {
success: true,
writeRate: computeRate(writeDuration, SIZE),
readRate: computeRate(readDuration, SIZE),
}
} catch (error) {
return {

View File

@@ -290,9 +290,11 @@ handlers.forEach(url => {
describe('#test()', () => {
it('tests the remote appears to be working', async () => {
expect(await handler.test()).toEqual({
success: true,
})
const answer = await handler.test()
expect(answer.success).toBe(true)
expect(typeof answer.writeRate).toBe('number')
expect(typeof answer.readRate).toBe('number')
})
})

View File

@@ -27,11 +27,11 @@
">2%"
],
"engines": {
"node": ">=4"
"node": ">=6"
},
"dependencies": {
"lodash": "^4.17.4",
"promise-toolbox": "^0.11.0"
"promise-toolbox": "^0.12.1"
},
"devDependencies": {
"@babel/cli": "^7.0.0",

View File

@@ -1,5 +1,110 @@
# ChangeLog
## **next** (2019-04-11)
### Enhancements
- [Settings/remotes] Expose mount options field for SMB [#4063](https://github.com/vatesfr/xen-orchestra/issues/4063) (PR [#4067](https://github.com/vatesfr/xen-orchestra/pull/4067))
- [Backup/Schedule] Add warning regarding DST when you add a schedule [#4042](https://github.com/vatesfr/xen-orchestra/issues/4042) (PR [#4056](https://github.com/vatesfr/xen-orchestra/pull/4056))
- [Import] Avoid blocking the UI when dropping a big OVA file on the UI (PR [#4018](https://github.com/vatesfr/xen-orchestra/pull/4018))
- [Backup NG/Overview] Make backup list title clearer [#4111](https://github.com/vatesfr/xen-orchestra/issues/4111) (PR [#4129](https://github.com/vatesfr/xen-orchestra/pull/4129))
- [Dashboard] Hide "Report" section for non-admins [#4123](https://github.com/vatesfr/xen-orchestra/issues/4123) (PR [#4126](https://github.com/vatesfr/xen-orchestra/pull/4126))
- [VM migration] Auto select default SR and collapse optional actions [#3326](https://github.com/vatesfr/xen-orchestra/issues/3326) (PR [#4121](https://github.com/vatesfr/xen-orchestra/pull/4121))
- [Metadata backup] Logs [#4005](https://github.com/vatesfr/xen-orchestra/issues/4005) (PR [#4014](https://github.com/vatesfr/xen-orchestra/pull/4014))
### Bug fixes
- [Continuous Replication] Fix VHD size guess for empty files [#4105](https://github.com/vatesfr/xen-orchestra/issues/4105) (PR [#4107](https://github.com/vatesfr/xen-orchestra/pull/4107))
- [Backup NG] Only display full backup interval in case of a delta backup (PR [#4125](https://github.com/vatesfr/xen-orchestra/pull/4107))
- [Dashboard/Health] fix 'an error has occurred' on the storage state table [#4128](https://github.com/vatesfr/xen-orchestra/issues/4128) (PR [#4132](https://github.com/vatesfr/xen-orchestra/pull/4132))
- [Menu] XOA: Fixed empty slot when menu is collapsed [#4012](https://github.com/vatesfr/xen-orchestra/issues/4012) (PR [#4068](https://github.com/vatesfr/xen-orchestra/pull/4068)
### Released packages
- xo-vmdk-to-vhd v0.1.7
- vhd-lib v0.6.1
- xo-server v5.39.0
- xo-web v5.39.1
## **5.33.1** (2019-04-04)
### Bug fix
- Fix major memory leak [2563be4](https://github.com/vatesfr/xen-orchestra/commit/2563be472bfd84c6ed867efd21c4aeeb824d387f)
### Released packages
- xen-api v0.25.1
- xo-server v5.38.2
## **5.33.0** (2019-03-29)
### Enhancements
- [SR/Disk] Disable actions on unmanaged VDIs [#3988](https://github.com/vatesfr/xen-orchestra/issues/3988) (PR [#4000](https://github.com/vatesfr/xen-orchestra/pull/4000))
- [Pool] Specify automatic networks on a Pool [#3916](https://github.com/vatesfr/xen-orchestra/issues/3916) (PR [#3958](https://github.com/vatesfr/xen-orchestra/pull/3958))
- [VM/advanced] Manage start delay for VM [#3909](https://github.com/vatesfr/xen-orchestra/issues/3909) (PR [#4002](https://github.com/vatesfr/xen-orchestra/pull/4002))
- [New/Vm] SR section: Display warning message when the selected SRs aren't in the same host [#3911](https://github.com/vatesfr/xen-orchestra/issues/3911) (PR [#3967](https://github.com/vatesfr/xen-orchestra/pull/3967))
- Enable compression for HTTP requests (and initial objects fetch)
- [VDI migration] Display same-pool SRs first in the selector [#3945](https://github.com/vatesfr/xen-orchestra/issues/3945) (PR [#3996](https://github.com/vatesfr/xen-orchestra/pull/3996))
- [Home] Save the current page in url [#3993](https://github.com/vatesfr/xen-orchestra/issues/3993) (PR [#3999](https://github.com/vatesfr/xen-orchestra/pull/3999))
- [VDI] Ensure suspend VDI is destroyed when destroying a VM [#4027](https://github.com/vatesfr/xen-orchestra/issues/4027) (PR [#4038](https://github.com/vatesfr/xen-orchestra/pull/4038))
- [VM/disk]: Warning when 2 VDIs are on 2 different hosts' local SRs [#3911](https://github.com/vatesfr/xen-orchestra/issues/3911) (PR [#3969](https://github.com/vatesfr/xen-orchestra/pull/3969))
- [Remotes] Benchmarks (read and write rate speed) added when remote is tested [#3991](https://github.com/vatesfr/xen-orchestra/issues/3991) (PR [#4015](https://github.com/vatesfr/xen-orchestra/pull/4015))
- [Cloud Config] Support both NoCloud and Config Drive 2 datasources for maximum compatibility (PR [#4053](https://github.com/vatesfr/xen-orchestra/pull/4053))
- [Advanced] Configurable cookie validity (PR [#4059](https://github.com/vatesfr/xen-orchestra/pull/4059))
- [Plugins] Display number of installed plugins [#4008](https://github.com/vatesfr/xen-orchestra/issues/4008) (PR [#4050](https://github.com/vatesfr/xen-orchestra/pull/4050))
- [Continuous Replication] Opt-in mode to guess VHD size, should help with XenServer 7.1 CU2 and various `VDI_IO_ERROR` errors (PR [#3726](https://github.com/vatesfr/xen-orchestra/pull/3726))
- [VM/Snapshots] Always delete broken quiesced snapshots [#4074](https://github.com/vatesfr/xen-orchestra/issues/4074) (PR [#4075](https://github.com/vatesfr/xen-orchestra/pull/4075))
- [Settings/Servers] Display link to pool [#4041](https://github.com/vatesfr/xen-orchestra/issues/4041) (PR [#4045](https://github.com/vatesfr/xen-orchestra/pull/4045))
- [Import] Change wording of drop zone (PR [#4020](https://github.com/vatesfr/xen-orchestra/pull/4020))
- [Backup NG] Ability to set the interval of the full backups [#1783](https://github.com/vatesfr/xen-orchestra/issues/1783) (PR [#4083](https://github.com/vatesfr/xen-orchestra/pull/4083))
- [Hosts] Display a warning icon if you have XenServer license restrictions [#4091](https://github.com/vatesfr/xen-orchestra/issues/4091) (PR [#4094](https://github.com/vatesfr/xen-orchestra/pull/4094))
- [Restore] Ability to restore a metadata backup [#4004](https://github.com/vatesfr/xen-orchestra/issues/4004) (PR [#4023](https://github.com/vatesfr/xen-orchestra/pull/4023))
- Improve connection to XCP-ng/XenServer hosts:
- never disconnect by itself even in case of errors
- never stop watching events
### Bug fixes
- [New network] PIF was wrongly required which prevented from creating a private network (PR [#4010](https://github.com/vatesfr/xen-orchestra/pull/4010))
- [Google authentication] Migrate to new endpoint
- [Backup NG] Better handling of huge logs [#4025](https://github.com/vatesfr/xen-orchestra/issues/4025) (PR [#4026](https://github.com/vatesfr/xen-orchestra/pull/4026))
- [Home/VM] Bulk migration: fixed VM VDIs not migrated to the selected SR [#3986](https://github.com/vatesfr/xen-orchestra/issues/3986) (PR [#3987](https://github.com/vatesfr/xen-orchestra/pull/3987))
- [Stats] Fix cache usage with simultaneous requests [#4017](https://github.com/vatesfr/xen-orchestra/issues/4017) (PR [#4028](https://github.com/vatesfr/xen-orchestra/pull/4028))
- [Backup NG] Fix compression displayed for the wrong backup mode (PR [#4021](https://github.com/vatesfr/xen-orchestra/pull/4021))
- [Home] Always sort the items by their names as a secondary sort criteria [#3983](https://github.com/vatesfr/xen-orchestra/issues/3983) (PR [#4047](https://github.com/vatesfr/xen-orchestra/pull/4047))
- [Remotes] Fixes `spawn mount EMFILE` error during backup
- Properly redirect to sign in page instead of being stuck in a refresh loop
- [Backup-ng] No more false positives when list matching VMs on Home page [#4078](https://github.com/vatesfr/xen-orchestra/issues/4078) (PR [#4085](https://github.com/vatesfr/xen-orchestra/pull/4085))
- [Plugins] Properly remove optional settings when unchecking _Fill information_ (PR [#4076](https://github.com/vatesfr/xen-orchestra/pull/4076))
- [Patches] (PR [#4077](https://github.com/vatesfr/xen-orchestra/pull/4077))
- Add a host to a pool: fixes the auto-patching of the host on XenServer < 7.2 [#3783](https://github.com/vatesfr/xen-orchestra/issues/3783)
- Add a host to a pool: homogenizes both the host and **pool**'s patches [#2188](https://github.com/vatesfr/xen-orchestra/issues/2188)
- Safely install a subset of patches on a pool [#3777](https://github.com/vatesfr/xen-orchestra/issues/3777)
- XCP-ng: no longer requires to run `yum install xcp-ng-updater` when it's already installed [#3934](https://github.com/vatesfr/xen-orchestra/issues/3934)
### Released packages
- xen-api v0.25.0
- vhd-lib v0.6.0
- @xen-orchestra/fs v0.8.0
- xo-server-usage-report v0.7.2
- xo-server v5.38.1
- xo-web v5.38.0
## **5.32.2** (2019-02-28)
### Bug fixes
- Fix XAPI events monitoring on old version (XenServer 7.2)
## **5.32.1** (2019-02-28)
### Bug fixes
- Fix a very short timeout in the monitoring of XAPI events which may lead to unresponsive XenServer hosts
## **5.32.0** (2019-02-28)
### Enhancements

View File

@@ -2,9 +2,21 @@
### Enhancements
- [Self/New VM] Display confirmation modal when user will use a large amount of resources [#4044](https://github.com/vatesfr/xen-orchestra/issues/4044) (PR [#4127](https://github.com/vatesfr/xen-orchestra/pull/4127))
- [Home] No more false positives when select Tag on Home page [#4087](https://github.com/vatesfr/xen-orchestra/issues/4087) (PR [#4112](https://github.com/vatesfr/xen-orchestra/pull/4112))
- [VDI migration, New disk] Warning when SR host is different from the other disks [#3911](https://github.com/vatesfr/xen-orchestra/issues/3911) (PR [#4035](https://github.com/vatesfr/xen-orchestra/pull/4035))
- [Backup reports] Support metadata backups (PR [#4084](https://github.com/vatesfr/xen-orchestra/pull/4084))
- [Attach disk] Display warning message when VDI SR is on different host from the other disks [#3911](https://github.com/vatesfr/xen-orchestra/issues/3911) (PR [#4117](https://github.com/vatesfr/xen-orchestra/pull/4117))
- [Self/New VM] Add network config box to custom cloud-init [#3872](https://github.com/vatesfr/xen-orchestra/issues/3872) (PR [#4150](https://github.com/vatesfr/xen-orchestra/pull/4150))
- [Editable] Notify user when editable undo fails [#3799](https://github.com/vatesfr/xen-orchestra/issues/3799) (PR [#4150](https://github.com/vatesfr/xen-orchestra/pull/4157))
### Bug fixes
- [Self/New VM] Fix missing templates when refreshing page [#3265](https://github.com/vatesfr/xen-orchestra/issues/3265) (PR [#3565](https://github.com/vatesfr/xen-orchestra/pull/3565))
### Released packages
- xo-server v5.37.0
- xo-web v5.37.0
- xo-server-backup-reports v0.16.0
- complex-matcher v0.6.0
- xo-server v5.40.0
- xo-web v5.40.0

View File

@@ -33,6 +33,7 @@
* [Disaster recovery](disaster_recovery.md)
* [Smart Backup](smart_backup.md)
* [File level Restore](file_level_restore.md)
* [Metadata Backup](metadata_backup.md)
* [Backup Concurrency](concurrency.md)
* [Configure backup reports](backup_reports.md)
* [Backup troubleshooting](backup_troubleshooting.md)

BIN
docs/assets/cr-seed-1.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 12 KiB

BIN
docs/assets/cr-seed-2.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 14 KiB

BIN
docs/assets/cr-seed-3.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 15 KiB

BIN
docs/assets/cr-seed-4.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 17 KiB

BIN
docs/assets/metadata-1.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 9.4 KiB

BIN
docs/assets/metadata-2.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 71 KiB

BIN
docs/assets/metadata-3.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 22 KiB

BIN
docs/assets/metadata-4.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 48 KiB

BIN
docs/assets/metadata-5.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 55 KiB

BIN
docs/assets/metadata-6.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 57 KiB

BIN
docs/assets/metadata-7.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 11 KiB

View File

@@ -12,7 +12,9 @@ Another good way to check if there is activity is the XOA VM stats view (on the
### VDI chain protection
This means your previous VM disks and snapshots should be "merged" (*coalesced* in the XenServer world) before we can take a new snapshot. This mechanism is handled by XenServer itself, not Xen Orchestra. But we can check your existing VDI chain and avoid creating more snapshots than your storage can merge. Otherwise, this will lead to catastrophic consequences. Xen Orchestra is the **only** XenServer/XCP backup product dealing with this.
Backup jobs regularly delete snapshots. When a snapshot is deleted, either manually or via a backup job, it triggers the need for Xenserver to coalesce the VDI chain - to merge the remaining VDIs and base copies in the chain. This means generally we cannot take too many new snapshots on said VM until Xenserver has finished running a coalesce job on the VDI chain.
This mechanism and scheduling is handled by XenServer itself, not Xen Orchestra. But we can check your existing VDI chain and avoid creating more snapshots than your storage can merge. If we don't, this will lead to catastrophic consequences. Xen Orchestra is the **only** XenServer/XCP backup product that takes this into account and offers protection.
Without this detection, you could have 2 potential issues:
@@ -21,9 +23,9 @@ Without this detection, you could have 2 potential issues:
The first issue is a chain that contains more than 30 elements (fixed XenServer limit), and the other one means it's full because the "coalesce" process couldn't keep up the pace and the storage filled up.
In the end, this message is a **protection mechanism against damaging your SR**. The backup job will fail, but XenServer itself should eventually automatically coalesce the snapshot chain, and the the next time the backup job should complete.
In the end, this message is a **protection mechanism preventing damage to your SR**. The backup job will fail, but XenServer itself should eventually automatically coalesce the snapshot chain, and the the next time the backup job should complete.
Just remember this: **coalesce will happen every time a snapshot is removed**.
Just remember this: **a coalesce should happen every time a snapshot is removed**.
> You can read more on this on our dedicated blog post regarding [XenServer coalesce detection](https://xen-orchestra.com/blog/xenserver-coalesce-detection-in-xen-orchestra/).
@@ -37,7 +39,9 @@ Coalesce jobs can also fail to run if the SR does not have enough free space. Ch
You can check if a coalesce job is currently active by running `ps axf | grep vhd` on the XenServer host and looking for a VHD process in the results (one of the resulting processes will be the grep command you just ran, ignore that one).
If you don't see any running coalesce jobs, and can't find any other reason that XenServer has not started one, you can attempt to make it start a coalesce job by rescanning the SR. This is harmless to try, but will not always result in a coalesce. Visit the problematic SR in the XOA UI, then click the "Rescan All Disks" button towards the top right: it looks like a refresh circle icon. This should begin the coalesce process - if you click the Advanced tab in the SR view, the "disks needing to be coalesced" list should become smaller and smaller.
If you don't see any running coalesce jobs, and can't find any other reason that XenServer has not started one, you can attempt to make it start a coalesce job by rescanning the SR. This is harmless to try, but will not always result in a coalesce. Visit the problematic SR in the XOA UI, then click the "Rescan All Disks" button towards the top right: it looks like a refresh circle icon. This should begin the coalesce process - if you click the Advanced tab in the SR view, the "disks needing to be coalesced" list should become smaller and smaller.
As a last resort, migrating the VM (more specifically, its disks) to a new storage repository will also force a coalesce and solve this issue. That means migrating a VM to another host (with its own storage) and back will force the VDI chain for that VM to be coalesced, and get rid of the `VDI Chain Protection` message.
### Parse Error

View File

@@ -10,6 +10,7 @@ There are several ways to protect your VMs:
* [Rolling Snapshots](rolling_snapshots.md) [*Starter Edition*]
* [Delta Backups](delta_backups.md) (best of both previous ones) [*Enterprise Edition*]
* [Disaster Recovery](disaster_recovery.md) [*Enterprise Edition*]
* [Metadata Backups](metadata_backup.md) [*Enterprise Edition*]
* [Continuous Replication](continuous_replication.md) [*Premium Edition*]
* [File Level Restore](file_level_restore.md) [*Premium Edition*]

View File

@@ -43,11 +43,19 @@ To protect the replication, we removed the possibility to boot your copied VM di
### Job creation
Create the Continuous Replication backup job, and leave it disabled for now. On the main Backup-NG page, note its identifiers, the main `backupJobId` and the ID of one on the schedules for the job, `backupScheduleId`.
Create the Continuous Replication backup job, and leave it disabled for now. On the main Backup-NG page, copy the job's `backupJobId` by hovering to the left of the shortened ID and clicking the copy to clipboard button:
![](./assets/cr-seed-1.png)
Copy it somewhere temporarily. Now we need to also copy the ID of the job schedule, `backupScheduleId`. Do this by hovering over the schedule name in the same panel as before, and clicking the copy to clipboard button. Keep it with the `backupJobId` you copied previously as we will need them all later:
![](./assets/cr-seed-2.png)
### Seed creation
Manually create a snapshot on the VM to backup, and note its UUID as `snapshotUuid` from the snapshot panel for the VM.
Manually create a snapshot on the VM being backed up, then copy this snapshot UUID, `snapshotUuid` from the snapshot panel of the VM:
![](./assets/cr-seed-3.png)
> DO NOT ever delete or alter this snapshot, feel free to rename it to make that clear.
@@ -55,7 +63,9 @@ Manually create a snapshot on the VM to backup, and note its UUID as `snapshotUu
Export this snapshot to a file, then import it on the target SR.
Note the UUID of this newly created VM as `targetVmUuid`.
We need to copy the UUID of this newly created VM as well, `targetVmUuid`:
![](./assets/cr-seed-4.png)
> DO not start this VM or it will break the Continuous Replication job! You can rename this VM to more easily remember this.
@@ -66,7 +76,7 @@ The XOA backup system requires metadata to correctly associate the source snapsh
First install the tool (all the following is done from the XOA VM CLI):
```
npm i -g xo-cr-seed
sudo npm i -g --unsafe-perm @xen-orchestra/cr-seed-cli
```
Here is an example of how the utility expects the UUIDs and info passed to it:

48
docs/metadata_backup.md Normal file
View File

@@ -0,0 +1,48 @@
# Metadata backup
> WARNING: Metadata backup is an experimental feature. Unexpected issues are possible, but unlikely.
## Introduction
XCP-ng and Citrix Hypervisor (Xenserver) hosts use a database to store metadata about VMs and their associated resources such as storage and networking. Metadata forms this complete view of all VMs available on your pool. Backing up the metadata of your pool allows you to recover from a physical hardware failure scenario in which you lose your hosts without losing your storage (SAN, NAS...).
In Xen Orchestra, Metadata backup is divided into two different options:
* Pool metadata backup
* XO configuration backup
### Performing a backup
In the backup job section, when creating a new backup job, you will now have a choice between backing up VMs and backing up Metadata:
![](./assets/metadata-1.png)
When you select Metadata backup, you will have a new backup job screen, letting you choose between a pool metadata backup and an XO configuration backup (or both at the same time):
![](./assets/metadata-2.png)
Define the name and retention for the job.
![](./assets/metadata-3.png)
Once created, the job is displayed with the other classic jobs.
![](./assets/metadata-4.png)
### Performing a restore
> WARNING: restoring pool metadata completely overwrites the XAPI database of a host. Only perform a metadata restore if it is a new server with nothing running on it (eg replacing a host with new hardware).
If you browse to the Backup NG Restore panel, you will now notice a Metadata filter button:
![](./assets/metadata-5.png)
If you click this button, it will show you Metadata backups available for restore:
![](./assets/metadata-6.png)
You can see both our Xen Orchestra config backup, and our pool metadata backup. To restore one, simply click the blue restore arrow, choose a backup date to restore, and click OK:
![](./assets/metadata-7.png)
That's it!

View File

@@ -4,25 +4,26 @@
"@babel/register": "^7.0.0",
"babel-core": "^7.0.0-0",
"babel-eslint": "^10.0.1",
"babel-jest": "^23.0.1",
"babel-jest": "^24.1.0",
"benchmark": "^2.1.4",
"eslint": "^5.1.0",
"eslint-config-prettier": "^3.3.0",
"eslint-config-prettier": "^4.1.0",
"eslint-config-standard": "12.0.0",
"eslint-config-standard-jsx": "^6.0.2",
"eslint-plugin-eslint-comments": "^3.1.1",
"eslint-plugin-import": "^2.8.0",
"eslint-plugin-node": "^8.0.0",
"eslint-plugin-promise": "^4.0.0",
"eslint-plugin-react": "^7.6.1",
"eslint-plugin-standard": "^4.0.0",
"exec-promise": "^0.7.0",
"flow-bin": "^0.90.0",
"flow-bin": "^0.97.0",
"globby": "^9.0.0",
"husky": "^1.2.1",
"jest": "^23.0.1",
"jest": "^24.1.0",
"lodash": "^4.17.4",
"prettier": "^1.10.2",
"promise-toolbox": "^0.11.0",
"promise-toolbox": "^0.12.1",
"sorted-object": "^2.0.1"
},
"engines": {

View File

@@ -1,6 +1,6 @@
{
"name": "complex-matcher",
"version": "0.5.0",
"version": "0.6.0",
"license": "ISC",
"description": "",
"keywords": [],
@@ -25,7 +25,7 @@
">2%"
],
"engines": {
"node": ">=4"
"node": ">=6"
},
"dependencies": {
"lodash": "^4.17.4"

View File

@@ -599,6 +599,13 @@ export const parse = parser.parse.bind(parser)
// -------------------------------------------------------------------
const _extractStringFromRegexp = child => {
const unescapedRegexp = child.re.source.replace(/^(\^)|\\|\$$/g, '')
if (child.re.source === `^${escapeRegExp(unescapedRegexp)}$`) {
return unescapedRegexp
}
}
const _getPropertyClauseStrings = ({ child }) => {
if (child instanceof Or) {
const strings = []
@@ -606,6 +613,12 @@ const _getPropertyClauseStrings = ({ child }) => {
if (child instanceof StringNode) {
strings.push(child.value)
}
if (child instanceof RegExpNode) {
const unescapedRegexp = _extractStringFromRegexp(child)
if (unescapedRegexp !== undefined) {
strings.push(unescapedRegexp)
}
}
})
return strings
}
@@ -613,6 +626,12 @@ const _getPropertyClauseStrings = ({ child }) => {
if (child instanceof StringNode) {
return [child.value]
}
if (child instanceof RegExpNode) {
const unescapedRegexp = _extractStringFromRegexp(child)
if (unescapedRegexp !== undefined) {
return [unescapedRegexp]
}
}
return []
}

View File

@@ -12,10 +12,13 @@ import {
} from './'
it('getPropertyClausesStrings', () => {
const tmp = getPropertyClausesStrings(parse('foo bar:baz baz:|(foo bar)'))
const tmp = getPropertyClausesStrings(
parse('foo bar:baz baz:|(foo bar /^boo$/ /^far$/) foo:/^bar$/')
)
expect(tmp).toEqual({
bar: ['baz'],
baz: ['foo', 'bar'],
baz: ['foo', 'bar', 'boo', 'far'],
foo: ['bar'],
})
})

View File

@@ -25,7 +25,7 @@
">2%"
],
"engines": {
"node": ">=4"
"node": ">=6"
},
"dependencies": {},
"devDependencies": {

View File

@@ -1,6 +1,6 @@
{
"name": "vhd-cli",
"version": "0.2.0",
"version": "0.3.0",
"license": "ISC",
"description": "",
"keywords": [],
@@ -27,12 +27,12 @@
"node": ">=6"
},
"dependencies": {
"@xen-orchestra/fs": "^0.7.1",
"@xen-orchestra/fs": "^0.8.0",
"cli-progress": "^2.0.0",
"exec-promise": "^0.7.0",
"getopts": "^2.2.3",
"struct-fu": "^1.2.0",
"vhd-lib": "^0.5.1"
"vhd-lib": "^0.6.1"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
@@ -42,9 +42,9 @@
"cross-env": "^5.1.3",
"execa": "^1.0.0",
"index-modules": "^0.3.0",
"promise-toolbox": "^0.11.0",
"promise-toolbox": "^0.12.1",
"rimraf": "^2.6.1",
"tmp": "^0.0.33"
"tmp": "^0.1.0"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

View File

@@ -0,0 +1,33 @@
import { asCallback, fromCallback, fromEvent } from 'promise-toolbox'
import { getHandler } from '@xen-orchestra/fs'
import { relative } from 'path'
import { start as createRepl } from 'repl'
import Vhd, * as vhdLib from 'vhd-lib'
export default async args => {
const cwd = process.cwd()
const handler = getHandler({ url: 'file://' + cwd })
await handler.sync()
try {
const repl = createRepl({
prompt: 'vhd> ',
})
Object.assign(repl.context, vhdLib)
repl.context.handler = handler
repl.context.open = path => new Vhd(handler, relative(cwd, path))
// Make the REPL waits for promise completion.
repl.eval = (evaluate => (cmd, context, filename, cb) => {
asCallback.call(
fromCallback(cb => {
evaluate.call(repl, cmd, context, filename, cb)
}).then(value => (Array.isArray(value) ? Promise.all(value) : value)),
cb
)
})(repl.eval)
await fromEvent(repl, 'exit')
} finally {
await handler.forget()
}
}

View File

@@ -1,38 +1,40 @@
/* eslint-env jest */
import asyncIteratorToStream from 'async-iterator-to-stream'
import execa from 'execa'
import fs from 'fs-extra'
import getStream from 'get-stream'
import rimraf from 'rimraf'
import tmp from 'tmp'
import { fromEvent, pFromCallback } from 'promise-toolbox'
import { getHandler } from '@xen-orchestra/fs'
import { pFromCallback } from 'promise-toolbox'
import { pipeline } from 'readable-stream'
import { randomBytes } from 'crypto'
import Vhd, { chainVhd, createSyntheticStream, mergeVhd as vhdMerge } from './'
import { SECTOR_SIZE } from './src/_constants'
const initialDir = process.cwd()
let tempDir = null
jest.setTimeout(60000)
beforeEach(async () => {
const dir = await pFromCallback(cb => tmp.dir(cb))
process.chdir(dir)
tempDir = await pFromCallback(cb => tmp.dir(cb))
})
afterEach(async () => {
const tmpDir = process.cwd()
process.chdir(initialDir)
await pFromCallback(cb => rimraf(tmpDir, cb))
await pFromCallback(cb => rimraf(tempDir, cb))
})
async function createRandomFile(name, sizeMb) {
await execa('bash', [
'-c',
`< /dev/urandom tr -dc "\\t\\n [:alnum:]" | head -c ${sizeMb}M >${name}`,
])
async function createRandomFile(name, sizeMB) {
const createRandomStream = asyncIteratorToStream(function*(size) {
while (size-- > 0) {
yield Buffer.from([Math.floor(Math.random() * 256)])
}
})
const input = createRandomStream(sizeMB * 1024 * 1024)
await pFromCallback(cb => pipeline(input, fs.createWriteStream(name), cb))
}
async function checkFile(vhdName) {
@@ -53,31 +55,35 @@ async function convertFromRawToVhd(rawName, vhdName) {
test('blocks can be moved', async () => {
const initalSize = 4
await createRandomFile('randomfile', initalSize)
await convertFromRawToVhd('randomfile', 'randomfile.vhd')
const handler = getHandler({ url: 'file://' + process.cwd() })
const originalSize = await handler.getSize('randomfile')
const newVhd = new Vhd(handler, 'randomfile.vhd')
const rawFileName = `${tempDir}/randomfile`
await createRandomFile(rawFileName, initalSize)
const vhdFileName = `${tempDir}/randomfile.vhd`
await convertFromRawToVhd(rawFileName, vhdFileName)
const handler = getHandler({ url: 'file://' })
const originalSize = await handler.getSize(rawFileName)
const newVhd = new Vhd(handler, vhdFileName)
await newVhd.readHeaderAndFooter()
await newVhd.readBlockAllocationTable()
await newVhd._freeFirstBlockSpace(8000000)
await recoverRawContent('randomfile.vhd', 'recovered', originalSize)
expect(await fs.readFile('recovered')).toEqual(
await fs.readFile('randomfile')
const recoveredFileName = `${tempDir}/recovered`
await recoverRawContent(vhdFileName, recoveredFileName, originalSize)
expect(await fs.readFile(recoveredFileName)).toEqual(
await fs.readFile(rawFileName)
)
})
test('the BAT MSB is not used for sign', async () => {
const randomBuffer = await pFromCallback(cb => randomBytes(SECTOR_SIZE, cb))
await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', '1.8T'])
const handler = getHandler({ url: 'file://' + process.cwd() })
const vhd = new Vhd(handler, 'empty.vhd')
const emptyFileName = `${tempDir}/empty.vhd`
await execa('qemu-img', ['create', '-fvpc', emptyFileName, '1.8T'])
const handler = getHandler({ url: 'file://' })
const vhd = new Vhd(handler, emptyFileName)
await vhd.readHeaderAndFooter()
await vhd.readBlockAllocationTable()
// we want the bit 31 to be on, to prove it's not been used for sign
const hugeWritePositionSectors = Math.pow(2, 31) + 200
await vhd.writeData(hugeWritePositionSectors, randomBuffer)
await checkFile('empty.vhd')
await checkFile(emptyFileName)
// here we are moving the first sector very far in the VHD to prove the BAT doesn't use signed int32
const hugePositionBytes = hugeWritePositionSectors * SECTOR_SIZE
await vhd._freeFirstBlockSpace(hugePositionBytes)
@@ -85,9 +91,10 @@ test('the BAT MSB is not used for sign', async () => {
// we recover the data manually for speed reasons.
// fs.write() with offset is way faster than qemu-img when there is a 1.5To
// hole before the block of data
const recoveredFile = await fs.open('recovered', 'w')
const recoveredFileName = `${tempDir}/recovered`
const recoveredFile = await fs.open(recoveredFileName, 'w')
try {
const vhd2 = new Vhd(handler, 'empty.vhd')
const vhd2 = new Vhd(handler, emptyFileName)
await vhd2.readHeaderAndFooter()
await vhd2.readBlockAllocationTable()
for (let i = 0; i < vhd.header.maxTableEntries; i++) {
@@ -107,7 +114,7 @@ test('the BAT MSB is not used for sign', async () => {
fs.close(recoveredFile)
}
const recovered = await getStream.buffer(
await fs.createReadStream('recovered', {
await fs.createReadStream(recoveredFileName, {
start: hugePositionBytes,
end: hugePositionBytes + randomBuffer.length - 1,
})
@@ -117,27 +124,33 @@ test('the BAT MSB is not used for sign', async () => {
test('writeData on empty file', async () => {
const mbOfRandom = 3
await createRandomFile('randomfile', mbOfRandom)
await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', mbOfRandom + 'M'])
const randomData = await fs.readFile('randomfile')
const handler = getHandler({ url: 'file://' + process.cwd() })
const originalSize = await handler.getSize('randomfile')
const newVhd = new Vhd(handler, 'empty.vhd')
const rawFileName = `${tempDir}/randomfile`
const emptyFileName = `${tempDir}/empty.vhd`
await createRandomFile(rawFileName, mbOfRandom)
await execa('qemu-img', ['create', '-fvpc', emptyFileName, mbOfRandom + 'M'])
const randomData = await fs.readFile(rawFileName)
const handler = getHandler({ url: 'file://' })
const originalSize = await handler.getSize(rawFileName)
const newVhd = new Vhd(handler, emptyFileName)
await newVhd.readHeaderAndFooter()
await newVhd.readBlockAllocationTable()
await newVhd.writeData(0, randomData)
await recoverRawContent('empty.vhd', 'recovered', originalSize)
expect(await fs.readFile('recovered')).toEqual(randomData)
const recoveredFileName = `${tempDir}/recovered`
await recoverRawContent(emptyFileName, recoveredFileName, originalSize)
expect(await fs.readFile(recoveredFileName)).toEqual(randomData)
})
test('writeData in 2 non-overlaping operations', async () => {
const mbOfRandom = 3
await createRandomFile('randomfile', mbOfRandom)
await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', mbOfRandom + 'M'])
const randomData = await fs.readFile('randomfile')
const handler = getHandler({ url: 'file://' + process.cwd() })
const originalSize = await handler.getSize('randomfile')
const newVhd = new Vhd(handler, 'empty.vhd')
const rawFileName = `${tempDir}/randomfile`
const emptyFileName = `${tempDir}/empty.vhd`
const recoveredFileName = `${tempDir}/recovered`
await createRandomFile(rawFileName, mbOfRandom)
await execa('qemu-img', ['create', '-fvpc', emptyFileName, mbOfRandom + 'M'])
const randomData = await fs.readFile(rawFileName)
const handler = getHandler({ url: 'file://' })
const originalSize = await handler.getSize(rawFileName)
const newVhd = new Vhd(handler, emptyFileName)
await newVhd.readHeaderAndFooter()
await newVhd.readBlockAllocationTable()
const splitPointSectors = 2
@@ -146,18 +159,21 @@ test('writeData in 2 non-overlaping operations', async () => {
splitPointSectors,
randomData.slice(splitPointSectors * 512)
)
await recoverRawContent('empty.vhd', 'recovered', originalSize)
expect(await fs.readFile('recovered')).toEqual(randomData)
await recoverRawContent(emptyFileName, recoveredFileName, originalSize)
expect(await fs.readFile(recoveredFileName)).toEqual(randomData)
})
test('writeData in 2 overlaping operations', async () => {
const mbOfRandom = 3
await createRandomFile('randomfile', mbOfRandom)
await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', mbOfRandom + 'M'])
const randomData = await fs.readFile('randomfile')
const handler = getHandler({ url: 'file://' + process.cwd() })
const originalSize = await handler.getSize('randomfile')
const newVhd = new Vhd(handler, 'empty.vhd')
const rawFileName = `${tempDir}/randomfile`
const emptyFileName = `${tempDir}/empty.vhd`
const recoveredFileName = `${tempDir}/recovered`
await createRandomFile(rawFileName, mbOfRandom)
await execa('qemu-img', ['create', '-fvpc', emptyFileName, mbOfRandom + 'M'])
const randomData = await fs.readFile(rawFileName)
const handler = getHandler({ url: 'file://' })
const originalSize = await handler.getSize(rawFileName)
const newVhd = new Vhd(handler, emptyFileName)
await newVhd.readHeaderAndFooter()
await newVhd.readBlockAllocationTable()
const endFirstWrite = 3
@@ -167,119 +183,138 @@ test('writeData in 2 overlaping operations', async () => {
startSecondWrite,
randomData.slice(startSecondWrite * 512)
)
await recoverRawContent('empty.vhd', 'recovered', originalSize)
expect(await fs.readFile('recovered')).toEqual(randomData)
await recoverRawContent(emptyFileName, recoveredFileName, originalSize)
expect(await fs.readFile(recoveredFileName)).toEqual(randomData)
})
test('BAT can be extended and blocks moved', async () => {
const initalSize = 4
await createRandomFile('randomfile', initalSize)
await convertFromRawToVhd('randomfile', 'randomfile.vhd')
const handler = getHandler({ url: 'file://' + process.cwd() })
const originalSize = await handler.getSize('randomfile')
const newVhd = new Vhd(handler, 'randomfile.vhd')
const rawFileName = `${tempDir}/randomfile`
const recoveredFileName = `${tempDir}/recovered`
const vhdFileName = `${tempDir}/randomfile.vhd`
await createRandomFile(rawFileName, initalSize)
await convertFromRawToVhd(rawFileName, vhdFileName)
const handler = getHandler({ url: 'file://' })
const originalSize = await handler.getSize(rawFileName)
const newVhd = new Vhd(handler, vhdFileName)
await newVhd.readHeaderAndFooter()
await newVhd.readBlockAllocationTable()
await newVhd.ensureBatSize(2000)
await recoverRawContent('randomfile.vhd', 'recovered', originalSize)
expect(await fs.readFile('recovered')).toEqual(
await fs.readFile('randomfile')
await recoverRawContent(vhdFileName, recoveredFileName, originalSize)
expect(await fs.readFile(recoveredFileName)).toEqual(
await fs.readFile(rawFileName)
)
})
test('coalesce works with empty parent files', async () => {
const mbOfRandom = 2
await createRandomFile('randomfile', mbOfRandom)
await convertFromRawToVhd('randomfile', 'randomfile.vhd')
const rawFileName = `${tempDir}/randomfile`
const emptyFileName = `${tempDir}/empty.vhd`
const vhdFileName = `${tempDir}/randomfile.vhd`
const recoveredFileName = `${tempDir}/recovered`
await createRandomFile(rawFileName, mbOfRandom)
await convertFromRawToVhd(rawFileName, vhdFileName)
await execa('qemu-img', [
'create',
'-fvpc',
'empty.vhd',
emptyFileName,
mbOfRandom + 1 + 'M',
])
await checkFile('randomfile.vhd')
await checkFile('empty.vhd')
const handler = getHandler({ url: 'file://' + process.cwd() })
const originalSize = await handler._getSize('randomfile')
await chainVhd(handler, 'empty.vhd', handler, 'randomfile.vhd', true)
await checkFile('randomfile.vhd')
await checkFile('empty.vhd')
await vhdMerge(handler, 'empty.vhd', handler, 'randomfile.vhd')
await recoverRawContent('empty.vhd', 'recovered', originalSize)
expect(await fs.readFile('recovered')).toEqual(
await fs.readFile('randomfile')
await checkFile(vhdFileName)
await checkFile(emptyFileName)
const handler = getHandler({ url: 'file://' })
const originalSize = await handler._getSize(rawFileName)
await chainVhd(handler, emptyFileName, handler, vhdFileName, true)
await checkFile(vhdFileName)
await checkFile(emptyFileName)
await vhdMerge(handler, emptyFileName, handler, vhdFileName)
await recoverRawContent(emptyFileName, recoveredFileName, originalSize)
expect(await fs.readFile(recoveredFileName)).toEqual(
await fs.readFile(rawFileName)
)
})
test('coalesce works in normal cases', async () => {
const mbOfRandom = 5
await createRandomFile('randomfile', mbOfRandom)
await createRandomFile('small_randomfile', Math.ceil(mbOfRandom / 2))
const randomFileName = `${tempDir}/randomfile`
const random2FileName = `${tempDir}/randomfile2`
const smallRandomFileName = `${tempDir}/small_randomfile`
const parentFileName = `${tempDir}/parent.vhd`
const child1FileName = `${tempDir}/child1.vhd`
const child2FileName = `${tempDir}/child2.vhd`
const recoveredFileName = `${tempDir}/recovered`
await createRandomFile(randomFileName, mbOfRandom)
await createRandomFile(smallRandomFileName, Math.ceil(mbOfRandom / 2))
await execa('qemu-img', [
'create',
'-fvpc',
'parent.vhd',
parentFileName,
mbOfRandom + 1 + 'M',
])
await convertFromRawToVhd('randomfile', 'child1.vhd')
const handler = getHandler({ url: 'file://' + process.cwd() })
await execa('vhd-util', ['snapshot', '-n', 'child2.vhd', '-p', 'child1.vhd'])
const vhd = new Vhd(handler, 'child2.vhd')
await convertFromRawToVhd(randomFileName, child1FileName)
const handler = getHandler({ url: 'file://' })
await execa('vhd-util', [
'snapshot',
'-n',
child2FileName,
'-p',
child1FileName,
])
const vhd = new Vhd(handler, child2FileName)
await vhd.readHeaderAndFooter()
await vhd.readBlockAllocationTable()
vhd.footer.creatorApplication = 'xoa'
await vhd.writeFooter()
const originalSize = await handler._getSize('randomfile')
await chainVhd(handler, 'parent.vhd', handler, 'child1.vhd', true)
await execa('vhd-util', ['check', '-t', '-n', 'child1.vhd'])
await chainVhd(handler, 'child1.vhd', handler, 'child2.vhd', true)
await execa('vhd-util', ['check', '-t', '-n', 'child2.vhd'])
const smallRandom = await fs.readFile('small_randomfile')
const newVhd = new Vhd(handler, 'child2.vhd')
const originalSize = await handler._getSize(randomFileName)
await chainVhd(handler, parentFileName, handler, child1FileName, true)
await execa('vhd-util', ['check', '-t', '-n', child1FileName])
await chainVhd(handler, child1FileName, handler, child2FileName, true)
await execa('vhd-util', ['check', '-t', '-n', child2FileName])
const smallRandom = await fs.readFile(smallRandomFileName)
const newVhd = new Vhd(handler, child2FileName)
await newVhd.readHeaderAndFooter()
await newVhd.readBlockAllocationTable()
await newVhd.writeData(5, smallRandom)
await checkFile('child2.vhd')
await checkFile('child1.vhd')
await checkFile('parent.vhd')
await vhdMerge(handler, 'parent.vhd', handler, 'child1.vhd')
await checkFile('parent.vhd')
await chainVhd(handler, 'parent.vhd', handler, 'child2.vhd', true)
await checkFile('child2.vhd')
await vhdMerge(handler, 'parent.vhd', handler, 'child2.vhd')
await checkFile('parent.vhd')
await recoverRawContent(
'parent.vhd',
'recovered_from_coalescing',
originalSize
)
await execa('cp', ['randomfile', 'randomfile2'])
const fd = await fs.open('randomfile2', 'r+')
await checkFile(child2FileName)
await checkFile(child1FileName)
await checkFile(parentFileName)
await vhdMerge(handler, parentFileName, handler, child1FileName)
await checkFile(parentFileName)
await chainVhd(handler, parentFileName, handler, child2FileName, true)
await checkFile(child2FileName)
await vhdMerge(handler, parentFileName, handler, child2FileName)
await checkFile(parentFileName)
await recoverRawContent(parentFileName, recoveredFileName, originalSize)
await execa('cp', [randomFileName, random2FileName])
const fd = await fs.open(random2FileName, 'r+')
try {
await fs.write(fd, smallRandom, 0, smallRandom.length, 5 * SECTOR_SIZE)
} finally {
await fs.close(fd)
}
expect(await fs.readFile('recovered_from_coalescing')).toEqual(
await fs.readFile('randomfile2')
expect(await fs.readFile(recoveredFileName)).toEqual(
await fs.readFile(random2FileName)
)
})
test('createSyntheticStream passes vhd-util check', async () => {
test.only('createSyntheticStream passes vhd-util check', async () => {
const initalSize = 4
const expectedVhdSize = 4197888
await createRandomFile('randomfile', initalSize)
await convertFromRawToVhd('randomfile', 'randomfile.vhd')
const handler = getHandler({ url: 'file://' + process.cwd() })
const stream = await createSyntheticStream(handler, 'randomfile.vhd')
expect(stream.length).toEqual(expectedVhdSize)
await fromEvent(
stream.pipe(await fs.createWriteStream('recovered.vhd')),
'finish'
const rawFileName = `${tempDir}/randomfile`
const vhdFileName = `${tempDir}/randomfile.vhd`
const recoveredVhdFileName = `${tempDir}/recovered.vhd`
await createRandomFile(rawFileName, initalSize)
await convertFromRawToVhd(rawFileName, vhdFileName)
await checkFile(vhdFileName)
const handler = getHandler({ url: 'file://' })
const stream = await createSyntheticStream(handler, vhdFileName)
const expectedVhdSize = (await fs.stat(vhdFileName)).size
expect(stream.length).toEqual((await fs.stat(vhdFileName)).size)
await pFromCallback(cb =>
pipeline(stream, fs.createWriteStream(recoveredVhdFileName), cb)
)
await checkFile('recovered.vhd')
const stats = await fs.stat('recovered.vhd')
await checkFile(recoveredVhdFileName)
const stats = await fs.stat(recoveredVhdFileName)
expect(stats.size).toEqual(expectedVhdSize)
await execa('qemu-img', ['compare', 'recovered.vhd', 'randomfile'])
await execa('qemu-img', ['compare', recoveredVhdFileName, rawFileName])
})

View File

@@ -1,6 +1,6 @@
{
"name": "vhd-lib",
"version": "0.5.1",
"version": "0.6.1",
"license": "AGPL-3.0",
"description": "Primitives for VHD file handling",
"keywords": [],
@@ -22,11 +22,11 @@
},
"dependencies": {
"async-iterator-to-stream": "^1.0.2",
"core-js": "3.0.0-beta.3",
"core-js": "^3.0.0",
"from2": "^2.3.0",
"fs-extra": "^7.0.0",
"limit-concurrency-decorator": "^0.4.0",
"promise-toolbox": "^0.11.0",
"promise-toolbox": "^0.12.1",
"struct-fu": "^1.2.0",
"uuid": "^3.0.1"
},
@@ -35,15 +35,16 @@
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"@babel/preset-flow": "^7.0.0",
"@xen-orchestra/fs": "^0.7.1",
"@xen-orchestra/fs": "^0.8.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^5.1.3",
"execa": "^1.0.0",
"fs-promise": "^2.0.0",
"get-stream": "^4.0.0",
"index-modules": "^0.3.0",
"readable-stream": "^3.0.6",
"rimraf": "^2.6.2",
"tmp": "^0.0.33"
"tmp": "^0.1.0"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

View File

@@ -0,0 +1,20 @@
import assert from 'assert'
import {
DISK_TYPE_DIFFERENCING,
DISK_TYPE_DYNAMIC,
FILE_FORMAT_VERSION,
FOOTER_COOKIE,
FOOTER_SIZE,
} from './_constants'
export default footer => {
assert.strictEqual(footer.cookie, FOOTER_COOKIE)
assert.strictEqual(footer.dataOffset, FOOTER_SIZE)
assert.strictEqual(footer.fileFormatVersion, FILE_FORMAT_VERSION)
assert(footer.originalSize <= footer.currentSize)
assert(
footer.diskType === DISK_TYPE_DIFFERENCING ||
footer.diskType === DISK_TYPE_DYNAMIC
)
}

View File

@@ -0,0 +1,14 @@
import assert from 'assert'
import { HEADER_COOKIE, HEADER_VERSION, SECTOR_SIZE } from './_constants'
export default (header, footer) => {
assert.strictEqual(header.cookie, HEADER_COOKIE)
assert.strictEqual(header.dataOffset, undefined)
assert.strictEqual(header.headerVersion, HEADER_VERSION)
assert(Number.isInteger(Math.log2(header.blockSize / SECTOR_SIZE)))
if (footer !== undefined) {
assert(header.maxTableEntries >= footer.currentSize / header.blockSize)
}
}

View File

@@ -0,0 +1,45 @@
import assert from 'assert'
import { BLOCK_UNUSED } from './_constants'
// get the identifiers and first sectors of the first and last block
// in the file
export default bat => {
const n = bat.length
assert.notStrictEqual(n, 0)
assert.strictEqual(n % 4, 0)
let i = 0
let j = 0
let first, firstSector, last, lastSector
// get first allocated block for initialization
while ((firstSector = bat.readUInt32BE(j)) === BLOCK_UNUSED) {
i += 1
j += 4
if (j === n) {
return
}
}
lastSector = firstSector
first = last = i
while (j < n) {
const sector = bat.readUInt32BE(j)
if (sector !== BLOCK_UNUSED) {
if (sector < firstSector) {
first = i
firstSector = sector
} else if (sector > lastSector) {
last = i
lastSector = sector
}
}
i += 1
j += 4
}
return { first, firstSector, last, lastSector }
}

View File

@@ -0,0 +1,50 @@
export default async function readChunk(stream, n) {
if (n === 0) {
return Buffer.alloc(0)
}
return new Promise((resolve, reject) => {
const chunks = []
let i = 0
function clean() {
stream.removeListener('readable', onReadable)
stream.removeListener('end', onEnd)
stream.removeListener('error', onError)
}
function resolve2() {
clean()
resolve(Buffer.concat(chunks, i))
}
function onEnd() {
resolve2()
clean()
}
function onError(error) {
reject(error)
clean()
}
function onReadable() {
const chunk = stream.read(n - i)
if (chunk === null) {
return // wait for more data
}
i += chunk.length
chunks.push(chunk)
if (i >= n) {
resolve2()
}
}
stream.on('end', onEnd)
stream.on('error', onError)
stream.on('readable', onReadable)
if (stream.readable) {
onReadable()
}
})
}

View File

@@ -0,0 +1,132 @@
/* eslint-env jest */
import asyncIteratorToStream from 'async-iterator-to-stream'
import execa from 'execa'
import fs from 'fs-extra'
import rimraf from 'rimraf'
import getStream from 'get-stream'
import tmp from 'tmp'
import { createReadStream, createWriteStream } from 'fs'
import { pFromCallback } from 'promise-toolbox'
import { pipeline } from 'readable-stream'
import { createVhdStreamWithLength } from '.'
import { FOOTER_SIZE } from './_constants'
let tempDir = null
beforeEach(async () => {
tempDir = await pFromCallback(cb => tmp.dir(cb))
})
afterEach(async () => {
await pFromCallback(cb => rimraf(tempDir, cb))
})
const RAW = 'raw'
const VHD = 'vpc'
const convert = (inputFormat, inputFile, outputFormat, outputFile) =>
execa('qemu-img', [
'convert',
'-f',
inputFormat,
'-O',
outputFormat,
inputFile,
outputFile,
])
const createRandomStream = asyncIteratorToStream(function*(size) {
let requested = Math.min(size, yield)
while (size > 0) {
const buf = Buffer.allocUnsafe(requested)
for (let i = 0; i < requested; ++i) {
buf[i] = Math.floor(Math.random() * 256)
}
requested = Math.min((size -= requested), yield buf)
}
})
async function createRandomFile(name, size) {
const input = await createRandomStream(size)
await pFromCallback(cb => pipeline(input, fs.createWriteStream(name), cb))
}
const forOwn = (object, cb) =>
Object.keys(object).forEach(key => cb(object[key], key, object))
describe('createVhdStreamWithLength', () => {
forOwn(
{
// qemu-img requires this length or it fill with null bytes which breaks
// the test
'can extract length': 34816,
'can handle empty file': 0,
},
(size, title) =>
it(title, async () => {
const inputRaw = `${tempDir}/input.raw`
await createRandomFile(inputRaw, size)
const inputVhd = `${tempDir}/input.vhd`
await convert(RAW, inputRaw, VHD, inputVhd)
const result = await createVhdStreamWithLength(
await createReadStream(inputVhd)
)
const { length } = result
const outputVhd = `${tempDir}/output.vhd`
await pFromCallback(
pipeline.bind(undefined, result, await createWriteStream(outputVhd))
)
// ensure the guessed length correspond to the stream length
const { size: outputSize } = await fs.stat(outputVhd)
expect(length).toEqual(outputSize)
// ensure the generated VHD is correct and contains the same data
const outputRaw = `${tempDir}/output.raw`
await convert(VHD, outputVhd, RAW, outputRaw)
await execa('cmp', [inputRaw, outputRaw])
})
)
it('can skip blank after the last block and before the footer', async () => {
const initialSize = 4 * 1024
const rawFileName = `${tempDir}/randomfile`
const vhdName = `${tempDir}/randomfile.vhd`
const outputVhdName = `${tempDir}/output.vhd`
await createRandomFile(rawFileName, initialSize)
await convert(RAW, rawFileName, VHD, vhdName)
const { size: vhdSize } = await fs.stat(vhdName)
// read file footer
const footer = await getStream.buffer(
createReadStream(vhdName, { start: vhdSize - FOOTER_SIZE })
)
// we'll override the footer
const endOfFile = await createWriteStream(vhdName, {
flags: 'r+',
start: vhdSize - FOOTER_SIZE,
})
// write a blank over the previous footer
await pFromCallback(cb => endOfFile.write(Buffer.alloc(FOOTER_SIZE), cb))
// write the footer after the new blank
await pFromCallback(cb => endOfFile.end(footer, cb))
const { size: longerSize } = await fs.stat(vhdName)
// check input file has been lengthened
expect(longerSize).toEqual(vhdSize + FOOTER_SIZE)
const result = await createVhdStreamWithLength(
await createReadStream(vhdName)
)
expect(result.length).toEqual(vhdSize)
const outputFileStream = await createWriteStream(outputVhdName)
await pFromCallback(cb => pipeline(result, outputFileStream, cb))
const { size: outputSize } = await fs.stat(outputVhdName)
// check out file has been shortened again
expect(outputSize).toEqual(vhdSize)
await execa('qemu-img', ['compare', outputVhdName, vhdName])
})
})

View File

@@ -0,0 +1,84 @@
import assert from 'assert'
import { pipeline, Transform } from 'readable-stream'
import checkFooter from './_checkFooter'
import checkHeader from './_checkHeader'
import noop from './_noop'
import getFirstAndLastBlocks from './_getFirstAndLastBlocks'
import readChunk from './_readChunk'
import { FOOTER_SIZE, HEADER_SIZE, SECTOR_SIZE } from './_constants'
import { fuFooter, fuHeader } from './_structs'
class EndCutterStream extends Transform {
constructor(footerOffset, footerBuffer) {
super()
this._footerOffset = footerOffset
this._footerBuffer = footerBuffer
this._position = 0
this._done = false
}
_transform(data, encoding, callback) {
if (!this._done) {
if (this._position + data.length >= this._footerOffset) {
this._done = true
const difference = this._footerOffset - this._position
data = data.slice(0, difference)
this.push(data)
this.push(this._footerBuffer)
} else {
this.push(data)
}
this._position += data.length
}
callback()
}
}
export default async function createVhdStreamWithLength(stream) {
const readBuffers = []
let streamPosition = 0
async function readStream(length) {
const chunk = await readChunk(stream, length)
assert.strictEqual(chunk.length, length)
streamPosition += chunk.length
readBuffers.push(chunk)
return chunk
}
const footerBuffer = await readStream(FOOTER_SIZE)
const footer = fuFooter.unpack(footerBuffer)
checkFooter(footer)
const header = fuHeader.unpack(await readStream(HEADER_SIZE))
checkHeader(header, footer)
await readStream(header.tableOffset - streamPosition)
const table = await readStream(header.maxTableEntries * 4)
readBuffers.reverse()
for (const buf of readBuffers) {
stream.unshift(buf)
}
const firstAndLastBlocks = getFirstAndLastBlocks(table)
const footerOffset =
firstAndLastBlocks !== undefined
? firstAndLastBlocks.lastSector * SECTOR_SIZE +
Math.ceil(header.blockSize / SECTOR_SIZE / 8 / SECTOR_SIZE) *
SECTOR_SIZE +
header.blockSize
: Math.ceil(streamPosition / SECTOR_SIZE) * SECTOR_SIZE
// ignore any data after footerOffset and push footerBuffer
//
// this is necessary to ignore any blank space between the last block and the
// final footer which would invalidate the size we computed
const newStream = new EndCutterStream(footerOffset, footerBuffer)
pipeline(stream, newStream, noop)
newStream.length = footerOffset + FOOTER_SIZE
return newStream
}

View File

@@ -11,3 +11,6 @@ export {
} from './createReadableSparseStream'
export { default as createSyntheticStream } from './createSyntheticStream'
export { default as mergeVhd } from './merge'
export {
default as createVhdStreamWithLength,
} from './createVhdStreamWithLength'

View File

@@ -1,19 +1,16 @@
import assert from 'assert'
import { fromEvent } from 'promise-toolbox'
import checkFooter from './_checkFooter'
import checkHeader from './_checkHeader'
import constantStream from './_constant-stream'
import getFirstAndLastBlocks from './_getFirstAndLastBlocks'
import { fuFooter, fuHeader, checksumStruct, unpackField } from './_structs'
import { set as mapSetBit, test as mapTestBit } from './_bitmap'
import {
BLOCK_UNUSED,
DISK_TYPE_DIFFERENCING,
DISK_TYPE_DYNAMIC,
FILE_FORMAT_VERSION,
FOOTER_COOKIE,
FOOTER_SIZE,
HEADER_COOKIE,
HEADER_SIZE,
HEADER_VERSION,
PARENT_LOCATOR_ENTRIES,
PLATFORM_NONE,
PLATFORM_W2KU,
@@ -170,21 +167,10 @@ export default class Vhd {
}
const footer = (this.footer = fuFooter.unpack(bufFooter))
assert.strictEqual(footer.cookie, FOOTER_COOKIE, 'footer cookie')
assert.strictEqual(footer.dataOffset, FOOTER_SIZE)
assert.strictEqual(footer.fileFormatVersion, FILE_FORMAT_VERSION)
assert(footer.originalSize <= footer.currentSize)
assert(
footer.diskType === DISK_TYPE_DIFFERENCING ||
footer.diskType === DISK_TYPE_DYNAMIC
)
checkFooter(footer)
const header = (this.header = fuHeader.unpack(bufHeader))
assert.strictEqual(header.cookie, HEADER_COOKIE)
assert.strictEqual(header.dataOffset, undefined)
assert.strictEqual(header.headerVersion, HEADER_VERSION)
assert(header.maxTableEntries >= footer.currentSize / header.blockSize)
assert(Number.isInteger(Math.log2(header.blockSize / SECTOR_SIZE)))
checkHeader(header, footer)
// Compute the number of sectors in one block.
// Default: One block contains 4096 sectors of 512 bytes.
@@ -242,49 +228,6 @@ export default class Vhd {
)
}
// get the identifiers and first sectors of the first and last block
// in the file
//
_getFirstAndLastBlocks() {
const n = this.header.maxTableEntries
const bat = this.blockTable
let i = 0
let j = 0
let first, firstSector, last, lastSector
// get first allocated block for initialization
while ((firstSector = bat.readUInt32BE(j)) === BLOCK_UNUSED) {
i += 1
j += 4
if (i === n) {
const error = new Error('no allocated block found')
error.noBlock = true
throw error
}
}
lastSector = firstSector
first = last = i
while (i < n) {
const sector = bat.readUInt32BE(j)
if (sector !== BLOCK_UNUSED) {
if (sector < firstSector) {
first = i
firstSector = sector
} else if (sector > lastSector) {
last = i
lastSector = sector
}
}
i += 1
j += 4
}
return { first, firstSector, last, lastSector }
}
// =================================================================
// Write functions.
// =================================================================
@@ -310,41 +253,37 @@ export default class Vhd {
}
async _freeFirstBlockSpace(spaceNeededBytes) {
try {
const { first, firstSector, lastSector } = this._getFirstAndLastBlocks()
const tableOffset = this.header.tableOffset
const { batSize } = this
const newMinSector = Math.ceil(
(tableOffset + batSize + spaceNeededBytes) / SECTOR_SIZE
const firstAndLastBlocks = getFirstAndLastBlocks(this.blockTable)
if (firstAndLastBlocks === undefined) {
return
}
const { first, firstSector, lastSector } = firstAndLastBlocks
const tableOffset = this.header.tableOffset
const { batSize } = this
const newMinSector = Math.ceil(
(tableOffset + batSize + spaceNeededBytes) / SECTOR_SIZE
)
if (
tableOffset + batSize + spaceNeededBytes >=
sectorsToBytes(firstSector)
) {
const { fullBlockSize } = this
const newFirstSector = Math.max(
lastSector + fullBlockSize / SECTOR_SIZE,
newMinSector
)
if (
tableOffset + batSize + spaceNeededBytes >=
sectorsToBytes(firstSector)
) {
const { fullBlockSize } = this
const newFirstSector = Math.max(
lastSector + fullBlockSize / SECTOR_SIZE,
newMinSector
)
debug(
`freeFirstBlockSpace: move first block ${firstSector} -> ${newFirstSector}`
)
// copy the first block at the end
const block = await this._read(
sectorsToBytes(firstSector),
fullBlockSize
)
await this._write(block, sectorsToBytes(newFirstSector))
await this._setBatEntry(first, newFirstSector)
await this.writeFooter(true)
spaceNeededBytes -= this.fullBlockSize
if (spaceNeededBytes > 0) {
return this._freeFirstBlockSpace(spaceNeededBytes)
}
}
} catch (e) {
if (!e.noBlock) {
throw e
debug(
`freeFirstBlockSpace: move first block ${firstSector} -> ${newFirstSector}`
)
// copy the first block at the end
const block = await this._read(sectorsToBytes(firstSector), fullBlockSize)
await this._write(block, sectorsToBytes(newFirstSector))
await this._setBatEntry(first, newFirstSector)
await this.writeFooter(true)
spaceNeededBytes -= this.fullBlockSize
if (spaceNeededBytes > 0) {
return this._freeFirstBlockSpace(spaceNeededBytes)
}
}
}

View File

@@ -4,22 +4,20 @@ import rimraf from 'rimraf'
import tmp from 'tmp'
import { createWriteStream, readFile } from 'fs-promise'
import { fromEvent, pFromCallback } from 'promise-toolbox'
import { pipeline } from 'readable-stream'
import { createReadableRawStream, createReadableSparseStream } from './'
import { createFooter } from './src/_createFooterHeader'
const initialDir = process.cwd()
let tempDir = null
beforeEach(async () => {
const dir = await pFromCallback(cb => tmp.dir(cb))
process.chdir(dir)
tempDir = await pFromCallback(cb => tmp.dir(cb))
})
afterEach(async () => {
const tmpDir = process.cwd()
process.chdir(initialDir)
await pFromCallback(cb => rimraf(tmpDir, cb))
await pFromCallback(cb => rimraf(tempDir, cb))
})
test('createFooter() does not crash', () => {
@@ -55,9 +53,10 @@ test('ReadableRawVHDStream does not crash', async () => {
}
const fileSize = 1000
const stream = createReadableRawStream(fileSize, mockParser)
const pipe = stream.pipe(createWriteStream('output.vhd'))
await fromEvent(pipe, 'finish')
await execa('vhd-util', ['check', '-t', '-i', '-n', 'output.vhd'])
await pFromCallback(cb =>
pipeline(stream, createWriteStream(`${tempDir}/output.vhd`), cb)
)
await execa('vhd-util', ['check', '-t', '-i', '-n', `${tempDir}/output.vhd`])
})
test('ReadableRawVHDStream detects when blocks are out of order', async () => {
@@ -87,9 +86,9 @@ test('ReadableRawVHDStream detects when blocks are out of order', async () => {
new Promise((resolve, reject) => {
const stream = createReadableRawStream(100000, mockParser)
stream.on('error', reject)
const pipe = stream.pipe(createWriteStream('outputStream'))
pipe.on('finish', resolve)
pipe.on('error', reject)
pipeline(stream, createWriteStream(`${tempDir}/outputStream`), err =>
err ? reject(err) : resolve()
)
})
).rejects.toThrow('Received out of order blocks')
})
@@ -114,19 +113,19 @@ test('ReadableSparseVHDStream can handle a sparse file', async () => {
blocks
)
expect(stream.length).toEqual(4197888)
const pipe = stream.pipe(createWriteStream('output.vhd'))
const pipe = stream.pipe(createWriteStream(`${tempDir}/output.vhd`))
await fromEvent(pipe, 'finish')
await execa('vhd-util', ['check', '-t', '-i', '-n', 'output.vhd'])
await execa('vhd-util', ['check', '-t', '-i', '-n', `${tempDir}/output.vhd`])
await execa('qemu-img', [
'convert',
'-f',
'vpc',
'-O',
'raw',
'output.vhd',
'out1.raw',
`${tempDir}/output.vhd`,
`${tempDir}/out1.raw`,
])
const out1 = await readFile('out1.raw')
const out1 = await readFile(`${tempDir}/out1.raw`)
const expected = Buffer.alloc(fileSize)
blocks.forEach(b => {
b.data.copy(expected, b.offsetBytes)

View File

@@ -41,7 +41,7 @@
"human-format": "^0.10.0",
"lodash": "^4.17.4",
"pw": "^0.0.4",
"xen-api": "^0.24.3"
"xen-api": "^0.25.1"
},
"devDependencies": {
"@babel/cli": "^7.1.5",

View File

@@ -4,7 +4,7 @@ const { PassThrough, pipeline } = require('readable-stream')
const humanFormat = require('human-format')
const Throttle = require('throttle')
const { isOpaqueRef } = require('../')
const isOpaqueRef = require('../dist/_isOpaqueRef').default
exports.createInputStream = path => {
if (path === undefined || path === '-') {

View File

@@ -1,6 +1,6 @@
{
"name": "xen-api",
"version": "0.24.3",
"version": "0.25.1",
"license": "ISC",
"description": "Connector to the Xen API",
"keywords": [
@@ -33,20 +33,20 @@
"node": ">=6"
},
"dependencies": {
"bind-property-descriptor": "^1.0.0",
"blocked": "^1.2.1",
"debug": "^4.0.1",
"event-to-promise": "^0.8.0",
"exec-promise": "^0.7.0",
"http-request-plus": "^0.7.2",
"iterable-backoff": "^0.0.0",
"jest-diff": "^23.5.0",
"http-request-plus": "^0.8.0",
"jest-diff": "^24.0.0",
"json-rpc-protocol": "^0.13.1",
"kindof": "^2.0.0",
"lodash": "^4.17.4",
"make-error": "^1.3.0",
"minimist": "^1.2.0",
"ms": "^2.1.1",
"promise-toolbox": "^0.11.0",
"promise-toolbox": "^0.12.1",
"pw": "0.0.4",
"xmlrpc": "^1.3.2",
"xo-collection": "^0.4.1"
@@ -54,7 +54,10 @@
"devDependencies": {
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/plugin-proposal-class-properties": "^7.3.4",
"@babel/plugin-proposal-decorators": "^7.0.0",
"@babel/plugin-proposal-nullish-coalescing-operator": "^7.2.0",
"@babel/plugin-proposal-optional-chaining": "^7.2.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^5.1.3",

View File

@@ -0,0 +1,30 @@
import { BaseError } from 'make-error'
export default class XapiError extends BaseError {
static wrap(error) {
let code, params
if (Array.isArray(error)) {
// < XenServer 7.3
;[code, ...params] = error
} else {
code = error.message
params = error.data
if (!Array.isArray(params)) {
params = []
}
}
return new XapiError(code, params)
}
constructor(code, params) {
super(`${code}(${params.join(', ')})`)
this.code = code
this.params = params
// slots than can be assigned later
this.call = undefined
this.url = undefined
this.task = undefined
}
}

View File

@@ -0,0 +1,15 @@
// decorates fn so that more than one concurrent calls will be coalesced
export default function coalesceCalls(fn) {
let promise
const clean = () => {
promise = undefined
}
return function() {
if (promise !== undefined) {
return promise
}
promise = fn.apply(this, arguments)
promise.then(clean, clean)
return promise
}
}

View File

@@ -0,0 +1,26 @@
/* eslint-env jest */
import pDefer from 'promise-toolbox/defer'
import coalesceCalls from './_coalesceCalls'
describe('coalesceCalls', () => {
it('decorates an async function', async () => {
const fn = coalesceCalls(promise => promise)
const defer1 = pDefer()
const promise1 = fn(defer1.promise)
const defer2 = pDefer()
const promise2 = fn(defer2.promise)
defer1.resolve('foo')
expect(await promise1).toBe('foo')
expect(await promise2).toBe('foo')
const defer3 = pDefer()
const promise3 = fn(defer3.promise)
defer3.resolve('bar')
expect(await promise3).toBe('bar')
})
})

View File

@@ -0,0 +1,3 @@
import debug from 'debug'
export default debug('xen-api')

View File

@@ -0,0 +1,22 @@
import { Cancel } from 'promise-toolbox'
import XapiError from './_XapiError'
export default task => {
const { status } = task
if (status === 'cancelled') {
return Promise.reject(new Cancel('task canceled'))
}
if (status === 'failure') {
const error = XapiError.wrap(task.error_info)
error.task = task
return Promise.reject(error)
}
if (status === 'success') {
// the result might be:
// - empty string
// - an opaque reference
// - an XML-RPC value
return Promise.resolve(task.result)
}
}

View File

@@ -0,0 +1,3 @@
const SUFFIX = '.get_all_records'
export default method => method.endsWith(SUFFIX)

View File

@@ -0,0 +1,3 @@
const PREFIX = 'OpaqueRef:'
export default value => typeof value === 'string' && value.startsWith(PREFIX)

View File

@@ -0,0 +1,4 @@
const RE = /^[^.]+\.get_/
export default (method, args) =>
args.length === 1 && typeof args[0] === 'string' && RE.test(method)

View File

@@ -0,0 +1,8 @@
export default (setting, defaultValue) =>
setting === undefined
? () => defaultValue
: typeof setting === 'function'
? setting
: typeof setting === 'object'
? method => setting[method] ?? setting['*'] ?? defaultValue
: () => setting

View File

@@ -0,0 +1,18 @@
const URL_RE = /^(?:(https?:)\/*)?(?:([^:]+):([^@]+)@)?([^/]+?)(?::([0-9]+))?\/?$/
export default url => {
const matches = URL_RE.exec(url)
if (matches === null) {
throw new Error('invalid URL: ' + url)
}
const [, protocol = 'https:', username, password, hostname, port] = matches
const parsedUrl = { protocol, hostname, port }
if (username !== undefined) {
parsedUrl.username = decodeURIComponent(username)
}
if (password !== undefined) {
parsedUrl.password = decodeURIComponent(password)
}
return parsedUrl
}

View File

@@ -9,6 +9,7 @@ import minimist from 'minimist'
import pw from 'pw'
import { asCallback, fromCallback } from 'promise-toolbox'
import { filter, find, isArray } from 'lodash'
import { getBoundPropertyDescriptor } from 'bind-property-descriptor'
import { start as createRepl } from 'repl'
import { createClient } from './'
@@ -25,6 +26,20 @@ function askPassword(prompt = 'Password: ') {
})
}
const { getPrototypeOf, ownKeys } = Reflect
function getAllBoundDescriptors(object) {
const descriptors = { __proto__: null }
let current = object
do {
ownKeys(current).forEach(key => {
if (!(key in descriptors)) {
descriptors[key] = getBoundPropertyDescriptor(current, key, object)
}
})
} while ((current = getPrototypeOf(current)) !== null)
return descriptors
}
// ===================================================================
const usage = 'Usage: xen-api <url> [<user> [<password>]]'
@@ -78,11 +93,17 @@ const main = async args => {
const repl = createRepl({
prompt: `${xapi._humanId}> `,
})
repl.context.xapi = xapi
repl.context.diff = (a, b) => console.log('%s', diff(a, b))
repl.context.find = predicate => find(xapi.objects.all, predicate)
repl.context.findAll = predicate => filter(xapi.objects.all, predicate)
{
const ctx = repl.context
ctx.xapi = xapi
ctx.diff = (a, b) => console.log('%s', diff(a, b))
ctx.find = predicate => find(xapi.objects.all, predicate)
ctx.findAll = predicate => filter(xapi.objects.all, predicate)
Object.defineProperties(ctx, getAllBoundDescriptors(xapi))
}
// Make the REPL waits for promise completion.
repl.eval = (evaluate => (cmd, context, filename, cb) => {

File diff suppressed because it is too large Load Diff

View File

@@ -4,31 +4,33 @@ import { pDelay } from 'promise-toolbox'
import { createClient } from './'
const xapi = (() => {
const [, , url, user, password] = process.argv
return createClient({
auth: { user, password },
async function main([url]) {
const xapi = createClient({
allowUnauthorized: true,
url,
watchEvents: false,
})
})()
await xapi.connect()
xapi
.connect()
// Get the pool record's ref.
.then(() => xapi.call('pool.get_all'))
// Injects lots of events.
.then(([poolRef]) => {
const loop = () =>
pDelay
.call(
xapi.call('event.inject', 'pool', poolRef),
10 // A small delay is required to avoid overloading the Xen API.
)
.then(loop)
return loop()
let loop = true
process.on('SIGINT', () => {
loop = false
})
const { pool } = xapi
// eslint-disable-next-line no-unmodified-loop-condition
while (loop) {
await pool.update_other_config(
'xo:injectEvents',
Math.random()
.toString(36)
.slice(2)
)
await pDelay(1e2)
}
await pool.update_other_config('xo:injectEvents', null)
await xapi.disconnect()
}
main(process.argv.slice(2)).catch(console.error)

View File

@@ -0,0 +1,3 @@
import makeError from 'make-error'
export default makeError('UnsupportedTransport')

View File

@@ -0,0 +1,25 @@
// Prepare values before passing them to the XenAPI:
//
// - cast integers to strings
export default function prepare(param) {
if (Number.isInteger(param)) {
return String(param)
}
if (typeof param !== 'object' || param === null) {
return param
}
if (Array.isArray(param)) {
return param.map(prepare)
}
const values = {}
Object.keys(param).forEach(key => {
const value = param[key]
if (value !== undefined) {
values[key] = prepare(value)
}
})
return values
}

View File

@@ -1,3 +0,0 @@
import makeError from 'make-error'
export const UnsupportedTransport = makeError('UnsupportedTransport')

View File

@@ -1,7 +1,7 @@
import jsonRpc from './json-rpc'
import UnsupportedTransport from './_UnsupportedTransport'
import xmlRpc from './xml-rpc'
import xmlRpcJson from './xml-rpc-json'
import { UnsupportedTransport } from './_utils'
const factories = [jsonRpc, xmlRpcJson, xmlRpc]
const { length } = factories

View File

@@ -1,8 +1,9 @@
import httpRequestPlus from 'http-request-plus'
import { format, parse } from 'json-rpc-protocol'
import { UnsupportedTransport } from './_utils'
import UnsupportedTransport from './_UnsupportedTransport'
// https://github.com/xenserver/xenadmin/blob/0df39a9d83cd82713f32d24704852a0fd57b8a64/XenModel/XenAPI/Session.cs#L403-L433
export default ({ allowUnauthorized, url }) => {
return (method, args) =>
httpRequestPlus

View File

@@ -1,7 +1,8 @@
import { createClient, createSecureClient } from 'xmlrpc'
import { promisify } from 'promise-toolbox'
import { UnsupportedTransport } from './_utils'
import prepareXmlRpcParams from './_prepareXmlRpcParams'
import UnsupportedTransport from './_UnsupportedTransport'
const logError = error => {
if (error.res) {
@@ -71,10 +72,7 @@ const parseResult = result => {
throw new UnsupportedTransport()
}
export default ({
allowUnauthorized,
url: { hostname, path, port, protocol },
}) => {
export default ({ allowUnauthorized, url: { hostname, port, protocol } }) => {
const client = (protocol === 'https:' ? createSecureClient : createClient)({
host: hostname,
path: '/json',
@@ -83,5 +81,6 @@ export default ({
})
const call = promisify(client.methodCall, client)
return (method, args) => call(method, args).then(parseResult, logError)
return (method, args) =>
call(method, prepareXmlRpcParams(args)).then(parseResult, logError)
}

View File

@@ -1,6 +1,8 @@
import { createClient, createSecureClient } from 'xmlrpc'
import { promisify } from 'promise-toolbox'
import prepareXmlRpcParams from './_prepareXmlRpcParams'
const logError = error => {
if (error.res) {
console.error(
@@ -30,10 +32,7 @@ const parseResult = result => {
return result.Value
}
export default ({
allowUnauthorized,
url: { hostname, path, port, protocol },
}) => {
export default ({ allowUnauthorized, url: { hostname, port, protocol } }) => {
const client = (protocol === 'https:' ? createSecureClient : createClient)({
host: hostname,
port,
@@ -41,5 +40,6 @@ export default ({
})
const call = promisify(client.methodCall, client)
return (method, args) => call(method, args).then(parseResult, logError)
return (method, args) =>
call(method, prepareXmlRpcParams(args)).then(parseResult, logError)
}

View File

@@ -34,7 +34,7 @@
"chalk": "^2.2.0",
"exec-promise": "^0.7.0",
"fs-promise": "^2.0.3",
"http-request-plus": "^0.7.2",
"http-request-plus": "^0.8.0",
"human-format": "^0.10.0",
"l33teral": "^3.0.3",
"lodash": "^4.17.4",
@@ -43,7 +43,7 @@
"nice-pipe": "0.0.0",
"pretty-ms": "^4.0.0",
"progress-stream": "^2.0.0",
"promise-toolbox": "^0.11.0",
"promise-toolbox": "^0.12.1",
"pump": "^3.0.0",
"pw": "^0.0.4",
"strip-indent": "^2.0.0",

View File

@@ -1,7 +1,7 @@
import kindOf from 'kindof'
import { BaseError } from 'make-error'
import { EventEmitter } from 'events'
import { forEach } from 'lodash'
import { forOwn } from 'lodash'
import isEmpty from './is-empty'
import isObject from './is-object'
@@ -10,6 +10,7 @@ import isObject from './is-object'
const {
create: createObject,
keys,
prototype: { hasOwnProperty },
} = Object
@@ -63,6 +64,16 @@ export class NoSuchItem extends BaseError {
// -------------------------------------------------------------------
const assertValidKey = key => {
if (!isValidKey(key)) {
throw new InvalidKey(key)
}
}
const isValidKey = key => typeof key === 'number' || typeof key === 'string'
// -------------------------------------------------------------------
export default class Collection extends EventEmitter {
constructor() {
super()
@@ -71,7 +82,7 @@ export default class Collection extends EventEmitter {
this._buffering = 0
this._indexes = createObject(null)
this._indexedItems = createObject(null)
this._items = {} // createObject(null)
this._items = createObject(null)
this._size = 0
}
@@ -113,7 +124,7 @@ export default class Collection extends EventEmitter {
}
clear() {
forEach(this._items, (_, key) => this._remove(key))
keys(this._items).forEach(this._remove, this)
}
remove(keyOrObjectWithId) {
@@ -176,8 +187,7 @@ export default class Collection extends EventEmitter {
return defaultValue
}
// Throws a NoSuchItem.
this._assertHas(key)
throw new NoSuchItem(key)
}
has(key) {
@@ -189,7 +199,7 @@ export default class Collection extends EventEmitter {
// -----------------------------------------------------------------
createIndex(name, index) {
const { _indexes: indexes } = this
const indexes = this._indexes
if (hasOwnProperty.call(indexes, name)) {
throw new DuplicateIndex(name)
}
@@ -201,7 +211,7 @@ export default class Collection extends EventEmitter {
}
deleteIndex(name) {
const { _indexes: indexes } = this
const indexes = this._indexes
if (!hasOwnProperty.call(indexes, name)) {
throw new NoSuchIndex(name)
}
@@ -218,7 +228,7 @@ export default class Collection extends EventEmitter {
// -----------------------------------------------------------------
*[Symbol.iterator]() {
const { _items: items } = this
const items = this._items
for (const key in items) {
yield [key, items[key]]
@@ -226,7 +236,7 @@ export default class Collection extends EventEmitter {
}
*keys() {
const { _items: items } = this
const items = this._items
for (const key in items) {
yield key
@@ -234,7 +244,7 @@ export default class Collection extends EventEmitter {
}
*values() {
const { _items: items } = this
const items = this._items
for (const key in items) {
yield items[key]
@@ -255,11 +265,11 @@ export default class Collection extends EventEmitter {
}
called = true
if (--this._buffering) {
if (--this._buffering !== 0) {
return
}
const { _buffer: buffer } = this
const buffer = this._buffer
// Due to deduplication there could be nothing in the buffer.
if (isEmpty(buffer)) {
@@ -276,7 +286,7 @@ export default class Collection extends EventEmitter {
data[buffer[key]][key] = this._items[key]
}
forEach(data, (items, action) => {
forOwn(data, (items, action) => {
if (!isEmpty(items)) {
this.emit(action, items)
}
@@ -306,16 +316,6 @@ export default class Collection extends EventEmitter {
}
}
_assertValidKey(key) {
if (!this._isValidKey(key)) {
throw new InvalidKey(key)
}
}
_isValidKey(key) {
return typeof key === 'number' || typeof key === 'string'
}
_remove(key) {
delete this._items[key]
this._size--
@@ -324,17 +324,17 @@ export default class Collection extends EventEmitter {
_resolveItem(keyOrObjectWithId, valueIfKey = undefined) {
if (valueIfKey !== undefined) {
this._assertValidKey(keyOrObjectWithId)
assertValidKey(keyOrObjectWithId)
return [keyOrObjectWithId, valueIfKey]
}
if (this._isValidKey(keyOrObjectWithId)) {
if (isValidKey(keyOrObjectWithId)) {
return [keyOrObjectWithId]
}
const key = this.getKey(keyOrObjectWithId)
this._assertValidKey(key)
assertValidKey(key)
return [key, keyOrObjectWithId]
}
@@ -347,7 +347,7 @@ export default class Collection extends EventEmitter {
}
if (action === ACTION_ADD) {
this._buffer[key] = this._buffer[key] ? ACTION_UPDATE : ACTION_ADD
this._buffer[key] = key in this._buffer ? ACTION_UPDATE : ACTION_ADD
} else if (action === ACTION_REMOVE) {
if (this._buffer[key] === ACTION_ADD) {
delete this._buffer[key]
@@ -356,7 +356,7 @@ export default class Collection extends EventEmitter {
}
} else {
// update
if (!this._buffer[key]) {
if (!(key in this._buffer)) {
this._buffer[key] = ACTION_UPDATE
}
}

View File

@@ -32,7 +32,7 @@
"dist/"
],
"engines": {
"node": ">=4"
"node": ">=6"
},
"dependencies": {
"csv-parser": "^2.1.0",
@@ -43,7 +43,7 @@
"xo-lib": "^0.9.0"
},
"devDependencies": {
"@types/node": "^10.12.2",
"@types/node": "^11.11.4",
"@types/through2": "^2.0.31",
"tslint": "^5.9.1",
"tslint-config-standard": "^8.0.1",

View File

@@ -1,8 +1,3 @@
declare module 'csv-parser' {
function csvParser(opts?: Object): any
export = csvParser
}
declare module 'exec-promise' {
function execPromise(cb: (args: string[]) => any): void
export = execPromise

View File

@@ -1,6 +1,6 @@
{
"name": "xo-server-auth-google",
"version": "0.2.0",
"version": "0.2.1",
"license": "AGPL-3.0",
"description": "Google authentication plugin for XO-Server",
"keywords": [
@@ -33,7 +33,7 @@
"node": ">=6"
},
"dependencies": {
"passport-google-oauth20": "^1.0.0"
"passport-google-oauth20": "^2.0.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",

View File

@@ -39,7 +39,7 @@
"inquirer": "^6.0.0",
"ldapjs": "^1.0.1",
"lodash": "^4.17.4",
"promise-toolbox": "^0.11.0"
"promise-toolbox": "^0.12.1"
},
"devDependencies": {
"@babel/cli": "^7.0.0",

View File

@@ -1,6 +1,6 @@
{
"name": "xo-server-backup-reports",
"version": "0.15.0",
"version": "0.16.0",
"license": "AGPL-3.0",
"description": "Backup reports plugin for XO-Server",
"keywords": [
@@ -36,6 +36,7 @@
"node": ">=6"
},
"dependencies": {
"@xen-orchestra/log": "^0.1.4",
"human-format": "^0.10.0",
"lodash": "^4.13.1",
"moment-timezone": "^0.5.13"
@@ -43,6 +44,8 @@
"devDependencies": {
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/plugin-proposal-nullish-coalescing-operator": "^7.4.3",
"@babel/plugin-proposal-optional-chaining": "^7.2.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^5.1.3",

View File

@@ -1,8 +1,11 @@
import createLogger from '@xen-orchestra/log'
import humanFormat from 'human-format'
import moment from 'moment-timezone'
import { forEach, get, startCase } from 'lodash'
import { forEach, groupBy, startCase } from 'lodash'
import pkg from '../package'
const logger = createLogger('xo:xo-server-backup-reports')
export const configurationSchema = {
type: 'object',
@@ -46,6 +49,9 @@ export const testSchema = {
// ===================================================================
const INDENT = ' '
const UNKNOWN_ITEM = 'Unknown'
const ICON_FAILURE = '🚨'
const ICON_INTERRUPTED = '⚠️'
const ICON_SKIPPED = '⏩'
@@ -60,7 +66,7 @@ const STATUS_ICON = {
}
const DATE_FORMAT = 'dddd, MMMM Do YYYY, h:mm:ss a'
const createDateFormater = timezone =>
const createDateFormatter = timezone =>
timezone !== undefined
? timestamp =>
moment(timestamp)
@@ -86,10 +92,6 @@ const formatSpeed = (bytes, milliseconds) =>
})
: 'N/A'
const logError = e => {
console.error('backup report error:', e)
}
const NO_VMS_MATCH_THIS_PATTERN = 'no VMs match this pattern'
const NO_SUCH_OBJECT_ERROR = 'no such object'
const UNHEALTHY_VDI_CHAIN_ERROR = 'unhealthy VDI chain'
@@ -100,40 +102,114 @@ const isSkippedError = error =>
error.message === UNHEALTHY_VDI_CHAIN_ERROR ||
error.message === NO_SUCH_OBJECT_ERROR
const INDENT = ' '
const createGetTemporalDataMarkdown = formatDate => (
start,
end,
nbIndent = 0
) => {
const indent = INDENT.repeat(nbIndent)
// ===================================================================
const markdown = [`${indent}- **Start time**: ${formatDate(start)}`]
const STATUS = ['failure', 'interrupted', 'skipped', 'success']
const TITLE_BY_STATUS = {
failure: n => `## ${n} Failure${n === 1 ? '' : 's'}`,
interrupted: n => `## ${n} Interrupted`,
skipped: n => `## ${n} Skipped`,
success: n => `## ${n} Success${n === 1 ? '' : 'es'}`,
}
const getTemporalDataMarkdown = (end, start, formatDate) => {
const markdown = [`- **Start time**: ${formatDate(start)}`]
if (end !== undefined) {
markdown.push(`${indent}- **End time**: ${formatDate(end)}`)
markdown.push(`- **End time**: ${formatDate(end)}`)
const duration = end - start
if (duration >= 1) {
markdown.push(`${indent}- **Duration**: ${formatDuration(duration)}`)
markdown.push(`- **Duration**: ${formatDuration(duration)}`)
}
}
return markdown
}
const addWarnings = (text, warnings, nbIndent = 0) => {
if (warnings === undefined) {
const getWarningsMarkdown = (warnings = []) =>
warnings.map(({ message }) => `- **${ICON_WARNING} ${message}**`)
const getErrorMarkdown = task => {
let message
if (
task.status === 'success' ||
(message = task.result?.message ?? task.result?.code) === undefined
) {
return
}
const indent = INDENT.repeat(nbIndent)
warnings.forEach(({ message }) => {
text.push(`${indent}- **${ICON_WARNING} ${message}**`)
})
const label = task.status === 'skipped' ? 'Reason' : 'Error'
return `- **${label}**: ${message}`
}
const MARKDOWN_BY_TYPE = {
pool(task, { formatDate }) {
const { pool, poolMaster = {} } = task.data
const name = pool.name_label || poolMaster.name_label || UNKNOWN_ITEM
return {
body: [
`- **UUID**: ${pool.uuid}`,
...getTemporalDataMarkdown(task.end, task.start, formatDate),
getErrorMarkdown(task),
],
title: `[pool] ${name}`,
}
},
xo(task, { formatDate, jobName }) {
return {
body: [
...getTemporalDataMarkdown(task.end, task.start, formatDate),
getErrorMarkdown(task),
],
title: `[XO] ${jobName}`,
}
},
async remote(task, { formatDate, xo }) {
const id = task.data.id
const name = await xo.getRemote(id).then(
({ name }) => name,
error => {
logger.warn(error)
return UNKNOWN_ITEM
}
)
return {
body: [
`- **ID**: ${id}`,
...getTemporalDataMarkdown(task.end, task.start, formatDate),
getErrorMarkdown(task),
],
title: `[remote] ${name}`,
}
},
}
const getMarkdown = (task, props) =>
MARKDOWN_BY_TYPE[(task.data?.type)]?.(task, props)
const toMarkdown = parts => {
const lines = []
let indentLevel = 0
const helper = part => {
if (typeof part === 'string') {
lines.push(`${INDENT.repeat(indentLevel)}${part}`)
} else if (Array.isArray(part)) {
++indentLevel
part.forEach(helper)
--indentLevel
}
}
helper(parts)
return lines.join('\n')
}
// ===================================================================
class BackupReportsXoPlugin {
constructor(xo) {
this._xo = xo
this._report = this._wrapper.bind(this)
this._report = this._report.bind(this)
}
configure({ toMails, toXmpp }) {
@@ -146,72 +222,171 @@ class BackupReportsXoPlugin {
}
test({ runId }) {
return this._backupNgListener(undefined, undefined, undefined, runId)
return this._report(runId, undefined, true)
}
unload() {
this._xo.removeListener('job:terminated', this._report)
}
_wrapper(status, job, schedule, runJobId) {
if (job.type === 'metadataBackup') {
return
}
async _report(runJobId, { type, status } = {}, force) {
const xo = this._xo
try {
if (type === 'call') {
return this._legacyVmHandler(status)
}
return new Promise(resolve =>
resolve(
job.type === 'backup'
? this._backupNgListener(status, job, schedule, runJobId)
: this._listener(status, job, schedule, runJobId)
)
).catch(logError)
const log = await xo.getBackupNgLogs(runJobId)
if (log === undefined) {
throw new Error(`no log found with runId=${JSON.stringify(runJobId)}`)
}
const reportWhen = log.data.reportWhen
if (
!force &&
(reportWhen === 'never' ||
(reportWhen === 'failure' && log.status === 'success'))
) {
return
}
const [job, schedule] = await Promise.all([
await xo.getJob(log.jobId),
await xo.getSchedule(log.scheduleId).catch(error => {
logger.warn(error)
}),
])
if (job.type === 'backup') {
return this._ngVmHandler(log, job, schedule, force)
} else if (job.type === 'metadataBackup') {
return this._metadataHandler(log, job, schedule, force)
}
throw new Error(`Unknown backup job type: ${job.type}`)
} catch (error) {
logger.warn(error)
}
}
async _backupNgListener(_1, _2, schedule, runJobId) {
async _metadataHandler(log, { name: jobName }, schedule, force) {
const xo = this._xo
const log = await xo.getBackupNgLogs(runJobId)
if (log === undefined) {
throw new Error(`no log found with runId=${JSON.stringify(runJobId)}`)
const formatDate = createDateFormatter(schedule?.timezone)
const tasksByStatus = groupBy(log.tasks, 'status')
const n = log.tasks?.length ?? 0
const nSuccesses = tasksByStatus.success?.length ?? 0
if (!force && log.data.reportWhen === 'failure') {
delete tasksByStatus.success
}
// header
const markdown = [
`## Global status: ${log.status}`,
'',
`- **Job ID**: ${log.jobId}`,
`- **Job name**: ${jobName}`,
`- **Run ID**: ${log.id}`,
...getTemporalDataMarkdown(log.end, log.start, formatDate),
n !== 0 && `- **Successes**: ${nSuccesses} / ${n}`,
...getWarningsMarkdown(log.warnings),
getErrorMarkdown(log),
]
const nagiosText = []
// body
for (const status of STATUS) {
const tasks = tasksByStatus[status]
if (tasks === undefined) {
continue
}
// tasks header
markdown.push('---', '', TITLE_BY_STATUS[status](tasks.length))
// tasks body
for (const task of tasks) {
const taskMarkdown = await getMarkdown(task, {
formatDate,
jobName: log.jobName,
})
if (taskMarkdown === undefined) {
continue
}
const { title, body } = taskMarkdown
const subMarkdown = [...body, ...getWarningsMarkdown(task.warnings)]
if (task.status !== 'success') {
nagiosText.push(`[${task.status}] ${title}`)
}
for (const subTask of task.tasks ?? []) {
const taskMarkdown = await getMarkdown(subTask, { formatDate, xo })
if (taskMarkdown === undefined) {
continue
}
const icon = STATUS_ICON[subTask.status]
const { title, body } = taskMarkdown
subMarkdown.push([
`- **${title}** ${icon}`,
[...body, ...getWarningsMarkdown(subTask.warnings)],
])
}
markdown.push('', '', `### ${title}`, ...subMarkdown)
}
}
// footer
markdown.push('---', '', `*${pkg.name} v${pkg.version}*`)
return this._sendReport({
subject: `[Xen Orchestra] ${log.status} Metadata backup report for ${
log.jobName
} ${STATUS_ICON[log.status]}`,
markdown: toMarkdown(markdown),
nagiosStatus: log.status === 'success' ? 0 : 2,
nagiosMarkdown:
log.status === 'success'
? `[Xen Orchestra] [Success] Metadata backup report for ${
log.jobName
}`
: `[Xen Orchestra] [${log.status}] Metadata backup report for ${
log.jobName
} - ${nagiosText.join(' ')}`,
})
}
async _ngVmHandler(log, { name: jobName }, schedule, force) {
const xo = this._xo
const { reportWhen, mode } = log.data || {}
if (
reportWhen === 'never' ||
(log.status === 'success' && reportWhen === 'failure')
) {
return
}
if (schedule === undefined) {
schedule = await xo.getSchedule(log.scheduleId)
}
const formatDate = createDateFormatter(schedule?.timezone)
const jobName = (await xo.getJob(log.jobId, 'backup')).name
const formatDate = createDateFormater(schedule.timezone)
const getTemporalDataMarkdown = createGetTemporalDataMarkdown(formatDate)
if (
(log.status === 'failure' || log.status === 'skipped') &&
log.result !== undefined
) {
let markdown = [
if (log.tasks === undefined) {
const markdown = [
`## Global status: ${log.status}`,
'',
`- **Job ID**: ${log.jobId}`,
`- **Run ID**: ${runJobId}`,
`- **Run ID**: ${log.id}`,
`- **mode**: ${mode}`,
...getTemporalDataMarkdown(log.start, log.end),
`- **Error**: ${log.result.message}`,
...getTemporalDataMarkdown(log.end, log.start, formatDate),
getErrorMarkdown(log),
...getWarningsMarkdown(log.warnings),
'---',
'',
`*${pkg.name} v${pkg.version}*`,
]
addWarnings(markdown, log.warnings)
markdown.push('---', '', `*${pkg.name} v${pkg.version}*`)
markdown = markdown.join('\n')
return this._sendReport({
subject: `[Xen Orchestra] ${
log.status
} Backup report for ${jobName} ${STATUS_ICON[log.status]}`,
markdown,
markdown: toMarkdown(markdown),
nagiosStatus: 2,
nagiosMarkdown: `[Xen Orchestra] [${
log.status
@@ -231,7 +406,7 @@ class BackupReportsXoPlugin {
let nSkipped = 0
let nInterrupted = 0
for (const taskLog of log.tasks) {
if (taskLog.status === 'success' && reportWhen === 'failure') {
if (!force && taskLog.status === 'success' && reportWhen === 'failure') {
continue
}
@@ -244,16 +419,16 @@ class BackupReportsXoPlugin {
`### ${vm !== undefined ? vm.name_label : 'VM not found'}`,
'',
`- **UUID**: ${vm !== undefined ? vm.uuid : vmId}`,
...getTemporalDataMarkdown(taskLog.start, taskLog.end),
...getTemporalDataMarkdown(taskLog.end, taskLog.start, formatDate),
...getWarningsMarkdown(taskLog.warnings),
]
addWarnings(text, taskLog.warnings)
const failedSubTasks = []
const snapshotText = []
const srsText = []
const remotesText = []
for (const subTaskLog of taskLog.tasks || []) {
for (const subTaskLog of taskLog.tasks ?? []) {
if (
subTaskLog.message !== 'export' &&
subTaskLog.message !== 'snapshot'
@@ -262,29 +437,36 @@ class BackupReportsXoPlugin {
}
const icon = STATUS_ICON[subTaskLog.status]
const errorMessage = ` - **Error**: ${get(
subTaskLog.result,
'message'
)}`
const type = subTaskLog.data?.type
const errorMarkdown = getErrorMarkdown(subTaskLog)
if (subTaskLog.message === 'snapshot') {
snapshotText.push(
`- **Snapshot** ${icon}`,
...getTemporalDataMarkdown(subTaskLog.start, subTaskLog.end, 1)
)
} else if (subTaskLog.data.type === 'remote') {
snapshotText.push(`- **Snapshot** ${icon}`, [
...getTemporalDataMarkdown(
subTaskLog.end,
subTaskLog.start,
formatDate
),
])
} else if (type === 'remote') {
const id = subTaskLog.data.id
const remote = await xo.getRemote(id).catch(() => {})
remotesText.push(
` - **${
remote !== undefined ? remote.name : `Remote Not found`
}** (${id}) ${icon}`,
...getTemporalDataMarkdown(subTaskLog.start, subTaskLog.end, 2)
)
addWarnings(remotesText, subTaskLog.warnings, 2)
const remote = await xo.getRemote(id).catch(error => {
logger.warn(error)
})
const title = remote !== undefined ? remote.name : `Remote Not found`
remotesText.push(`- **${title}** (${id}) ${icon}`, [
...getTemporalDataMarkdown(
subTaskLog.end,
subTaskLog.start,
formatDate
),
...getWarningsMarkdown(subTaskLog.warnings),
errorMarkdown,
])
if (subTaskLog.status === 'failure') {
failedSubTasks.push(remote !== undefined ? remote.name : id)
remotesText.push('', errorMessage)
}
} else {
const id = subTaskLog.data.id
@@ -294,14 +476,17 @@ class BackupReportsXoPlugin {
} catch (e) {}
const [srName, srUuid] =
sr !== undefined ? [sr.name_label, sr.uuid] : [`SR Not found`, id]
srsText.push(
` - **${srName}** (${srUuid}) ${icon}`,
...getTemporalDataMarkdown(subTaskLog.start, subTaskLog.end, 2)
)
addWarnings(srsText, subTaskLog.warnings, 2)
srsText.push(`- **${srName}** (${srUuid}) ${icon}`, [
...getTemporalDataMarkdown(
subTaskLog.end,
subTaskLog.start,
formatDate
),
...getWarningsMarkdown(subTaskLog.warnings),
errorMarkdown,
])
if (subTaskLog.status === 'failure') {
failedSubTasks.push(sr !== undefined ? sr.name_label : id)
srsText.push('', errorMessage)
}
}
@@ -313,53 +498,48 @@ class BackupReportsXoPlugin {
return
}
const operationInfoText = []
addWarnings(operationInfoText, operationLog.warnings, 3)
if (operationLog.status === 'success') {
const size = operationLog.result.size
const size = operationLog.result?.size
if (size > 0) {
if (operationLog.message === 'merge') {
globalMergeSize += size
} else {
globalTransferSize += size
}
}
operationInfoText.push(
` - **Size**: ${formatSize(size)}`,
` - **Speed**: ${formatSpeed(
size,
operationLog.end - operationLog.start
)}`
)
} else if (get(operationLog.result, 'message') !== undefined) {
operationInfoText.push(
` - **Error**: ${get(operationLog.result, 'message')}`
)
}
const operationText = [
` - **${operationLog.message}** ${
STATUS_ICON[operationLog.status]
}`,
...getTemporalDataMarkdown(operationLog.start, operationLog.end, 3),
...operationInfoText,
].join('\n')
if (get(subTaskLog, 'data.type') === 'remote') {
`- **${operationLog.message}** ${STATUS_ICON[operationLog.status]}`,
[
...getTemporalDataMarkdown(
operationLog.end,
operationLog.start,
formatDate
),
size > 0 && `- **Size**: ${formatSize(size)}`,
size > 0 &&
`- **Speed**: ${formatSpeed(
size,
operationLog.end - operationLog.start
)}`,
...getWarningsMarkdown(operationLog.warnings),
getErrorMarkdown(operationLog),
],
]
if (type === 'remote') {
remotesText.push(operationText)
remotesText.join('\n')
}
if (get(subTaskLog, 'data.type') === 'SR') {
} else if (type === 'SR') {
srsText.push(operationText)
srsText.join('\n')
}
})
}
if (srsText.length !== 0) {
srsText.unshift(`- **SRs**`)
}
if (remotesText.length !== 0) {
remotesText.unshift(`- **Remotes**`)
}
const subText = [...snapshotText, '', ...srsText, '', ...remotesText]
const subText = [
...snapshotText,
srsText.length !== 0 && `- **SRs**`,
srsText,
remotesText.length !== 0 && `- **Remotes**`,
remotesText,
]
if (taskLog.result !== undefined) {
if (taskLog.status === 'skipped') {
++nSkipped
@@ -369,8 +549,7 @@ class BackupReportsXoPlugin {
taskLog.result.message === UNHEALTHY_VDI_CHAIN_ERROR
? UNHEALTHY_VDI_CHAIN_MESSAGE
: taskLog.result.message
}`,
''
}`
)
nagiosText.push(
`[(Skipped) ${vm !== undefined ? vm.name_label : 'undefined'} : ${
@@ -379,11 +558,7 @@ class BackupReportsXoPlugin {
)
} else {
++nFailures
failedVmsText.push(
...text,
`- **Error**: ${taskLog.result.message}`,
''
)
failedVmsText.push(...text, `- **Error**: ${taskLog.result.message}`)
nagiosText.push(
`[(Failed) ${vm !== undefined ? vm.name_label : 'undefined'} : ${
@@ -394,7 +569,7 @@ class BackupReportsXoPlugin {
} else {
if (taskLog.status === 'failure') {
++nFailures
failedVmsText.push(...text, '', '', ...subText, '')
failedVmsText.push(...text, ...subText)
nagiosText.push(
`[${
vm !== undefined ? vm.name_label : 'undefined'
@@ -402,37 +577,34 @@ class BackupReportsXoPlugin {
)
} else if (taskLog.status === 'interrupted') {
++nInterrupted
interruptedVmsText.push(...text, '', '', ...subText, '')
interruptedVmsText.push(...text, ...subText)
nagiosText.push(
`[(Interrupted) ${vm !== undefined ? vm.name_label : 'undefined'}]`
)
} else {
successfulVmsText.push(...text, '', '', ...subText, '')
successfulVmsText.push(...text, ...subText)
}
}
}
const nVms = log.tasks.length
const nSuccesses = nVms - nFailures - nSkipped - nInterrupted
let markdown = [
const markdown = [
`## Global status: ${log.status}`,
'',
`- **Job ID**: ${log.jobId}`,
`- **Run ID**: ${runJobId}`,
`- **Run ID**: ${log.id}`,
`- **mode**: ${mode}`,
...getTemporalDataMarkdown(log.start, log.end),
...getTemporalDataMarkdown(log.end, log.start, formatDate),
`- **Successes**: ${nSuccesses} / ${nVms}`,
globalTransferSize !== 0 &&
`- **Transfer size**: ${formatSize(globalTransferSize)}`,
globalMergeSize !== 0 &&
`- **Merge size**: ${formatSize(globalMergeSize)}`,
...getWarningsMarkdown(log.warnings),
'',
]
if (globalTransferSize !== 0) {
markdown.push(`- **Transfer size**: ${formatSize(globalTransferSize)}`)
}
if (globalMergeSize !== 0) {
markdown.push(`- **Merge size**: ${formatSize(globalMergeSize)}`)
}
addWarnings(markdown, log.warnings)
markdown.push('')
if (nFailures !== 0) {
markdown.push(
'---',
@@ -457,7 +629,7 @@ class BackupReportsXoPlugin {
)
}
if (nSuccesses !== 0 && reportWhen !== 'failure') {
if (nSuccesses !== 0 && (force || reportWhen !== 'failure')) {
markdown.push(
'---',
'',
@@ -468,9 +640,8 @@ class BackupReportsXoPlugin {
}
markdown.push('---', '', `*${pkg.name} v${pkg.version}*`)
markdown = markdown.join('\n')
return this._sendReport({
markdown,
markdown: toMarkdown(markdown),
subject: `[Xen Orchestra] ${log.status} Backup report for ${jobName} ${
STATUS_ICON[log.status]
}`,
@@ -510,9 +681,9 @@ class BackupReportsXoPlugin {
])
}
_listener(status) {
_legacyVmHandler(status) {
const { calls, timezone, error } = status
const formatDate = createDateFormater(timezone)
const formatDate = createDateFormatter(timezone)
if (status.error !== undefined) {
const [globalStatus, icon] =

View File

@@ -32,7 +32,7 @@
"node": ">=6"
},
"dependencies": {
"http-request-plus": "^0.7.2",
"http-request-plus": "^0.8.0",
"jsonrpc-websocket-client": "^0.4.1"
},
"devDependencies": {

View File

@@ -32,9 +32,9 @@
"node": ">=6"
},
"dependencies": {
"nodemailer": "^5.0.0",
"nodemailer": "^6.1.0",
"nodemailer-markdown": "^1.0.1",
"promise-toolbox": "^0.11.0"
"promise-toolbox": "^0.12.1"
},
"devDependencies": {
"@babel/cli": "^7.0.0",

View File

@@ -33,7 +33,7 @@
"node": ">=6"
},
"dependencies": {
"promise-toolbox": "^0.11.0",
"promise-toolbox": "^0.12.1",
"slack-node": "^0.1.8"
},
"devDependencies": {

View File

@@ -1,6 +1,6 @@
{
"name": "xo-server-usage-report",
"version": "0.7.1",
"version": "0.7.2",
"license": "AGPL-3.0",
"description": "",
"keywords": [
@@ -39,10 +39,10 @@
"@xen-orchestra/cron": "^1.0.3",
"@xen-orchestra/log": "^0.1.4",
"handlebars": "^4.0.6",
"html-minifier": "^3.5.8",
"html-minifier": "^4.0.0",
"human-format": "^0.10.0",
"lodash": "^4.17.4",
"promise-toolbox": "^0.11.0"
"promise-toolbox": "^0.12.1"
},
"devDependencies": {
"@babel/cli": "^7.0.0",

View File

@@ -494,7 +494,7 @@ async function getHostsMissingPatches({ runningHosts, xo }) {
map(runningHosts, async host => {
let hostsPatches = await xo
.getXapi(host)
.listMissingPoolPatchesOnHost(host._xapiId)
.listMissingPatches(host._xapiId)
.catch(error => {
console.error(
'[WARN] error on fetching hosts missing patches:',

View File

@@ -4,6 +4,11 @@
// ===================================================================
// https://expressjs.com/en/advanced/best-practice-performance.html#set-node_env-to-production
if (process.env.NODE_ENV === undefined) {
process.env.NODE_ENV = 'production'
}
// Better stack traces if possible.
require('../better-stacks')

View File

@@ -9,6 +9,18 @@ datadir = '/var/lib/xo-server/data'
# Necessary for external authentication providers.
createUserOnFirstSignin = true
# XAPI does not support chunked encoding in HTTP requests which is necessary
# when the content length is not know which is the case for many backup related
# operations in XO.
#
# It's possible to work-around this for VHDs because it's possible to guess
# their size just by looking at the beginning of the stream.
#
# But it is a guess, not a certainty, it depends on how the VHDs are formatted
# by XenServer, therefore it's disabled for the moment but can be enabled
# specifically for a user if necessary.
guessVhdSizeOnImport = false
# Whether API logs should contains the full request/response on
# errors.
#
@@ -21,6 +33,28 @@ verboseApiLogsOnErrors = false
[apiWebSocketOptions]
perMessageDeflate = { threshold = 524288 } # 512kiB
[authentication]
defaultTokenValidity = '30 days'
maxTokenValidity = '0.5 year'
# Default to `maxTokenValidity`
#permanentCookieValidity = '30 days'
# Default to `undefined`, ie as long as the browser is not restarted
#
# https://developer.mozilla.org/fr/docs/Web/HTTP/Headers/Set-Cookie#Session_cookie
#sessionCookieValidity = '10 hours'
[backup]
# Delay for which backups listing on a remote is cached
listingDebounce = '1 min'
# Helmet handles HTTP security via headers
#
# https://helmetjs.github.io/docs/
#[http.helmet.hsts]
#includeSubDomains = false
[[http.listen]]
port = 80

View File

@@ -1,6 +1,6 @@
{
"name": "xo-server",
"version": "5.36.1",
"version": "5.40.0",
"license": "AGPL-3.0",
"description": "Server part of Xen-Orchestra",
"keywords": [
@@ -35,8 +35,9 @@
"@iarna/toml": "^2.2.1",
"@xen-orchestra/async-map": "^0.0.0",
"@xen-orchestra/cron": "^1.0.3",
"@xen-orchestra/defined": "^0.0.0",
"@xen-orchestra/emit-async": "^0.0.0",
"@xen-orchestra/fs": "^0.7.1",
"@xen-orchestra/fs": "^0.8.0",
"@xen-orchestra/log": "^0.1.4",
"@xen-orchestra/mixin": "^0.0.0",
"ajv": "^6.1.1",
@@ -47,12 +48,13 @@
"blocked": "^1.2.1",
"bluebird": "^3.5.1",
"body-parser": "^1.18.2",
"compression": "^1.7.3",
"connect-flash": "^0.1.1",
"cookie": "^0.3.1",
"cookie-parser": "^1.4.3",
"d3-time-format": "^2.1.1",
"debug": "^4.0.1",
"decorator-synchronized": "^0.3.0",
"decorator-synchronized": "^0.5.0",
"deptree": "^1.0.0",
"escape-string-regexp": "^1.0.5",
"event-to-promise": "^0.8.0",
@@ -69,11 +71,11 @@
"helmet": "^3.9.0",
"highland": "^2.11.1",
"http-proxy": "^1.16.2",
"http-request-plus": "^0.7.2",
"http-request-plus": "^0.8.0",
"http-server-plus": "^0.10.0",
"human-format": "^0.10.0",
"is-redirect": "^1.0.0",
"iterable-backoff": "^0.0.0",
"iterable-backoff": "^0.1.0",
"jest-worker": "^24.0.0",
"js-yaml": "^3.10.0",
"json-rpc-peer": "^0.15.3",
@@ -93,17 +95,18 @@
"ms": "^2.1.1",
"multikey-hash": "^1.0.4",
"ndjson": "^1.5.0",
"otplib": "^10.0.1",
"otplib": "^11.0.0",
"parse-pairs": "^0.2.2",
"partial-stream": "0.0.0",
"passport": "^0.4.0",
"passport-local": "^1.0.0",
"pretty-format": "^23.0.0",
"promise-toolbox": "^0.11.0",
"pretty-format": "^24.0.0",
"promise-toolbox": "^0.12.1",
"proxy-agent": "^3.0.0",
"pug": "^2.0.0-rc.4",
"pump": "^3.0.0",
"pw": "^0.0.4",
"readable-stream": "^3.2.0",
"redis": "^2.8.0",
"schema-inspector": "^1.6.8",
"semver": "^5.4.1",
@@ -112,20 +115,20 @@
"stack-chain": "^2.0.0",
"stoppable": "^1.0.5",
"struct-fu": "^1.2.0",
"tar-stream": "^1.5.5",
"tar-stream": "^2.0.1",
"through2": "^3.0.0",
"tmp": "^0.0.33",
"tmp": "^0.1.0",
"uuid": "^3.0.1",
"value-matcher": "^0.2.0",
"vhd-lib": "^0.5.1",
"vhd-lib": "^0.6.1",
"ws": "^6.0.0",
"xen-api": "^0.24.3",
"xen-api": "^0.25.1",
"xml2js": "^0.4.19",
"xo-acl-resolver": "^0.4.1",
"xo-collection": "^0.4.1",
"xo-common": "^0.2.0",
"xo-remote-parser": "^0.5.0",
"xo-vmdk-to-vhd": "^0.1.6",
"xo-vmdk-to-vhd": "^0.1.7",
"yazl": "^2.4.3"
},
"devDependencies": {

View File

@@ -44,6 +44,14 @@
#=====================================================================
# Directory containing the database of XO.
# Currently used for logs.
#
# Default: '/var/lib/xo-server/data'
#datadir = '/var/lib/xo-server/data'
#=====================================================================
# Configuration of the embedded HTTP server.
[http]
# If set to true, all HTTP traffic will be redirected to the first HTTPs
@@ -136,12 +144,6 @@ port = 80
# del = '3dda29ad-3015-44f9-b13b-fa570de92489'
# srem = '3fd758c9-5610-4e9d-a058-dbf4cb6d8bf0'
# Directory containing the database of XO.
# Currently used for logs.
#
# Default: '/var/lib/xo-server/data'
#datadir = '/var/lib/xo-server/data'
#=====================================================================
# Configuration for remotes

View File

@@ -0,0 +1,87 @@
class Node {
constructor(value) {
this.children = new Map()
this.value = value
}
}
function del(node, i, keys) {
if (i === keys.length) {
if (node instanceof Node) {
node.value = undefined
return node
}
return
}
if (!(node instanceof Node)) {
return node
}
const key = keys[i]
const { children } = node
const child = children.get(key)
if (child === undefined) {
return node
}
const newChild = del(child, i + 1, keys)
if (newChild === undefined) {
if (children.size === 1) {
return node.value
}
children.delete(key)
} else if (newChild !== child) {
children.set(key, newChild)
}
return node
}
function get(node, i, keys) {
return i === keys.length
? node instanceof Node
? node.value
: node
: node instanceof Node
? get(node.children.get(keys[i]), i + 1, keys)
: undefined
}
function set(node, i, keys, value) {
if (i === keys.length) {
if (node instanceof Node) {
node.value = value
return node
}
return value
}
const key = keys[i]
if (!(node instanceof Node)) {
node = new Node(node)
node.children.set(key, set(undefined, i + 1, keys, value))
} else {
const { children } = node
const child = children.get(key)
const newChild = set(child, i + 1, keys, value)
if (newChild !== child) {
children.set(key, newChild)
}
}
return node
}
export default class MultiKeyMap {
constructor() {
// each node is either a value or a Node if it contains children
this._root = undefined
}
delete(keys) {
this._root = del(this._root, 0, keys)
}
get(keys) {
return get(this._root, 0, keys)
}
set(keys, value) {
this._root = set(this._root, 0, keys, value)
}
}

View File

@@ -0,0 +1,22 @@
import asyncIteratorToStream from 'async-iterator-to-stream'
function* values(object) {
const keys = Object.keys(object)
for (let i = 0, n = keys.length; i < n; ++i) {
yield object[keys[i]]
}
}
/**
* Creates a NDJSON stream of all the values
*
* @param {(Array|Object)} collection
*/
module.exports = asyncIteratorToStream(function*(collection) {
for (const value of Array.isArray(collection)
? collection
: values(collection)) {
yield JSON.stringify(value)
yield '\n'
}
})

View File

@@ -0,0 +1,3 @@
// Ensure the value is an array, wrap it if necessary.
export default value =>
value === undefined ? [] : Array.isArray(value) ? value : [value]

View File

@@ -0,0 +1,21 @@
/* eslint-env jest */
import ensureArray from './_ensureArray'
describe('ensureArray()', function() {
it('wrap the value in an array', function() {
const value = 'foo'
expect(ensureArray(value)).toEqual([value])
})
it('returns an empty array for undefined', function() {
expect(ensureArray(undefined)).toEqual([])
})
it('returns the object itself if is already an array', function() {
const array = ['foo', 'bar', 'baz']
expect(ensureArray(array)).toBe(array)
})
})

View File

@@ -0,0 +1,39 @@
import ensureArray from './_ensureArray'
import MultiKeyMap from './_MultiKeyMap'
function removeCacheEntry(cache, keys) {
cache.delete(keys)
}
function scheduleRemoveCacheEntry(keys, expires) {
const delay = expires - Date.now()
if (delay <= 0) {
removeCacheEntry(this, keys)
} else {
setTimeout(removeCacheEntry, delay, this, keys)
}
}
const defaultKeyFn = () => []
// debounce an async function so that all subsequent calls in a delay receive
// the same result
//
// similar to `p-debounce` with `leading` set to `true` but with key support
export default (fn, delay, keyFn = defaultKeyFn) => {
const cache = new MultiKeyMap()
return function() {
const keys = ensureArray(keyFn.apply(this, arguments))
let promise = cache.get(keys)
if (promise === undefined) {
cache.set(keys, (promise = fn.apply(this, arguments)))
const remove = scheduleRemoveCacheEntry.bind(
cache,
keys,
Date.now() + delay
)
promise.then(remove, remove)
}
return promise
}
}

View File

@@ -0,0 +1,12 @@
import ms from 'ms'
export default value => {
if (typeof value === 'number') {
return value
}
const duration = ms(value)
if (duration === undefined) {
throw new TypeError(`not a valid duration: ${duration}`)
}
return duration
}

View File

@@ -1,5 +1,8 @@
import { basename } from 'path'
import { fromCallback } from 'promise-toolbox'
import { pipeline } from 'readable-stream'
import createNdJsonStream from '../_createNdJsonStream'
import { safeDateFormat } from '../utils'
export function createJob({ schedules, ...job }) {
@@ -150,12 +153,26 @@ runJob.params = {
// -----------------------------------------------------------------------------
export function getAllLogs() {
return this.getBackupNgLogs()
async function handleGetAllLogs(req, res) {
const logs = await this.getBackupNgLogs()
res.set('Content-Type', 'application/json')
return fromCallback(cb => pipeline(createNdJsonStream(logs), res, cb))
}
export function getAllLogs({ ndjson = false }) {
return ndjson
? this.registerHttpRequest(handleGetAllLogs).then($getFrom => ({
$getFrom,
}))
: this.getBackupNgLogs()
}
getAllLogs.permission = 'admin'
getAllLogs.params = {
ndjson: { type: 'boolean', optional: true },
}
export function getLogs({ after, before, limit, ...filter }) {
return this.getBackupNgLogsSorted({ after, before, limit, filter })
}

View File

@@ -199,59 +199,6 @@ forget.resolve = {
// -------------------------------------------------------------------
// Returns an array of missing new patches in the host
// Returns an empty array if up-to-date
// Throws an error if the host is not running the latest XS version
export function listMissingPatches({ host }) {
return this.getXapi(host).listMissingPoolPatchesOnHost(host._xapiId)
}
listMissingPatches.description =
'return an array of missing new patches in the host'
listMissingPatches.params = {
host: { type: 'string' },
}
listMissingPatches.resolve = {
host: ['host', 'host', 'view'],
}
// -------------------------------------------------------------------
export function installPatch({ host, patch: patchUuid }) {
return this.getXapi(host).installPoolPatchOnHost(patchUuid, host._xapiId)
}
installPatch.description = 'install a patch on an host'
installPatch.params = {
host: { type: 'string' },
patch: { type: 'string' },
}
installPatch.resolve = {
host: ['host', 'host', 'administrate'],
}
// -------------------------------------------------------------------
export function installAllPatches({ host }) {
return this.getXapi(host).installAllPoolPatchesOnHost(host._xapiId)
}
installAllPatches.description = 'install all the missing patches on a host'
installAllPatches.params = {
host: { type: 'string' },
}
installAllPatches.resolve = {
host: ['host', 'host', 'administrate'],
}
// -------------------------------------------------------------------
export function emergencyShutdownHost({ host }) {
return this.getXapi(host).emergencyShutdownHost(host._xapiId)
}

View File

@@ -101,3 +101,42 @@ runJob.params = {
type: 'string',
},
}
export async function list({ remotes }) {
return this.listMetadataBackups(remotes)
}
list.permission = 'admin'
list.params = {
remotes: {
type: 'array',
items: {
type: 'string',
},
},
}
export function restore({ id }) {
return this.restoreMetadataBackup(id)
}
restore.permission = 'admin'
restore.params = {
id: {
type: 'string',
},
}
function delete_({ id }) {
return this.deleteMetadataBackup(id)
}
delete_.permission = 'admin'
delete_.params = {
id: {
type: 'string',
},
}
export { delete_ as delete }

View File

@@ -85,34 +85,35 @@ createBonded.description =
// ===================================================================
export async function set({
network,
automatic,
defaultIsLocked,
name_description: nameDescription,
name_label: nameLabel,
defaultIsLocked,
id,
network,
}) {
await this.getXapi(network).setNetworkProperties(network._xapiId, {
automatic,
defaultIsLocked,
nameDescription,
nameLabel,
defaultIsLocked,
})
}
set.params = {
id: {
type: 'string',
automatic: {
type: 'boolean',
optional: true,
},
name_label: {
type: 'string',
defaultIsLocked: {
type: 'boolean',
optional: true,
},
name_description: {
type: 'string',
optional: true,
},
defaultIsLocked: {
type: 'boolean',
name_label: {
type: 'string',
optional: true,
},
}

View File

@@ -1,6 +1,4 @@
import { format } from 'json-rpc-peer'
import { differenceBy } from 'lodash'
import { mapToArray } from '../utils'
// ===================================================================
@@ -75,40 +73,43 @@ setPoolMaster.resolve = {
// -------------------------------------------------------------------
export async function installPatch({ pool, patch: patchUuid }) {
await this.getXapi(pool).installPoolPatchOnAllHosts(patchUuid)
// Returns an array of missing new patches in the host
// Returns an empty array if up-to-date
export function listMissingPatches({ host }) {
return this.getXapi(host).listMissingPatches(host._xapiId)
}
installPatch.params = {
pool: {
type: 'string',
},
patch: {
type: 'string',
},
listMissingPatches.description =
'return an array of missing new patches in the host'
listMissingPatches.params = {
host: { type: 'string' },
}
installPatch.resolve = {
pool: ['pool', 'pool', 'administrate'],
listMissingPatches.resolve = {
host: ['host', 'host', 'view'],
}
// -------------------------------------------------------------------
export async function installAllPatches({ pool }) {
await this.getXapi(pool).installAllPoolPatchesOnAllHosts()
export async function installPatches({ pool, patches, hosts }) {
await this.getXapi(hosts === undefined ? pool : hosts[0]).installPatches({
patches,
hosts,
})
}
installAllPatches.params = {
pool: {
type: 'string',
},
installPatches.params = {
pool: { type: 'string', optional: true },
patches: { type: 'array', optional: true },
hosts: { type: 'array', optional: true },
}
installAllPatches.resolve = {
installPatches.resolve = {
pool: ['pool', 'pool', 'administrate'],
}
installAllPatches.description =
'Install automatically all patches for every hosts of a pool'
installPatches.description = 'Install patches on hosts'
// -------------------------------------------------------------------
@@ -144,6 +145,22 @@ export { uploadPatch as patch }
// -------------------------------------------------------------------
export async function getPatchesDifference({ source, target }) {
return this.getPatchesDifference(target.id, source.id)
}
getPatchesDifference.params = {
source: { type: 'string' },
target: { type: 'string' },
}
getPatchesDifference.resolve = {
source: ['source', 'host', 'view'],
target: ['target', 'host', 'view'],
}
// -------------------------------------------------------------------
export async function mergeInto({ source, target, force }) {
const sourceHost = this.getObject(source.master)
const targetHost = this.getObject(target.master)
@@ -156,21 +173,21 @@ export async function mergeInto({ source, target, force }) {
)
}
const sourcePatches = sourceHost.patches
const targetPatches = targetHost.patches
const counterDiff = differenceBy(sourcePatches, targetPatches, 'name')
const counterDiff = this.getPatchesDifference(source.master, target.master)
if (counterDiff.length > 0) {
throw new Error('host has patches that are not applied on target pool')
const targetXapi = this.getXapi(target)
await targetXapi.installPatches({
patches: await targetXapi.findPatches(counterDiff),
})
}
const diff = differenceBy(targetPatches, sourcePatches, 'name')
// TODO: compare UUIDs
await this.getXapi(source).installSpecificPatchesOnHost(
mapToArray(diff, 'name'),
sourceHost._xapiId
)
const diff = this.getPatchesDifference(target.master, source.master)
if (diff.length > 0) {
const sourceXapi = this.getXapi(source)
await sourceXapi.installPatches({
patches: await sourceXapi.findPatches(diff),
})
}
await this.mergeXenPools(source._xapiId, target._xapiId, force)
}

View File

@@ -1,16 +1,20 @@
import { deprecate } from 'util'
import { getUserPublicProperties } from '../utils'
import { invalidCredentials } from 'xo-common/api-errors'
// ===================================================================
export async function signIn(credentials) {
const user = await this.authenticateUser(credentials)
if (!user) {
throw invalidCredentials()
const { session } = this
const { user, expiration } = await this.authenticateUser(credentials)
session.set('user_id', user.id)
if (expiration === undefined) {
session.unset('expiration')
} else {
session.set('expiration', expiration)
}
this.session.set('user_id', user.id)
return getUserPublicProperties(user)
}

View File

@@ -1,8 +1,9 @@
import asyncMap from '@xen-orchestra/async-map'
import { some } from 'lodash'
import ensureArray from '../_ensureArray'
import { asInteger } from '../xapi/utils'
import { ensureArray, forEach, parseXml } from '../utils'
import { forEach, parseXml } from '../utils'
// ===================================================================

View File

@@ -193,6 +193,11 @@ create.params = {
optional: true,
},
networkConfig: {
type: 'string',
optional: true,
},
coreOs: {
type: 'boolean',
optional: true,
@@ -612,6 +617,8 @@ set.params = {
share: { type: 'boolean', optional: true },
startDelay: { type: 'integer', optional: true },
// set the VM network interface controller
nicType: { type: ['string', 'null'], optional: true },
}
@@ -1461,14 +1468,25 @@ getCloudInitConfig.resolve = {
// -------------------------------------------------------------------
export async function createCloudInitConfigDrive({ vm, sr, config, coreos }) {
export async function createCloudInitConfigDrive({
config,
coreos,
networkConfig,
sr,
vm,
}) {
const xapi = this.getXapi(vm)
if (coreos) {
// CoreOS is a special CloudConfig drive created by XS plugin
await xapi.createCoreOsCloudInitConfigDrive(vm._xapiId, sr._xapiId, config)
} else {
// use generic Cloud Init drive
await xapi.createCloudInitConfigDrive(vm._xapiId, sr._xapiId, config)
await xapi.createCloudInitConfigDrive(
vm._xapiId,
sr._xapiId,
config,
networkConfig
)
}
}
@@ -1476,6 +1494,7 @@ createCloudInitConfigDrive.params = {
vm: { type: 'string' },
sr: { type: 'string' },
config: { type: 'string' },
networkConfig: { type: 'string', optional: true },
}
createCloudInitConfigDrive.resolve = {

View File

@@ -1,5 +1,8 @@
import getStream from 'get-stream'
import { forEach } from 'lodash'
import { fromCallback } from 'promise-toolbox'
import { pipeline } from 'readable-stream'
import createNdJsonStream from '../_createNdJsonStream'
// ===================================================================
@@ -17,6 +20,7 @@ export async function exportConfig() {
(req, res) => {
res.writeHead(200, 'OK', {
'content-disposition': 'attachment',
'content-type': 'application/json',
})
return this.exportConfig()
@@ -32,11 +36,9 @@ exportConfig.permission = 'admin'
// -------------------------------------------------------------------
function handleGetAllObjects(req, res, { filter, limit }) {
forEach(this.getObjects({ filter, limit }), object => {
res.write(JSON.stringify(object))
res.write('\n')
})
res.end()
const objects = this.getObjects({ filter, limit })
res.set('Content-Type', 'application/json')
return fromCallback(cb => pipeline(createNdJsonStream(objects), res, cb))
}
export function getAllObjects({ filter, limit, ndjson = false }) {

View File

@@ -10,8 +10,9 @@ import { invalidParameters } from 'xo-common/api-errors'
import { v4 as generateUuid } from 'uuid'
import { includes, remove, filter, find, range } from 'lodash'
import ensureArray from '../_ensureArray'
import { asInteger } from '../xapi/utils'
import { parseXml, ensureArray } from '../utils'
import { parseXml } from '../utils'
const log = createLogger('xo:xosan')

View File

@@ -3,6 +3,7 @@ import assert from 'assert'
import authenticator from 'otplib/authenticator'
import bind from 'lodash/bind'
import blocked from 'blocked'
import compression from 'compression'
import createExpress from 'express'
import createLogger from '@xen-orchestra/log'
import crypto from 'crypto'
@@ -14,17 +15,20 @@ import pw from 'pw'
import serveStatic from 'serve-static'
import startsWith from 'lodash/startsWith'
import stoppable from 'stoppable'
import WebServer from 'http-server-plus'
import WebSocket from 'ws'
import { compile as compilePug } from 'pug'
import { createServer as createProxyServer } from 'http-proxy'
import { fromEvent } from 'promise-toolbox'
import { ifDef } from '@xen-orchestra/defined'
import { join as joinPath } from 'path'
import JsonRpcPeer from 'json-rpc-peer'
import { invalidCredentials } from 'xo-common/api-errors'
import { ensureDir, readdir, readFile } from 'fs-extra'
import WebServer from 'http-server-plus'
import parseDuration from './_parseDuration'
import Xo from './xo'
import {
forEach,
@@ -89,7 +93,9 @@ async function loadConfiguration() {
function createExpressApp(config) {
const app = createExpress()
app.use(helmet())
app.use(helmet(config.http.helmet))
app.use(compression())
// Registers the cookie-parser and express-session middlewares,
// necessary for connect-flash.
@@ -118,7 +124,7 @@ function createExpressApp(config) {
return app
}
async function setUpPassport(express, xo) {
async function setUpPassport(express, xo, { authentication: authCfg }) {
const strategies = { __proto__: null }
xo.registerPassportStrategy = strategy => {
passport.use(strategy)
@@ -176,16 +182,24 @@ async function setUpPassport(express, xo) {
}
})
const PERMANENT_VALIDITY = ifDef(
authCfg.permanentCookieValidity,
parseDuration
)
const SESSION_VALIDITY = ifDef(authCfg.sessionCookieValidity, parseDuration)
const setToken = async (req, res, next) => {
const { user, isPersistent } = req.session
const token = (await xo.createAuthenticationToken({ userId: user.id })).id
const token = await xo.createAuthenticationToken({
expiresIn: isPersistent ? PERMANENT_VALIDITY : SESSION_VALIDITY,
userId: user.id,
})
// Persistent cookie ? => 1 year
// Non-persistent : external provider as Github, Twitter...
res.cookie(
'token',
token,
isPersistent ? { maxAge: 1000 * 60 * 60 * 24 * 365 } : undefined
token.id,
// a session (non-permanent) cookie must not have an expiration date
// because it must not survive browser restart
isPersistent ? { expires: new Date(token.expiration) } : undefined
)
delete req.session.isPersistent
@@ -237,7 +251,7 @@ async function setUpPassport(express, xo) {
xo.registerPassportStrategy(
new LocalStrategy(async (username, password, done) => {
try {
const user = await xo.authenticateUser({ username, password })
const { user } = await xo.authenticateUser({ username, password })
done(null, user)
} catch (error) {
done(null, false, { message: error.message })
@@ -356,6 +370,7 @@ async function makeWebServerListen(
;[opts.cert, opts.key] = await Promise.all([readFile(cert), readFile(key)])
if (opts.key.includes('ENCRYPTED')) {
opts.passphrase = await new Promise(resolve => {
// eslint-disable-next-line no-console
console.log('Encrypted key %s', key)
process.stdout.write(`Enter pass phrase: `)
pw(resolve)
@@ -503,6 +518,11 @@ const setUpApi = (webServer, xo, config) => {
// Connect the WebSocket to the JSON-RPC server.
socket.on('message', message => {
const expiration = connection.get('expiration', undefined)
if (expiration !== undefined && expiration < Date.now()) {
return void connection.close()
}
jsonRpc.write(message)
})
@@ -550,7 +570,7 @@ const setUpConsoleProxy = (webServer, xo) => {
{
const { token } = parseCookies(req.headers.cookie)
const user = await xo.authenticateUser({ token })
const { user } = await xo.authenticateUser({ token })
if (!(await xo.hasPermissions(user.id, [[id, 'operate']]))) {
throw invalidCredentials()
}
@@ -570,6 +590,9 @@ const setUpConsoleProxy = (webServer, xo) => {
proxyConsole(connection, vmConsole, xapi.sessionId)
})
} catch (error) {
try {
socket.end()
} catch (_) {}
console.error((error && error.stack) || error)
}
})
@@ -667,7 +690,7 @@ export default async function main(args) {
// Everything above is not protected by the sign in, allowing xo-cli
// to work properly.
await setUpPassport(express, xo)
await setUpPassport(express, xo, config)
// Attaches express to the web server.
webServer.on('request', express)

Some files were not shown because too many files have changed in this diff Show More