Compare commits


200 Commits

Author SHA1 Message Date
badrAZ
d505401446 feat(xo-web): v5.38.0 2019-03-29 14:37:25 +01:00
badrAZ
fafc24aeae feat(xo-server): v5.38.0 2019-03-29 14:35:48 +01:00
badrAZ
f78ef0d208 feat(xo-server-usage-report): v0.7.2 2019-03-29 14:33:08 +01:00
badrAZ
8384cc3652 feat(@xen-orchestra/fs): v0.8.0 2019-03-29 14:27:25 +01:00
badrAZ
60aa18a229 feat(vhd-lib): v0.6.0 2019-03-29 14:11:09 +01:00
badrAZ
3d64b42a89 feat(xen-api): v0.24.6 2019-03-29 14:05:14 +01:00
badrAZ
b301997d4b feat(xo-web): ability to restore a metadata backup (#4023)
Fixes #4004
2019-03-29 13:54:54 +01:00
Enishowk
ab34743250 feat(xo-web/hosts): suggest XCP-ng as alternative to XS Free (#4094)
Fixes #4091
2019-03-29 11:59:52 +01:00
badrAZ
bc14a1d167 feat(xo-web/backup-ng): ability to set the full backup interval (#4099)
Fixes #1783
2019-03-29 11:43:37 +01:00
badrAZ
2886ec116f feat(xo-server/metadata-backups): ability to restore metadata backup (#4096)
See #4004
2019-03-29 11:21:03 +01:00
Julien Fontanet
c2beb2a5fa chore(server/backup-ng-logs): initial documentation 2019-03-29 11:03:34 +01:00
Nicolas Raynaud
d6ac10f527 feat(xo-web/vm-import): improve VM import wording (#4020) 2019-03-29 09:23:39 +01:00
Julien Fontanet
9dcd8a707a feat(xen-api): add connected/disconnected signals 2019-03-28 18:39:33 +01:00
Julien Fontanet
e1e97ef158 chore(xen-api): set empty sessionId to undefined instead of null 2019-03-28 18:39:28 +01:00
Julien Fontanet
5d6b37f81a fix(xen-api/connect): dont stay disconnecting on failure 2019-03-28 18:19:50 +01:00
Julien Fontanet
e1da08ba38 chore(xen-api/connect): assert initially disconnected 2019-03-28 18:19:18 +01:00
Julien Fontanet
1dfb50fefd feat(xo-server/backup): fullInterval setting (#4086)
See #4083
2019-03-28 18:10:05 +01:00
Julien Fontanet
5c06ebc9c8 feat(xen-api/{,dis}connect): dont fail if already in expected state 2019-03-28 17:38:12 +01:00
Julien Fontanet
52a9270fb0 feat(xen-api): coalesce connect calls 2019-03-28 17:30:26 +01:00
Julien Fontanet
82247d7422 chore(xen-api): various changes 2019-03-28 17:30:25 +01:00
Julien Fontanet
b34688043f chore(xen-api): rewrite barrier and createTask 2019-03-28 17:30:24 +01:00
Julien Fontanet
ce4bcbd19d chore(xen-api): move more methods 2019-03-28 17:30:24 +01:00
Pierre Donias
cde9a02c32 fix(xo-server,xo-web,xo-server-usage-report): patches (#4077)
See #2565
See #3655
Fixes #2188
Fixes #3777
Fixes #3783
Fixes #3934
Fixes support#1228
Fixes support#1338
Fixes support#1362

- mergeInto: fix auto-patching on XS < 7.2
- mergeInto: homogenize both the host and pool's patches
- correctly install specific patches
- XCP-ng: fix "xcp-ng-updater not installed" bug
2019-03-28 17:05:04 +01:00
Julien Fontanet
fe1da4ea12 chore(xen-api): _addObject → _addRecordToCache, _removeObject → _removeRecordFromCache 2019-03-28 16:17:53 +01:00
Julien Fontanet
a73306817b chore(xen-api): move more methods 2019-03-28 16:15:09 +01:00
Julien Fontanet
54e683d3d4 chore(xen-api): move getField to object handling helpers section 2019-03-28 16:01:10 +01:00
Enishowk
f49910ca82 feat(xo-web, xo-server): display link to pool (#4045)
Fixes #4041
2019-03-28 15:42:37 +01:00
Julien Fontanet
4052f7f736 chore(xen-api): regroup HTTP requests 2019-03-28 13:58:23 +01:00
Julien Fontanet
b47e097983 feat(xen-api/{get,put}Resource): add inactivity detection (#4090) 2019-03-28 13:55:56 +01:00
Julien Fontanet
e44dbfb2a4 fix(xen-api/examples): use isOpaqueRef private module 2019-03-28 13:30:08 +01:00
Julien Fontanet
7d69dd9400 fix(xen-api): add missing Babel plugin 2019-03-28 12:21:55 +01:00
Julien Fontanet
e6aae8fcfa chore(xen-api): regroup object handling helpers 2019-03-28 12:19:08 +01:00
Julien Fontanet
da800b3391 chore(xo-collection): minor improvements (#4089) 2019-03-28 12:15:04 +01:00
Julien Fontanet
3a574bcecc chore(xen-api): clean call/callAsync code 2019-03-28 12:14:03 +01:00
Julien Fontanet
1bb0e234e7 chore(xen-api): modularize (#4088) 2019-03-28 11:17:25 +01:00
Julien Fontanet
b7e14ebf2a fix(xo-server/snapshotVm): dont retry and unconditionaly clean (#4075)
Fixes #4074
2019-03-28 10:54:50 +01:00
Nicolas Raynaud
2af1207702 feat(vhd-lib,xo-server): guess VHD size on import (#3726) 2019-03-28 10:16:28 +01:00
Julien Fontanet
ecfed30e6e fix(xo-web/JSON schema object input): clear when un-use (#4076) 2019-03-28 10:05:15 +01:00
Enishowk
d06c3e3dd8 fix(xo-web/smart-backup): StringNode → RegExpNode to anchor strings (#4085)
Fixes #4078
2019-03-27 22:11:23 +01:00
Julien Fontanet
16b3fbeb16 fix(scripts/travis-tests): integration tests on branches 2019-03-27 15:45:16 +01:00
Julien Fontanet
0938804947 fix(xo-server/worker): forget remote after use (#4079)
Fixes xoa-support#1378
Fixes xoa-support#1384
Fixes xoa-support#1399
2019-03-27 10:52:42 +01:00
Julien Fontanet
851bcf9816 feat(xo-server/api): close connection when session expires (#4071)
See xoa-support#1389
2019-03-27 10:36:15 +01:00
Julien Fontanet
9f6fc785bc fix(xo-server/remotes): dont sync unnecessarily 2019-03-27 10:04:01 +01:00
Julien Fontanet
56636bf5d4 chore(xo-server/MultiKeyMap): better implementation (#4070)
No longer unnecessarily uses a map per entry and avoids creating Nodes as much as possible.
2019-03-26 17:50:22 +01:00
Julien Fontanet
3899a65167 fix(xo-server/plugin.configure): properly merge previous config 2019-03-26 17:29:33 +01:00
Rajaa.BARHTAOUI
628e53c1c3 feat(xo-web/settings/plugins): display plugin count (#4050)
Fixes #4008
2019-03-26 16:55:55 +01:00
Enishowk
9fa424dd8d fix(xo-web/remotes): test if writeRate or readRate are defined (#4073)
Fixes #4072
2019-03-26 15:35:01 +01:00
Julien Fontanet
3e6f2eecfa chore(xo-server/index): allow console 2019-03-26 09:38:43 +01:00
Julien Fontanet
cc655c8ba8 feat(xo-server): pDebounceWithKey (#4066) 2019-03-25 17:36:41 +01:00
Julien Fontanet
78aa0474ee chore(ESLint): ban console logs 2019-03-25 17:02:40 +01:00
Enishowk
9caefa2f49 fix(xo-web/remotes): remove console.log (#4065) 2019-03-25 17:02:21 +01:00
Julien Fontanet
478726fa3b chore(xo-server/ensureArray): move into own module 2019-03-25 16:48:15 +01:00
Julien Fontanet
f64917ec52 feat(xo-server): configurable session validity (#4059)
See xoa-support#1389
2019-03-25 16:24:27 +01:00
badrAZ
2bc25f91c4 chore(xo-server/XapiStats): improve the cache implementation (#3859) 2019-03-25 10:45:54 +01:00
Julien Fontanet
623d7ffe2f feat(cloud config): Config Drive datasource as a fallback (#4053)
Fixes xoa-support#1179
2019-03-25 09:34:21 +01:00
Julien Fontanet
07510b5099 chore(xo-server/authenticateUser): better doc and explicit tests 2019-03-22 17:58:00 +01:00
Julien Fontanet
9f21f9a7bc chore(xo-server/authenticateUser): throws invalidCredentials instead of returning false 2019-03-22 17:56:58 +01:00
Julien Fontanet
93da70709e fix(xo-web): dont try reconnecting websocket on sign out 2019-03-22 17:34:46 +01:00
Julien Fontanet
00436e744a fix(xo-server/_parseDuration): add missing file
Related to 6baef2450
2019-03-22 16:19:09 +01:00
Julien Fontanet
1e642fc512 chore(xo-server/store): dont access xo._config 2019-03-22 15:45:09 +01:00
Julien Fontanet
6baef2450c feat(xo-server/authentication): configurable token validity 2019-03-22 15:29:11 +01:00
Julien Fontanet
600f34f85a fix(xo-server/console proxy): close socket on authentication error 2019-03-22 11:43:52 +01:00
Julien Fontanet
6c0c6bc5c4 fix(xo-server-recover-account): remove OTP (#4055)
Otherwise it's not possible to recover if the OTP setting has been lost.
2019-03-22 11:10:16 +01:00
Enishowk
fcd62ed3cd feat(remote): add read/write speeds on remote (#4015)
Fixes #3991
2019-03-21 18:25:03 +01:00
Julien Fontanet
785f2e3a6d chore: update Babel
Fixes #4052
2019-03-21 12:33:18 +01:00
Julien Fontanet
c2925f7c1e chore(xo-import-servers-csv): remove csv-parser types
Already available in csv-parser@2.2.0.
2019-03-21 12:31:05 +01:00
Jon Sands
60814d8b58 fix(docs): add pictures to manual seeding instructions (#4019) 2019-03-21 08:38:18 +01:00
Julien Fontanet
2dec448f2c chore: update dependencies 2019-03-20 11:26:05 +01:00
Pierre Donias
b71f4f6800 fix(xo-web/home): always sort by name_label as a secondary sort (#4047)
Fixes #3983
2019-03-20 09:43:06 +01:00
badrAZ
558083a916 chore(CHANGELOG): update next 2019-03-19 15:30:24 +01:00
badrAZ
d507ed9dff feat(xo-web): v5.37.0 2019-03-19 15:05:41 +01:00
badrAZ
7ed0242662 feat(xo-server): v5.37.0 2019-03-19 14:59:59 +01:00
badrAZ
d7b3d989d7 feat(xo-server-auth-google): v0.2.1 2019-03-19 14:51:25 +01:00
badrAZ
707b2f77f0 fix(xo-web/backup-ng): display compression only in full mode (#4021)
Fixes xoa-support#1346
2019-03-19 11:58:11 +01:00
Rajaa.BARHTAOUI
5ddbb76979 feat(xo-web/vm/disk): warning when SRs on 2 different hosts (#3969)
See #3911

Show a warning message when at least 2 VDIs attached to the VM are on local SRs of 2 different hosts, because the VM won't be able to start (NO_HOSTS_AVAILABLE)
2019-03-19 11:14:30 +01:00
Enishowk
97b0fe62d4 feat(xo-server/vm.delete): ensure suspend VDI is destroyed (#4038)
Fixes #4027
2019-03-18 10:29:54 +01:00
badrAZ
8ac9b2cdc7 fix(xo-server/xapi-stats): synchronize requests to improve caching (#4028)
Fixes #4017
2019-03-15 11:46:21 +01:00
badrAZ
bc4c1a13e6 chore(xo-server): remove deprecated syntax for decorator-synchronized (#4037) 2019-03-14 18:13:39 +01:00
Julien Fontanet
d3ec303ade feat(xo-server): properly streams NDJSON (#4030) 2019-03-14 11:21:09 +01:00
Rajaa.BARHTAOUI
6cfc2a1ba6 fix(CHANGELOG.unreleased): remove duplicate entry (#4034) 2019-03-13 10:54:40 +01:00
Enishowk
e15cadc863 feat(xo-web/home): add current page in url (#3999)
Fixes #3993
2019-03-13 08:54:30 +01:00
Julien Fontanet
2f9284c263 fix(xo-server/sample.config.toml): datadir is not in redis section 2019-03-12 22:39:37 +01:00
badrAZ
2465852fd6 chore(xo-web): rename ret. to retention (#4029) 2019-03-12 15:18:03 +01:00
badrAZ
a9f48a0d50 fix(xo-web/migrateVms): VM disks migrated to the wrong SR (#3987)
Fixes #3986
2019-03-12 14:36:56 +01:00
badrAZ
4ed0035c67 chore(xo-server/xapi-stats): add documentation (#4031) 2019-03-12 13:24:34 +01:00
Rajaa.BARHTAOUI
b66f2dfb80 feat(xo-web/vm/disks): same-pool SRs first in migrate selector (#3996)
Fixes #3945
2019-03-12 11:44:38 +01:00
Julien Fontanet
3cb155b129 feat(CHANGELOG.unreleased): add compression 2019-03-12 11:04:12 +01:00
Julien Fontanet
df7efc04e2 feat(backup NG logs): use NDJSON (#4026)
Fixes #4025
2019-03-12 11:02:15 +01:00
Rajaa.BARHTAOUI
a21a8457a4 feat(xo-web/new/vm): warning when SRs not on same host (#3967)
See #3911
2019-03-12 10:20:40 +01:00
Julien Fontanet
020955f535 chore(yarn.lock): refresh 2019-03-11 21:05:48 +01:00
Julien Fontanet
51f23a5f03 chore(xo-web): update otplib to 11.0.0 2019-03-11 19:07:51 +01:00
Julien Fontanet
d024319441 fix(xo-server-auth-google): update passport-google-oauth20 to 2.0.0 2019-03-11 19:07:34 +01:00
Julien Fontanet
f8f35938c0 feat(xo-server): set NODE_ENV to production 2019-03-11 18:52:25 +01:00
Julien Fontanet
2573ace368 feat(xo-server): enable HTTP compression 2019-03-11 18:27:05 +01:00
Rajaa.BARHTAOUI
6bf7269814 feat(xo-server,xo-web/VM): start delay (#4002)
Fixes #3909
2019-03-11 15:39:10 +01:00
Julien Fontanet
6695c7bf5e feat(CHANGELOG): 5.32.1 and 5.32.2 2019-03-11 15:23:51 +01:00
Julien Fontanet
44a83fd817 fix(docs/cr/seed): fix CLI package name 2019-03-06 19:23:33 +01:00
Enishowk
08ddfe0649 feat(VM creation): support automatic networks (#3958)
Fixes #3916
2019-03-06 14:46:22 +01:00
Enishowk
5ba170bf1f feat(xo-web/SR/disks): disable actions on unmanaged VDIs (#4000)
Fixes #3988
2019-03-06 09:40:07 +01:00
Julien Fontanet
8150d3110c fix(vhd-cli/repl): various fixes 2019-03-05 11:46:14 +01:00
Pierre Donias
312b33ae85 fix(xo-web/new-network): PIF should not be required (#4010)
Introduced by 7a2a88b7ad

Requiring a PIF prevented users from creating private networks
2019-03-04 17:45:48 +01:00
Julien Fontanet
008eb995ed feat(vhd-cli): 0.3.0 2019-03-01 20:07:58 +01:00
Julien Fontanet
6d8848043c feat(vhd-cli): repl command 2019-03-01 20:00:18 +01:00
Julien Fontanet
cf572c0cc5 feat(xo-server): 5.36.3 2019-03-01 17:21:09 +01:00
Julien Fontanet
18cfa7dd29 feat(xen-api): 0.24.5 2019-03-01 17:20:19 +01:00
Julien Fontanet
72cac2bbd6 chore(xen-api/json-rpc): link to XenCenter code 2019-03-01 16:41:15 +01:00
Julien Fontanet
48ffa28e0b fix(xen-api/_watchEvents): timeout must be a float
Required by XML-RPC transport (XenServer < 7.3).
2019-03-01 16:39:49 +01:00
Julien Fontanet
2e6baeb95a feat(xo-server): 5.36.2 2019-03-01 13:53:28 +01:00
Julien Fontanet
3b5650dc1e feat(xen-api): 0.24.4 2019-03-01 13:52:26 +01:00
Julien Fontanet
3279728e4b chore(xen-api/events): prints errors 2019-03-01 13:42:13 +01:00
Julien Fontanet
fe0dcbacc5 fix(xen-api/_watchEvents): pTimeout expects milliseconds 2019-03-01 13:40:03 +01:00
Julien Fontanet
7c5d90fe40 feat(xo-server/createCloudInit): support network config (#3997)
* feat(xo-server/createCloudInit): support network config

See #3872

* Update index.js
2019-03-01 09:50:37 +01:00
marcpezin
944dad6e36 feat(docs): metadata backups (#4001) 2019-03-01 09:49:25 +01:00
Julien Fontanet
6713d3ec66 chore: update dependencies 2019-03-01 09:44:12 +01:00
Julien Fontanet
6adadb2359 feat(xo-server): 5.35.1 2019-02-28 18:32:17 +01:00
Julien Fontanet
b01096876c feat(fs): 0.7.1 2019-02-28 18:31:58 +01:00
Julien Fontanet
60243d8517 fix(fs/_mount/_sync): dont use --target
Because it also checks the parents of the path.
2019-02-28 18:31:57 +01:00
badrAZ
94d0809380 chore(CHANGELOG): v5.32.0 2019-02-28 17:42:40 +01:00
badrAZ
e935dd9bad feat(xo-web): v5.36.0 2019-02-28 17:35:29 +01:00
badrAZ
30aa2b83d0 feat(xo-server): v5.36.0 2019-02-28 17:33:36 +01:00
badrAZ
fc42c58079 feat(xen-api): v0.24.3 2019-02-28 17:20:59 +01:00
badrAZ
ee9443cf16 feat(@xen-orchestra/fs): v0.7.0 2019-02-28 17:17:33 +01:00
Julien Fontanet
f91d4a07eb fix(xen-api/_watchEvents): dont stop when fail to get records 2019-02-28 16:32:30 +01:00
Julien Fontanet
c5a5ef6c93 fix(xen-api/_watchEvents): dont fetch events while fetching tasks
When our tasks cache is desynchronized we re-fetch all tasks.

We must wait for the tasks to be fetched before fetching the next events, otherwise we risk a race condition.
2019-02-28 16:30:39 +01:00
Julien Fontanet
7559fbdab7 chore: update to http-request-plus 0.7.2
Work around a Node issue which led to incorrect *aborted* error events.
2019-02-28 16:21:07 +01:00
Julien Fontanet
7925ee8fee fix(fs/_mount#_sync): use findmnt to check mount success (#4003)
Fixes #3973
2019-02-28 15:32:06 +01:00
badrAZ
fea5117ed8 feat(metadata backup): backup XO config and pool metadata (#3912)
Fixes #3501
2019-02-28 15:31:17 +01:00
Julien Fontanet
468a2c5bf3 fix(xen-api/connect): always pass params to _transporCall 2019-02-28 12:36:57 +01:00
Julien Fontanet
c728eeaffa feat(fs/mount): keep open file on mount to avoid external umount (#3998) 2019-02-28 11:52:45 +01:00
Julien Fontanet
6aa8e0d4ce feat(xo-server/CR): share full between schedules (#3995)
Fixes #3973
2019-02-27 16:36:22 +01:00
Enishowk
76ae54ff05 feat(xo-web): add button to download log (#3985)
Fixes #3957
2019-02-27 10:02:30 +01:00
Julien Fontanet
344e9e06d0 feat(xen-api/objects): buffer objects' events on initial fetch (#3994)
XO requires all objects to be available at the same time.
2019-02-26 15:03:33 +01:00
Julien Fontanet
d866bccf3b chore(xen-api/getResource): options are optional 2019-02-26 14:44:55 +01:00
Julien Fontanet
3931c4cf4c chore(xo-server/snapshotVm): eventless implementation (#3992)
The previous implementation relied on events but had issues where it did not correctly detect and remove broken quiesced snapshots.

The new implementation is less magical and does not rely on events at all.
2019-02-26 14:41:55 +01:00
Julien Fontanet
420f1c77a1 fix: XAPI record types are now properly cased 2019-02-26 09:45:57 +01:00
Julien Fontanet
59106aa29e chore(xen-api/_watchEvents): new implementation (#3990)
- fetch each type independently: no more huge requests
- only fall back to legacy implementation if `event.inject` is not available
- can only watch certain types
- `Xapi#objectsFetched` is a promise which resolves when objects have been fetched
2019-02-26 09:45:21 +01:00
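For reference, a minimal usage sketch of the `objectsFetched` promise mentioned above, assuming an already reachable host (URL and credentials are placeholders; `createClient` and `objects.all` follow the library's documented usage):

```js
import { createClient } from 'xen-api'

const xapi = createClient({
  url: 'https://xs.example.org', // placeholder
  auth: { user: 'root', password: 'secret' }, // placeholder
})

await xapi.connect()

// `objectsFetched` resolves once the initial objects fetch is complete
await xapi.objectsFetched
console.log('cached objects:', Object.keys(xapi.objects.all).length)
```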
Julien Fontanet
4216a5808a chore(xen-api/setFieldEntry): always returns undefined 2019-02-24 18:17:26 +01:00
Julien Fontanet
12a7000e36 fix(xen-api): correct $type for records from event
The XenApi event system returns lowercased types, which makes things difficult:
for instance, `Record#set_name_label` methods did not work for some VMs
because the lib called `vm.set_name_label` instead of
`VM.set_name_label`.

To work around this problem, a map from lowercased type names to properly
cased ones is constructed at connection time.
2019-02-24 18:17:26 +01:00
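A minimal sketch of the work-around, assuming a hard-coded list of properly cased record types (in reality the list comes from the connected pool, and the actual xen-api internals may differ):

```js
// Hypothetical illustration of the case-correcting map, not the xen-api code.
const RECORD_TYPES = ['VM', 'VBD', 'VDI', 'SR', 'host', 'pool', 'task']

// built once at connection time: 'vm' → 'VM', 'vbd' → 'VBD', …
const typeByLowerCase = new Map(
  RECORD_TYPES.map(type => [type.toLowerCase(), type])
)

// events report lowercased types; map them back before building method names
const eventType = 'vm'
const properType = typeByLowerCase.get(eventType) // 'VM'
console.log(`${properType}.set_name_label`) // 'VM.set_name_label'
```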
Jon Sands
685355c6fb fix(docs): clarify build and fix link
- from sources: clarify yarn build
- backups: fix quiesce link
2019-02-24 13:27:16 +01:00
Julien Fontanet
66f685165e feat(xen-api/Record#update_): easier use for single entry
```js
// before
await object.update_property({
  entry: 'value',
})

// after
await object.update_property('entry', 'value')
```
2019-02-22 19:51:36 +01:00
Julien Fontanet
8e8b1c009a feat(xen-api#unsetField): replaced by setField(_, null) 2019-02-22 19:51:36 +01:00
Julien Fontanet
705d069246 feat(xen-api#getField): get a specific record field 2019-02-22 19:51:35 +01:00
Julien Fontanet
58e8d75935 chore(xen-api/*setField*): take separate type and ref 2019-02-22 19:51:34 +01:00
Julien Fontanet
5eb1454e67 fix(xen-api/_transportCall): avoid logging session ID 2019-02-22 19:51:34 +01:00
Julien Fontanet
04b31db41b feat(xen-api/getRecords): fetch multiple records 2019-02-22 19:51:33 +01:00
badrAZ
29b4cf414a fix(xo-server/xen-servers): pool property not deleted on disconnecting a connecting server (#3977)
Fixes #3976
2019-02-21 17:15:39 +01:00
Rajaa.BARHTAOUI
7a2a88b7ad feat(xo-web/new-network): dedicated view (#3906)
Fixes #3895
2019-02-21 11:43:40 +01:00
Nicolas Raynaud
dc34f3478d fix(xo-web): strip XML prefixes in OVA import parser (#3974)
Fixes #3962

- Parse the OVF XML without taking any namespace into account.
- Empty the import screen when a new file is dropped on the drop zone, to avoid displaying stale information during long parsing.
2019-02-21 09:24:01 +01:00
Julien Fontanet
58175a4f5e chore(ESLint): update config 2019-02-20 11:05:57 +01:00
badrAZ
c4587c11bd feat(xo-web/multipathing): display multipathing required state info (#3975) 2019-02-19 12:00:04 +01:00
Julien Fontanet
5b1a5f4fe7 feat(xo-web/editable): blur always submits (#3980)
Previous behavior (blur cancels) was surprising to users.

Enter still submits and Escape still cancels.
2019-02-19 11:29:50 +01:00
Jon Sands
ee2db918f3 feat(docs/from sources): Debian 8 → 9 (#3978)
* update cloud init docu

* update cloudinit images

* fix png links

* add emergency shutdown feature doc

* fix emergency shutdown typo

* Update to Debian 9 recommendation
2019-02-19 09:56:47 +01:00
Julien Fontanet
0695bafb90 fix(xen-api#_transportCall): pTimeout.call
Fixes 8e116063b
2019-02-17 19:39:11 +01:00
Julien Fontanet
8e116063bf feat(xen-api#_transportCall): timeout after 24 hours 2019-02-15 17:37:45 +01:00
Julien Fontanet
3f3b372f89 feat(xapi/Record#$xapi): link connection from record 2019-02-15 17:29:00 +01:00
Julien Fontanet
24cc1e8e29 chore(xo-server/pRetry): more tests 2019-02-15 14:38:12 +01:00
Julien Fontanet
e988ad4df9 chore: add package.repository.directory
See npm/rfcs#19
2019-02-15 14:38:11 +01:00
Julien Fontanet
5c12d4a546 chore(fs/PrefixWrapper): _remote → _handler 2019-02-15 14:38:11 +01:00
Enishowk
d90b85204d feat(xo-web): sort VMs by start time (#3970)
Fixes #3955
2019-02-15 10:09:53 +01:00
badrAZ
6332355031 fix(xo-server/multipathing): disable host before unplugging PBDs (#3965) 2019-02-14 16:03:48 +01:00
Rajaa.BARHTAOUI
4ce702dfdf feat(xo-web/vm/migrate): same-pool hosts first in selector (#3890)
Fixes #3262
2019-02-14 11:55:58 +01:00
Pierre Donias
362a381dfb fix(xo-web/getMessages): handle errors (#3966) 2019-02-13 18:15:54 +01:00
Enishowk
0eec4ee2f7 fix(xo-server,xo-web/VM): hide creation date if not available (#3959)
Fixes #3953
2019-02-13 14:01:45 +01:00
badrAZ
b92390087b fix(xo-server/host): multipathing status for XS < 7.5 (#3961)
Fixes #3956
2019-02-12 17:36:33 +01:00
Jon Sands
bce4d5d96f (Docu) Add page for emergency shutdown feature (#3960)
Fix emergency shutdown typo
2019-02-12 10:55:18 +01:00
Pierre Donias
27262ff3e8 fix(CHANGELOG): wrong version 2019-02-08 13:57:16 +01:00
Pierre Donias
444b6642f1 chore(CHANGELOG): 5.31.1 2019-02-08 13:49:44 +01:00
Pierre Donias
67d11020bb feat(xo-web): 5.35.0 2019-02-08 13:45:36 +01:00
Pierre Donias
7603974370 feat(xo-server): 5.35.0 2019-02-08 13:45:04 +01:00
Pierre Donias
6cb5639243 feat(xo-server-auth-saml): 0.5.3 2019-02-08 13:44:11 +01:00
Pierre Donias
0c5a37d8a3 feat(fs): 0.6.1 2019-02-08 13:42:52 +01:00
Pierre Donias
78cc7fe664 feat(xen-api): 0.24.2 2019-02-08 13:39:09 +01:00
Julien Fontanet
2d51bef390 feat(xo-server/snapshotVm): retry when VM_SNAPSHOT_WITH_QUIESCE_FAILED (#3952)
Fixes #3938
2019-02-08 13:16:08 +01:00
Julien Fontanet
bc68fff079 fix(CHANGELOG.unreleased): move items from fixes to enhancement 2019-02-08 11:19:49 +01:00
Nicolas Raynaud
0a63acac73 fix(OVA import): fix tar file size parsing (#3941)
Avoids relying on the PAX header: the normal file size header uses a weird encoding.

Fixes #3900
2019-02-07 22:51:38 +01:00
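For context, a sketch of the kind of parsing involved, assuming the "weird encoding" refers to the GNU base-256 extension of the 12-byte tar size field (an illustration only, not the code from this PR):

```js
// The size field (12 bytes at offset 124) is octal ASCII by default; the GNU
// extension sets the high bit of the first byte and stores the size as a
// big-endian binary number in the remaining bytes.
function readTarEntrySize(header) {
  const field = header.slice(124, 136)
  if ((field[0] & 0x80) !== 0) {
    // base-256 encoding, used for entries larger than 8 GiB
    let size = field[0] & 0x7f
    for (let i = 1; i < field.length; i++) {
      size = size * 256 + field[i]
    }
    return size
  }
  // regular octal ASCII, NUL/space padded
  return parseInt(field.toString('ascii').replace(/[\0 ]+$/, '') || '0', 8)
}
```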
Julien Fontanet
e484b073e1 feat(xo-server/moveVdi): retry on TOO_MANY_STORAGE_MIGRATES (#3940)
Fix xoa-support#1222
2019-02-07 17:46:41 +01:00
Julien Fontanet
b2813d7cc0 feat(xo-server/snapshotVm): detect and destroy broken quiesced snapshots (#3937)
Fixes #3936
2019-02-07 17:37:09 +01:00
Julien Fontanet
29b941868d feat(xen-api): work-around empty VBD#VDI XenServer issue (#3950) 2019-02-07 16:44:42 +01:00
Julien Fontanet
37af47ecff fix(xo-server/remote.getAllInfo): reduce timeout to 5s 2019-02-07 14:17:16 +01:00
Julien Fontanet
8eb28d40da feat(vhd-cli): display version in usage 2019-02-07 14:17:15 +01:00
Jon Sands
383dd7b38e feat(docs/cloudinit): various changes (#3942)
- Removed the "CloudInit support is available in the 4.11 release and higher" message - is anyone still using XOA this many years old?  
- Added a note about our change to the configdrive type, and notes for users who have customized their datasources to look for only openstack (inspired by a customer)  
- Updated all screenshots to the modern XOA UI.
2019-02-07 11:52:04 +01:00
Rajaa.BARHTAOUI
b13b3fe9f6 feat(xo-web/vm/disk): display device name (#3946)
Fixes #3902
2019-02-07 09:41:26 +01:00
Enishowk
04a5f55b16 feat(xo-web/VM): expose the creation date of the VM (#3947)
Fixes #3932
2019-02-07 09:19:09 +01:00
Rajaa.BARHTAOUI
4ab1de918e feat(xo-web/home): set description on bulk snapshot (#3933)
Fixes #3925
2019-02-06 10:41:35 +01:00
Julien Fontanet
44fc5699fd chore(xo-server): upgrade jest-worker to 24.0.0
Fixes #3929.

Related to jest#7182.
2019-02-05 18:32:03 +01:00
Julien Fontanet
dd6c3ff434 feat(docs/backups): add link to introduction video 2019-02-05 17:21:12 +01:00
Enishowk
d747b937ee fix(@xen-orchestra/fs): don't ignore mount options (#3931)
Fixes #3935
2019-02-05 17:19:09 +01:00
Julien Fontanet
9aa63d0354 fix(xo-server/backup NG): fix error condition (#3939)
Fix #3875
2019-02-05 16:44:28 +01:00
Julien Fontanet
36220ac1c5 feat(docs/from sources): add cifs-utils dependency 2019-02-05 10:22:40 +01:00
Julien Fontanet
d8eb5d4934 chore(.editorconfig): uniformize indent to 2 spaces 2019-02-04 18:01:09 +01:00
Julien Fontanet
b580ea98a7 fix(xo-server-auth-saml): AssertionConsumerServiceURL matches callback URL
Fixes xoa-support#1235
2019-02-04 16:21:26 +01:00
Julien Fontanet
0ad68c2280 chore(PULL_REQUEST_TEMPLATE): CHANGELOG → CHANGELOG.unreleased.md 2019-02-04 13:47:57 +01:00
Julien Fontanet
b16f1899ac chore(CHANGELOG.unreleased): contains unreleased changes
Inspired by [Prettier](https://github.com/prettier/prettier/blob/master/CHANGELOG.unreleased.md).

Changes should go there instead of CHANGELOG, they will be moved during the release process.

This change should prevent the issue where old updated PRs added changes at incorrect positions in the CHANGELOG.
2019-02-04 13:43:29 +01:00
ETL
7e740a429a feat(docs): add coalescing troubleshooting tip (#3927) 2019-02-04 13:26:34 +01:00
Pierre Donias
61b1bd2533 fix(xo-web/host): show actual host's RAM usage (#3924)
Instead of the sum of each VM's RAM usage
2019-02-01 12:03:52 +01:00
Pierre Donias
d6ddba8e56 feat(xo-server): 5.34.1 2019-02-01 09:31:42 +01:00
Julien Fontanet
d10c7f3898 fix(xo-server/package.files): config.json → config.toml 2019-02-01 09:12:18 +01:00
Pierre Donias
2b2c2c42f1 chore(CHANGELOG): 5.31.0 2019-01-31 15:37:39 +01:00
Pierre Donias
efc65a0669 feat(xo-web): 5.34.0 2019-01-31 15:32:03 +01:00
Pierre Donias
d8e0727d4d feat(xo-server): 5.34.0 2019-01-31 15:31:28 +01:00
222 changed files with 8654 additions and 4220 deletions

View File

@@ -3,63 +3,12 @@
# Julien Fontanet's configuration
# https://gist.github.com/julien-f/8096213
# Top-most EditorConfig file.
root = true
# Common config.
[*]
charset = utf-8
end_of_line = lf
indent_size = 2
indent_style = space
insert_final_newline = true
trim_trailing_whitespace = true
# CoffeeScript
#
# https://github.com/polarmobile/coffeescript-style-guide/blob/master/README.md
[*.{,lit}coffee]
indent_size = 2
indent_style = space
# Markdown
[*.{md,mdwn,mdown,markdown}]
indent_size = 4
indent_style = space
# Package.json
#
# This indentation style is the one used by npm.
[package.json]
indent_size = 2
indent_style = space
# Pug (Jade)
[*.{jade,pug}]
indent_size = 2
indent_style = space
# JavaScript
#
# Two spaces seems to be the standard most common style, at least in
# Node.js (http://nodeguide.com/style.html#tabs-vs-spaces).
[*.{js,jsx,ts,tsx}]
indent_size = 2
indent_style = space
# Less
[*.less]
indent_size = 2
indent_style = space
# Sass
#
# Style used for http://libsass.com
[*.s[ac]ss]
indent_size = 2
indent_style = space
# YAML
#
# Only spaces are allowed.
[*.yaml]
indent_size = 2
indent_style = space

View File

@@ -1,5 +1,11 @@
module.exports = {
extends: ['standard', 'standard-jsx', 'prettier'],
extends: [
'standard',
'standard-jsx',
'prettier',
'prettier/standard',
'prettier/react',
],
globals: {
__DEV__: true,
$Dict: true,
@@ -10,6 +16,16 @@ module.exports = {
$PropertyType: true,
$Shape: true,
},
overrides: [
{
files: ['packages/*cli*/**/*.js', '*-cli.js'],
rules: {
'no-console': 'off',
},
},
],
parser: 'babel-eslint',
parserOptions: {
ecmaFeatures: {
@@ -17,12 +33,10 @@ module.exports = {
},
},
rules: {
'no-console': ['error', { allow: ['warn', 'error'] }],
'no-var': 'error',
'node/no-extraneous-import': 'error',
'node/no-extraneous-require': 'error',
'prefer-const': 'error',
// See https://github.com/prettier/eslint-config-prettier/issues/65
'react/jsx-indent': 'off',
},
}

View File

@@ -7,6 +7,7 @@
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/async-map",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "@xen-orchestra/async-map",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},

View File

@@ -46,6 +46,12 @@ const getConfig = (key, ...args) => {
: config
}
// some plugins must be used in a specific order
const pluginsOrder = [
'@babel/plugin-proposal-decorators',
'@babel/plugin-proposal-class-properties',
]
module.exports = function(pkg, plugins, presets) {
plugins === undefined && (plugins = {})
presets === undefined && (presets = {})
@@ -61,7 +67,13 @@ module.exports = function(pkg, plugins, presets) {
return {
comments: !__PROD__,
ignore: __TEST__ ? undefined : [/\.spec\.js$/],
plugins: Object.keys(plugins).map(plugin => [plugin, plugins[plugin]]),
plugins: Object.keys(plugins)
.map(plugin => [plugin, plugins[plugin]])
.sort(([a], [b]) => {
const oA = pluginsOrder.indexOf(a)
const oB = pluginsOrder.indexOf(b)
return oA !== -1 && oB !== -1 ? oA - oB : a < b ? -1 : 1
}),
presets: Object.keys(presets).map(preset => [preset, presets[preset]]),
}
}

View File

@@ -5,6 +5,7 @@
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/babel-config",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "@xen-orchestra/babel-config",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
}

View File

@@ -82,35 +82,26 @@ ${cliName} v${pkg.version}
)
await Promise.all([
srcXapi.setFieldEntries(srcSnapshot, 'other_config', metadata),
srcXapi.setFieldEntries(srcSnapshot, 'other_config', {
'xo:backup:exported': 'true',
}),
tgtXapi.setField(
tgtVm,
'name_label',
`${srcVm.name_label} (${srcSnapshot.snapshot_time})`
),
tgtXapi.setFieldEntries(tgtVm, 'other_config', metadata),
tgtXapi.setFieldEntries(tgtVm, 'other_config', {
srcSnapshot.update_other_config(metadata),
srcSnapshot.update_other_config('xo:backup:exported', 'true'),
tgtVm.set_name_label(`${srcVm.name_label} (${srcSnapshot.snapshot_time})`),
tgtVm.update_other_config(metadata),
tgtVm.update_other_config({
'xo:backup:sr': tgtSr.uuid,
'xo:copy_of': srcSnapshotUuid,
}),
tgtXapi.setFieldEntries(tgtVm, 'blocked_operations', {
start:
'Start operation for this vm is blocked, clone it if you want to use it.',
}),
tgtVm.update_blocked_operations(
'start',
'Start operation for this vm is blocked, clone it if you want to use it.'
),
Promise.all(
userDevices.map(userDevice => {
const srcDisk = srcDisks[userDevice]
const tgtDisk = tgtDisks[userDevice]
return tgtXapi.setFieldEntry(
tgtDisk,
'other_config',
'xo:copy_of',
srcDisk.uuid
)
return tgtDisk.update_other_config({
'xo:copy_of': srcDisk.uuid,
})
})
),
])

View File

@@ -4,6 +4,7 @@
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/cr-seed-cli",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "@xen-orchestra/cr-seed-cli",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
@@ -15,6 +16,6 @@
},
"dependencies": {
"golike-defer": "^0.4.1",
"xen-api": "^0.24.1"
"xen-api": "^0.24.6"
}
}

View File

@@ -17,6 +17,7 @@
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/cron",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "@xen-orchestra/cron",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},

View File

@@ -7,6 +7,7 @@
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/defined",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "@xen-orchestra/defined",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},

View File

@@ -7,6 +7,7 @@
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/emit-async",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "@xen-orchestra/emit-async",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},

View File

@@ -1,12 +1,13 @@
{
"name": "@xen-orchestra/fs",
"version": "0.6.0",
"version": "0.8.0",
"license": "AGPL-3.0",
"description": "The File System for Xen Orchestra backups.",
"keywords": [],
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/fs",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "@xen-orchestra/fs",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
@@ -23,11 +24,12 @@
"@marsaud/smb2": "^0.13.0",
"@sindresorhus/df": "^2.1.0",
"@xen-orchestra/async-map": "^0.0.0",
"decorator-synchronized": "^0.5.0",
"execa": "^1.0.0",
"fs-extra": "^7.0.0",
"get-stream": "^4.0.0",
"lodash": "^4.17.4",
"promise-toolbox": "^0.11.0",
"promise-toolbox": "^0.12.1",
"readable-stream": "^3.0.6",
"through2": "^3.0.0",
"tmp": "^0.0.33",
@@ -43,7 +45,7 @@
"async-iterator-to-stream": "^1.1.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^5.1.3",
"dotenv": "^6.1.0",
"dotenv": "^7.0.0",
"index-modules": "^0.3.0",
"rimraf": "^2.6.2"
},

View File

@@ -1,5 +1,6 @@
import execa from 'execa'
import fs from 'fs-extra'
import { ignoreErrors } from 'promise-toolbox'
import { join } from 'path'
import { tmpdir } from 'os'
@@ -21,7 +22,13 @@ export default class MountHandler extends LocalHandler {
super(remote, opts)
this._execa = useSudo ? sudoExeca : execa
this._params = params
this._keeper = undefined
this._params = {
...params,
options: [params.options, remote.options]
.filter(_ => _ !== undefined)
.join(','),
}
this._realPath = join(
mountsDir,
remote.id ||
@@ -32,19 +39,20 @@ export default class MountHandler extends LocalHandler {
}
async _forget() {
await this._execa('umount', ['--force', this._getRealPath()], {
env: {
LANG: 'C',
},
}).catch(error => {
if (
error == null ||
typeof error.stderr !== 'string' ||
!error.stderr.includes('not mounted')
) {
throw error
}
})
const keeper = this._keeper
if (keeper === undefined) {
return
}
this._keeper = undefined
await fs.close(keeper)
await ignoreErrors.call(
this._execa('umount', [this._getRealPath()], {
env: {
LANG: 'C',
},
})
)
}
_getRealPath() {
@@ -52,26 +60,49 @@ export default class MountHandler extends LocalHandler {
}
async _sync() {
await fs.ensureDir(this._getRealPath())
const { type, device, options, env } = this._params
return this._execa(
'mount',
['-t', type, device, this._getRealPath(), '-o', options],
{
env: {
LANG: 'C',
...env,
},
// in case of multiple `sync`s, ensure we properly close previous keeper
{
const keeper = this._keeper
if (keeper !== undefined) {
this._keeper = undefined
ignoreErrors.call(fs.close(keeper))
}
).catch(error => {
let stderr
if (
error == null ||
typeof (stderr = error.stderr) !== 'string' ||
!(stderr.includes('already mounted') || stderr.includes('busy'))
) {
}
const realPath = this._getRealPath()
await fs.ensureDir(realPath)
try {
const { type, device, options, env } = this._params
await this._execa(
'mount',
['-t', type, device, realPath, '-o', options],
{
env: {
LANG: 'C',
...env,
},
}
)
} catch (error) {
try {
// the failure may mean it's already mounted, use `findmnt` to check
// that's the case
await this._execa('findmnt', [realPath], {
stdio: 'ignore',
})
} catch (_) {
throw error
}
})
}
// keep an open file on the mount to prevent it from being unmounted if used
// by another handler/process
const keeperPath = `${realPath}/.keeper_${Math.random()
.toString(36)
.slice(2)}`
this._keeper = await fs.open(keeperPath, 'w')
ignoreErrors.call(fs.unlink(keeperPath))
}
}
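In short, the "keeper" trick shown in this diff relies on the kernel refusing to unmount a busy filesystem; a condensed sketch of the same idea (simplified, without the error handling above):

```js
import fs from 'fs-extra'

// Holding an open file descriptor on the mount point keeps the filesystem
// busy, so a plain `umount` run by another process fails with EBUSY instead
// of silently pulling the remote out from under us.
async function holdMount(realPath) {
  const keeperPath = `${realPath}/.keeper_${Math.random()
    .toString(36)
    .slice(2)}`
  const fd = await fs.open(keeperPath, 'w')
  await fs.unlink(keeperPath) // the open fd keeps the mount busy even unlinked
  return () => fs.close(fd) // call from forget() to release the mount
}
```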

View File

@@ -5,6 +5,7 @@ import getStream from 'get-stream'
import asyncMap from '@xen-orchestra/async-map'
import path from 'path'
import synchronized from 'decorator-synchronized'
import { fromCallback, fromEvent, ignoreErrors, timeout } from 'promise-toolbox'
import { parse } from 'xo-remote-parser'
import { randomBytes } from 'crypto'
@@ -24,6 +25,10 @@ type RemoteInfo = { used?: number, size?: number }
type File = FileDescriptor | string
const checksumFile = file => file + '.checksum'
const computeRate = (hrtime: number[], size: number) => {
const seconds = hrtime[0] + hrtime[1] / 1e9
return size / seconds
}
const DEFAULT_TIMEOUT = 6e5 // 10 min
@@ -34,18 +39,18 @@ const ignoreEnoent = error => {
}
class PrefixWrapper {
constructor(remote, prefix) {
constructor(handler, prefix) {
this._prefix = prefix
this._remote = remote
this._handler = handler
}
get type() {
return this._remote.type
return this._handler.type
}
// necessary to remove the prefix from the path with `prependDir` option
async list(dir, opts) {
const entries = await this._remote.list(this._resolve(dir), opts)
const entries = await this._handler.list(this._resolve(dir), opts)
if (opts != null && opts.prependDir) {
const n = this._prefix.length
entries.forEach((entry, i, entries) => {
@@ -56,7 +61,7 @@ class PrefixWrapper {
}
rename(oldPath, newPath) {
return this._remote.rename(this._resolve(oldPath), this._resolve(newPath))
return this._handler.rename(this._resolve(oldPath), this._resolve(newPath))
}
_resolve(path) {
@@ -216,6 +221,7 @@ export default class RemoteHandlerAbstract {
// FIXME: Some handlers are implemented based on system-wide mecanisms (such
// as mount), forgetting them might breaking other processes using the same
// remote.
@synchronized()
async forget(): Promise<void> {
await this._forget()
}
@@ -354,23 +360,33 @@ export default class RemoteHandlerAbstract {
// metadata
//
// This method MUST ALWAYS be called before using the handler.
@synchronized()
async sync(): Promise<void> {
await this._sync()
}
async test(): Promise<Object> {
const SIZE = 1024 * 1024 * 10
const testFileName = normalizePath(`${Date.now()}.test`)
const data = await fromCallback(cb => randomBytes(1024 * 1024, cb))
const data = await fromCallback(cb => randomBytes(SIZE, cb))
let step = 'write'
try {
const writeStart = process.hrtime()
await this._outputFile(testFileName, data, { flags: 'wx' })
const writeDuration = process.hrtime(writeStart)
step = 'read'
const readStart = process.hrtime()
const read = await this._readFile(testFileName, { flags: 'r' })
const readDuration = process.hrtime(readStart)
if (!data.equals(read)) {
throw new Error('output and input did not match')
}
return {
success: true,
writeRate: computeRate(writeDuration, SIZE),
readRate: computeRate(readDuration, SIZE),
}
} catch (error) {
return {
@@ -565,7 +581,7 @@ function createPrefixWrapperMethods() {
if (arguments.length !== 0 && typeof (path = arguments[0]) === 'string') {
arguments[0] = this._resolve(path)
}
return value.apply(this._remote, arguments)
return value.apply(this._handler, arguments)
}
defineProperty(pPw, name, descriptor)

View File

@@ -16,6 +16,8 @@ class TestHandler extends AbstractHandler {
}
}
jest.useFakeTimers()
describe('closeFile()', () => {
it(`throws in case of timeout`, async () => {
const testHandler = new TestHandler({

View File

@@ -290,9 +290,11 @@ handlers.forEach(url => {
describe('#test()', () => {
it('tests the remote appears to be working', async () => {
expect(await handler.test()).toEqual({
success: true,
})
const answer = await handler.test()
expect(answer.success).toBe(true)
expect(typeof answer.writeRate).toBe('number')
expect(typeof answer.readRate).toBe('number')
})
})

View File

@@ -6,12 +6,11 @@ const DEFAULT_NFS_OPTIONS = 'vers=3'
export default class NfsHandler extends MountHandler {
constructor(remote, opts) {
const { host, port, path, options } = parse(remote.url)
const { host, port, path } = parse(remote.url)
super(remote, opts, {
type: 'nfs',
device: `${host}${port !== undefined ? ':' + port : ''}:${path}`,
options:
DEFAULT_NFS_OPTIONS + (options !== undefined ? `,${options}` : ''),
options: DEFAULT_NFS_OPTIONS,
})
}

View File

@@ -5,19 +5,13 @@ import normalizePath from './_normalizePath'
export default class SmbMountHandler extends MountHandler {
constructor(remote, opts) {
const {
domain = 'WORKGROUP',
host,
options,
password,
path,
username,
} = parse(remote.url)
const { domain = 'WORKGROUP', host, password, path, username } = parse(
remote.url
)
super(remote, opts, {
type: 'cifs',
device: '//' + host + normalizePath(path),
options:
`domain=${domain}` + (options !== undefined ? `,${options}` : ''),
options: `domain=${domain}`,
env: {
USER: username,
PASSWD: password,

View File

@@ -7,6 +7,7 @@
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/log",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "@xen-orchestra/log",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
@@ -30,7 +31,7 @@
},
"dependencies": {
"lodash": "^4.17.4",
"promise-toolbox": "^0.11.0"
"promise-toolbox": "^0.12.1"
},
"devDependencies": {
"@babel/cli": "^7.0.0",

View File

@@ -7,6 +7,7 @@
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/mixin",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "@xen-orchestra/mixin",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},

View File

@@ -1,9 +1,112 @@
# ChangeLog
## *next*
## Next (2019-03-19)
### Enhancements
- [SR/Disk] Disable actions on unmanaged VDIs [#3988](https://github.com/vatesfr/xen-orchestra/issues/3988) (PR [#4000](https://github.com/vatesfr/xen-orchestra/pull/4000))
- [Pool] Specify automatic networks on a Pool [#3916](https://github.com/vatesfr/xen-orchestra/issues/3916) (PR [#3958](https://github.com/vatesfr/xen-orchestra/pull/3958))
- [VM/advanced] Manage start delay for VM [#3909](https://github.com/vatesfr/xen-orchestra/issues/3909) (PR [#4002](https://github.com/vatesfr/xen-orchestra/pull/4002))
- [New/Vm] SR section: Display warning message when the selected SRs aren't in the same host [#3911](https://github.com/vatesfr/xen-orchestra/issues/3911) (PR [#3967](https://github.com/vatesfr/xen-orchestra/pull/3967))
- Enable compression for HTTP requests (and initial objects fetch)
- [VDI migration] Display same-pool SRs first in the selector [#3945](https://github.com/vatesfr/xen-orchestra/issues/3945) (PR [#3996](https://github.com/vatesfr/xen-orchestra/pull/3996))
- [Home] Save the current page in url [#3993](https://github.com/vatesfr/xen-orchestra/issues/3993) (PR [#3999](https://github.com/vatesfr/xen-orchestra/pull/3999))
- [VDI] Ensure suspend VDI is destroyed when destroying a VM [#4027](https://github.com/vatesfr/xen-orchestra/issues/4027) (PR [#4038](https://github.com/vatesfr/xen-orchestra/pull/4038))
- [VM/disk]: Warning when 2 VDIs are on 2 different hosts' local SRs [#3911](https://github.com/vatesfr/xen-orchestra/issues/3911) (PR [#3969](https://github.com/vatesfr/xen-orchestra/pull/3969))
### Bug fixes
- [New network] PIF was wrongly required, which prevented creating a private network (PR [#4010](https://github.com/vatesfr/xen-orchestra/pull/4010))
- [Google authentication] Migrate to new endpoint
- [Backup NG] Better handling of huge logs [#4025](https://github.com/vatesfr/xen-orchestra/issues/4025) (PR [#4026](https://github.com/vatesfr/xen-orchestra/pull/4026))
- [Home/VM] Bulk migration: fixed VM VDIs not migrated to the selected SR [#3986](https://github.com/vatesfr/xen-orchestra/issues/3986) (PR [#3987](https://github.com/vatesfr/xen-orchestra/pull/3987))
- [Stats] Fix cache usage with simultaneous requests [#4017](https://github.com/vatesfr/xen-orchestra/issues/4017) (PR [#4028](https://github.com/vatesfr/xen-orchestra/pull/4028))
- [Backup NG] Fix compression displayed for the wrong backup mode (PR [#4021](https://github.com/vatesfr/xen-orchestra/pull/4021))
## **5.32.2** (2019-02-28)
### Bug fixes
- Fix XAPI events monitoring on old version (XenServer 7.2)
## **5.32.1** (2019-02-28)
### Bug fixes
- Fix a very short timeout in the monitoring of XAPI events which may lead to unresponsive XenServer hosts
## **5.32.0** (2019-02-28)
### Enhancements
- [VM migration] Display same-pool hosts first in the selector [#3262](https://github.com/vatesfr/xen-orchestra/issues/3262) (PR [#3890](https://github.com/vatesfr/xen-orchestra/pull/3890))
- [Home/VM] Sort VM by start time [#3955](https://github.com/vatesfr/xen-orchestra/issues/3955) (PR [#3970](https://github.com/vatesfr/xen-orchestra/pull/3970))
- [Editable fields] Unfocusing (clicking outside) submits the change instead of canceling (PR [#3980](https://github.com/vatesfr/xen-orchestra/pull/3980))
- [Network] Dedicated page for network creation [#3895](https://github.com/vatesfr/xen-orchestra/issues/3895) (PR [#3906](https://github.com/vatesfr/xen-orchestra/pull/3906))
- [Logs] Add button to download the log [#3957](https://github.com/vatesfr/xen-orchestra/issues/3957) (PR [#3985](https://github.com/vatesfr/xen-orchestra/pull/3985))
- [Continuous Replication] Share full copy between schedules [#3973](https://github.com/vatesfr/xen-orchestra/issues/3973) (PR [#3995](https://github.com/vatesfr/xen-orchestra/pull/3995))
- [Backup] Ability to backup XO configuration and pool metadata [#808](https://github.com/vatesfr/xen-orchestra/issues/808) [#3501](https://github.com/vatesfr/xen-orchestra/issues/3501) (PR [#3912](https://github.com/vatesfr/xen-orchestra/pull/3912))
### Bug fixes
- [Host] Fix multipathing status for XenServer < 7.5 [#3956](https://github.com/vatesfr/xen-orchestra/issues/3956) (PR [#3961](https://github.com/vatesfr/xen-orchestra/pull/3961))
- [Home/VM] Show creation date of the VM only if it's available [#3953](https://github.com/vatesfr/xen-orchestra/issues/3953) (PR [#3959](https://github.com/vatesfr/xen-orchestra/pull/3959))
- [Notifications] Fix invalid notifications when not registered (PR [#3966](https://github.com/vatesfr/xen-orchestra/pull/3966))
- [Import] Fix import of some OVA files [#3962](https://github.com/vatesfr/xen-orchestra/issues/3962) (PR [#3974](https://github.com/vatesfr/xen-orchestra/pull/3974))
- [Servers] Fix *already connected error* after a server has been removed during connection [#3976](https://github.com/vatesfr/xen-orchestra/issues/3976) (PR [#3977](https://github.com/vatesfr/xen-orchestra/pull/3977))
- [Backup] Fix random _mount_ issues with NFS/SMB remotes [#3973](https://github.com/vatesfr/xen-orchestra/issues/3973) (PR [#4003](https://github.com/vatesfr/xen-orchestra/pull/4003))
### Released packages
- @xen-orchestra/fs v0.7.0
- xen-api v0.24.3
- xoa-updater v0.15.2
- xo-server v5.36.0
- xo-web v5.36.0
## **5.31.2** (2019-02-08)
### Enhancements
- [Home] Set description on bulk snapshot [#3925](https://github.com/vatesfr/xen-orchestra/issues/3925) (PR [#3933](https://github.com/vatesfr/xen-orchestra/pull/3933))
- Work-around the XenServer issue when `VBD#VDI` is an empty string instead of an opaque reference (PR [#3950](https://github.com/vatesfr/xen-orchestra/pull/3950))
- [VDI migration] Retry when XenServer fails with `TOO_MANY_STORAGE_MIGRATES` (PR [#3940](https://github.com/vatesfr/xen-orchestra/pull/3940))
- [VM]
- [General] The creation date of the VM is now visible [#3932](https://github.com/vatesfr/xen-orchestra/issues/3932) (PR [#3947](https://github.com/vatesfr/xen-orchestra/pull/3947))
- [Disks] Display device name [#3902](https://github.com/vatesfr/xen-orchestra/issues/3902) (PR [#3946](https://github.com/vatesfr/xen-orchestra/pull/3946))
- [VM Snapshotting]
- Detect and destroy broken quiesced snapshot left by XenServer [#3936](https://github.com/vatesfr/xen-orchestra/issues/3936) (PR [#3937](https://github.com/vatesfr/xen-orchestra/pull/3937))
- Retry twice after a 1 minute delay if quiesce failed [#3938](https://github.com/vatesfr/xen-orchestra/issues/3938) (PR [#3952](https://github.com/vatesfr/xen-orchestra/pull/3952))
### Bug fixes
- [Import] Fix import of big OVA files
- [Host] Show the host's memory usage instead of the sum of the VMs' memory usage (PR [#3924](https://github.com/vatesfr/xen-orchestra/pull/3924))
- [SAML] Make `AssertionConsumerServiceURL` match the callback URL
- [Backup NG] Correctly delete broken VHD chains [#3875](https://github.com/vatesfr/xen-orchestra/issues/3875) (PR [#3939](https://github.com/vatesfr/xen-orchestra/pull/3939))
- [Remotes] Don't ignore `mount` options [#3935](https://github.com/vatesfr/xen-orchestra/issues/3935) (PR [#3931](https://github.com/vatesfr/xen-orchestra/pull/3931))
### Released packages
- xen-api v0.24.2
- @xen-orchestra/fs v0.6.1
- xo-server-auth-saml v0.5.3
- xo-server v5.35.0
- xo-web v5.35.0
## **5.31.0** (2019-01-31)
### Enhancements
- [Backup NG] Restore logs moved to restore tab [#3772](https://github.com/vatesfr/xen-orchestra/issues/3772) (PR [#3802](https://github.com/vatesfr/xen-orchestra/pull/3802))
- [Remotes] New SMB implementation that provides better stability and performance [#2257](https://github.com/vatesfr/xen-orchestra/issues/2257) (PR [#3708](https://github.com/vatesfr/xen-orchestra/pull/3708))
- [VM/advanced] ACL management from VM view [#3040](https://github.com/vatesfr/xen-orchestra/issues/3040) (PR [#3774](https://github.com/vatesfr/xen-orchestra/pull/3774))
- [VM / snapshots] Ability to save the VM memory [#3795](https://github.com/vatesfr/xen-orchestra/issues/3795) (PR [#3812](https://github.com/vatesfr/xen-orchestra/pull/3812))
- [Backup NG / Health] Show number of lone snapshots in tab label [#3500](https://github.com/vatesfr/xen-orchestra/issues/3500) (PR [#3824](https://github.com/vatesfr/xen-orchestra/pull/3824))
- [Login] Add autofocus on username input on login page [#3835](https://github.com/vatesfr/xen-orchestra/issues/3835) (PR [#3836](https://github.com/vatesfr/xen-orchestra/pull/3836))
- [Home/VM] Bulk snapshot: specify snapshots' names [#3778](https://github.com/vatesfr/xen-orchestra/issues/3778) (PR [#3787](https://github.com/vatesfr/xen-orchestra/pull/3787))
- [Remotes] Show free space and disk usage on remote [#3055](https://github.com/vatesfr/xen-orchestra/issues/3055) (PR [#3767](https://github.com/vatesfr/xen-orchestra/pull/3767))
- [New SR] Add tooltip for reattach action button [#3845](https://github.com/vatesfr/xen-orchestra/issues/3845) (PR [#3852](https://github.com/vatesfr/xen-orchestra/pull/3852))
- [VM migration] Display hosts' free memory [#3264](https://github.com/vatesfr/xen-orchestra/issues/3264) (PR [#3832](https://github.com/vatesfr/xen-orchestra/pull/3832))
- [Plugins] New field to filter displayed plugins (PR [#3832](https://github.com/vatesfr/xen-orchestra/pull/3871))
- Ability to copy ID of "unknown item"s [#3833](https://github.com/vatesfr/xen-orchestra/issues/3833) (PR [#3856](https://github.com/vatesfr/xen-orchestra/pull/3856))
@@ -24,6 +127,12 @@
### Bug fixes
- [Self] Display sorted Resource Sets [#3818](https://github.com/vatesfr/xen-orchestra/issues/3818) (PR [#3823](https://github.com/vatesfr/xen-orchestra/pull/3823))
- [Servers] Correctly report connecting status (PR [#3838](https://github.com/vatesfr/xen-orchestra/pull/3838))
- [Servers] Fix cannot reconnect to a server after connection has been lost [#3839](https://github.com/vatesfr/xen-orchestra/issues/3839) (PR [#3841](https://github.com/vatesfr/xen-orchestra/pull/3841))
- [New VM] Fix `NO_HOSTS_AVAILABLE()` error when creating a VM on a local SR from template on another local SR [#3084](https://github.com/vatesfr/xen-orchestra/issues/3084) (PR [#3827](https://github.com/vatesfr/xen-orchestra/pull/3827))
- [Backup NG] Fix typo in the form [#3854](https://github.com/vatesfr/xen-orchestra/issues/3854) (PR [#3855](https://github.com/vatesfr/xen-orchestra/pull/3855))
- [New SR] No warning when creating a NFS SR on a path that is already used as NFS SR [#3844](https://github.com/vatesfr/xen-orchestra/issues/3844) (PR [#3851](https://github.com/vatesfr/xen-orchestra/pull/3851))
- [New SR] No redirection if the SR creation failed or canceled [#3843](https://github.com/vatesfr/xen-orchestra/issues/3843) (PR [#3853](https://github.com/vatesfr/xen-orchestra/pull/3853))
- [Home] Fix two tabs opened by middle click in Firefox [#3450](https://github.com/vatesfr/xen-orchestra/issues/3450) (PR [#3825](https://github.com/vatesfr/xen-orchestra/pull/3825))
- [XOA] Enable downgrade for ending trial (PR [#3867](https://github.com/vatesfr/xen-orchestra/pull/3867))
@@ -38,6 +147,8 @@
### Released packages
- vhd-cli v0.2.0
- @xen-orchestra/fs v0.6.0
- vhd-lib v0.5.1
- xoa-updater v0.15.0
- xen-api v0.24.1
@@ -45,38 +156,6 @@
- xo-server v5.34.0
- xo-web v5.34.0
## *staging*
### Enhancements
- [Backup NG] Restore logs moved to restore tab [#3772](https://github.com/vatesfr/xen-orchestra/issues/3772) (PR [#3802](https://github.com/vatesfr/xen-orchestra/pull/3802))
- [Remotes] New SMB implementation that provides better stability and performance [#2257](https://github.com/vatesfr/xen-orchestra/issues/2257) (PR [#3708](https://github.com/vatesfr/xen-orchestra/pull/3708))
- [VM/advanced] ACL management from VM view [#3040](https://github.com/vatesfr/xen-orchestra/issues/3040) (PR [#3774](https://github.com/vatesfr/xen-orchestra/pull/3774))
- [VM / snapshots] Ability to save the VM memory [#3795](https://github.com/vatesfr/xen-orchestra/issues/3795) (PR [#3812](https://github.com/vatesfr/xen-orchestra/pull/3812))
- [Backup NG / Health] Show number of lone snapshots in tab label [#3500](https://github.com/vatesfr/xen-orchestra/issues/3500) (PR [#3824](https://github.com/vatesfr/xen-orchestra/pull/3824))
- [Login] Add autofocus on username input on login page [#3835](https://github.com/vatesfr/xen-orchestra/issues/3835) (PR [#3836](https://github.com/vatesfr/xen-orchestra/pull/3836))
- [Home/VM] Bulk snapshot: specify snapshots' names [#3778](https://github.com/vatesfr/xen-orchestra/issues/3778) (PR [#3787](https://github.com/vatesfr/xen-orchestra/pull/3787))
- [Remotes] Show free space and disk usage on remote [#3055](https://github.com/vatesfr/xen-orchestra/issues/3055) (PR [#3767](https://github.com/vatesfr/xen-orchestra/pull/3767))
- [New SR] Add tooltip for reattach action button [#3845](https://github.com/vatesfr/xen-orchestra/issues/3845) (PR [#3852](https://github.com/vatesfr/xen-orchestra/pull/3852))
### Bug fixes
- [Self] Display sorted Resource Sets [#3818](https://github.com/vatesfr/xen-orchestra/issues/3818) (PR [#3823](https://github.com/vatesfr/xen-orchestra/pull/3823))
- [Servers] Correctly report connecting status (PR [#3838](https://github.com/vatesfr/xen-orchestra/pull/3838))
- [Servers] Fix cannot reconnect to a server after connection has been lost [#3839](https://github.com/vatesfr/xen-orchestra/issues/3839) (PR [#3841](https://github.com/vatesfr/xen-orchestra/pull/3841))
- [New VM] Fix `NO_HOSTS_AVAILABLE()` error when creating a VM on a local SR from template on another local SR [#3084](https://github.com/vatesfr/xen-orchestra/issues/3084) (PR [#3827](https://github.com/vatesfr/xen-orchestra/pull/3827))
- [Backup NG] Fix typo in the form [#3854](https://github.com/vatesfr/xen-orchestra/issues/3854) (PR [#3855](https://github.com/vatesfr/xen-orchestra/pull/3855))
- [New SR] No warning when creating a NFS SR on a path that is already used as NFS SR [#3844](https://github.com/vatesfr/xen-orchestra/issues/3844) (PR [#3851](https://github.com/vatesfr/xen-orchestra/pull/3851))
### Released packages
- vhd-lib v0.5.0
- vhd-cli v0.2.0
- xen-api v0.24.0
- @xen-orchestra/fs v0.6.0
- xo-server v5.33.0
- xo-web v5.33.0
## **5.30.0** (2018-12-20)
### Enhancements

CHANGELOG.unreleased.md Normal file
View File

@@ -0,0 +1,37 @@
> This file contains all changes that have not been released yet.
### Enhancements
- [Remotes] Benchmarks (read and write rate speed) added when remote is tested [#3991](https://github.com/vatesfr/xen-orchestra/issues/3991) (PR [#4015](https://github.com/vatesfr/xen-orchestra/pull/4015))
- [Cloud Config] Support both NoCloud and Config Drive 2 datasources for maximum compatibility (PR [#4053](https://github.com/vatesfr/xen-orchestra/pull/4053))
- [Advanced] Configurable cookie validity (PR [#4059](https://github.com/vatesfr/xen-orchestra/pull/4059))
- [Plugins] Display number of installed plugins [#4008](https://github.com/vatesfr/xen-orchestra/issues/4008) (PR [#4050](https://github.com/vatesfr/xen-orchestra/pull/4050))
- [Continuous Replication] Opt-in mode to guess VHD size, should help with XenServer 7.1 CU2 and various `VDI_IO_ERROR` errors (PR [#3726](https://github.com/vatesfr/xen-orchestra/pull/3726))
- [VM/Snapshots] Always delete broken quiesced snapshots [#4074](https://github.com/vatesfr/xen-orchestra/issues/4074) (PR [#4075](https://github.com/vatesfr/xen-orchestra/pull/4075))
- [Settings/Servers] Display link to pool [#4041](https://github.com/vatesfr/xen-orchestra/issues/4041) (PR [#4045](https://github.com/vatesfr/xen-orchestra/pull/4045))
- [Import] Change wording of drop zone (PR [#4020](https://github.com/vatesfr/xen-orchestra/pull/4020))
- [Backup NG] Ability to set the interval of the full backups [#1783](https://github.com/vatesfr/xen-orchestra/issues/1783) (PR [#4083](https://github.com/vatesfr/xen-orchestra/pull/4083))
- [Hosts] Display a warning icon if you have XenServer license restrictions [#4091](https://github.com/vatesfr/xen-orchestra/issues/4091) (PR [#4094](https://github.com/vatesfr/xen-orchestra/pull/4094))
- [Restore] Ability to restore a metadata backup [#4004](https://github.com/vatesfr/xen-orchestra/issues/4004) (PR [#4023](https://github.com/vatesfr/xen-orchestra/pull/4023))
### Bug fixes
- [Home] Always sort the items by their names as a secondary sort criteria [#3983](https://github.com/vatesfr/xen-orchestra/issues/3983) (PR [#4047](https://github.com/vatesfr/xen-orchestra/pull/4047))
- [Remotes] Fixes `spawn mount EMFILE` error during backup
- Properly redirect to sign in page instead of being stuck in a refresh loop
- [Backup-ng] No more false positives when listing matching VMs on Home page [#4078](https://github.com/vatesfr/xen-orchestra/issues/4078) (PR [#4085](https://github.com/vatesfr/xen-orchestra/pull/4085))
- [Plugins] Properly remove optional settings when unchecking _Fill information_ (PR [#4076](https://github.com/vatesfr/xen-orchestra/pull/4076))
- [Patches] (PR [#4077](https://github.com/vatesfr/xen-orchestra/pull/4077))
- Add a host to a pool: fixes the auto-patching of the host on XenServer < 7.2 [#3783](https://github.com/vatesfr/xen-orchestra/issues/3783)
- Add a host to a pool: homogenizes both the host and **pool**'s patches [#2188](https://github.com/vatesfr/xen-orchestra/issues/2188)
- Safely install a subset of patches on a pool [#3777](https://github.com/vatesfr/xen-orchestra/issues/3777)
- XCP-ng: no longer requires to run `yum install xcp-ng-updater` when it's already installed [#3934](https://github.com/vatesfr/xen-orchestra/issues/3934)
### Released packages
- xen-api v0.24.6
- vhd-lib v0.6.0
- @xen-orchestra/fs v0.8.0
- xo-server-usage-report v0.7.2
- xo-server v5.38.0
- xo-web v5.38.0

View File

@@ -4,7 +4,7 @@
- [ ] PR references the relevant issue (e.g. `Fixes #007`)
- [ ] if UI changes, a screenshot has been added to the PR
- [ ] CHANGELOG:
- [ ] `CHANGELOG.unreleased.md`:
- enhancement/bug fix entry added
- list of packages to release updated (`${name} v${new version}`)
- [ ] documentation updated

View File

@@ -33,6 +33,7 @@
* [Disaster recovery](disaster_recovery.md)
* [Smart Backup](smart_backup.md)
* [File level Restore](file_level_restore.md)
* [Metadata Backup](metadata_backup.md)
* [Backup Concurrency](concurrency.md)
* [Configure backup reports](backup_reports.md)
* [Backup troubleshooting](backup_troubleshooting.md)
@@ -51,6 +52,7 @@
* [Job manager](scheduler.md)
* [Alerts](alerts.md)
* [Load balancing](load_balancing.md)
* [Emergency Shutdown](emergency_shutdown.md)
* [Auto scalability](auto_scalability.md)
* [Forecaster](forecaster.md)
* [Recipes](recipes.md)

Eleven new binary image files added (the four named in this view are `docs/assets/cr-seed-1.png`, `docs/assets/cr-seed-2.png`, `docs/assets/cr-seed-3.png` and `docs/assets/cr-seed-4.png`); image previews not shown.

View File

@@ -12,7 +12,9 @@ Another good way to check if there is activity is the XOA VM stats view (on the
### VDI chain protection
This means your previous VM disks and snapshots should be "merged" (*coalesced* in the XenServer world) before we can take a new snapshot. This mechanism is handled by XenServer itself, not Xen Orchestra. But we can check your existing VDI chain and avoid creating more snapshots than your storage can merge. Otherwise, this will lead to catastrophic consequences. Xen Orchestra is the **only** XenServer/XCP backup product dealing with this.
Backup jobs regularly delete snapshots. When a snapshot is deleted, either manually or via a backup job, it triggers the need for XenServer to coalesce the VDI chain - to merge the remaining VDIs and base copies in the chain. This generally means we cannot take many new snapshots on that VM until XenServer has finished running a coalesce job on the VDI chain.
This mechanism and scheduling is handled by XenServer itself, not Xen Orchestra. But we can check your existing VDI chain and avoid creating more snapshots than your storage can merge. If we don't, this will lead to catastrophic consequences. Xen Orchestra is the **only** XenServer/XCP backup product that takes this into account and offers protection.
Without this detection, you could have 2 potential issues:
@@ -21,9 +23,9 @@ Without this detection, you could have 2 potential issues:
The first issue is a chain that contains more than 30 elements (a fixed XenServer limit); the other is an SR that filled up because the "coalesce" process couldn't keep up the pace.
In the end, this message is a **protection mechanism against damaging your SR**. The backup job will fail, but XenServer itself should eventually automatically coalesce the snapshot chain, and the next time it runs the backup job should complete.
In the end, this message is a **protection mechanism preventing damage to your SR**. The backup job will fail, but XenServer itself should eventually automatically coalesce the snapshot chain, and the next time it runs the backup job should complete.
Just remember this: **coalesce will happen every time a snapshot is removed**.
Just remember this: **a coalesce should happen every time a snapshot is removed**.
> You can read more on this on our dedicated blog post regarding [XenServer coalesce detection](https://xen-orchestra.com/blog/xenserver-coalesce-detection-in-xen-orchestra/).
@@ -33,11 +35,13 @@ Just remember this: **coalesce will happen every time a snapshot is removed**.
First, check SMlog on the XenServer host for messages relating to VDI corruption or coalesce job failure, for example by running `cat /var/log/SMlog | grep -i exception` or `cat /var/log/SMlog | grep -i error` on the host with the affected storage.
Coalesce jobs can also fail to run if the SR does not have enough free space. Check the problematic SR and make sure it has enough free space; generally 30% or more free is recommended, depending on VM size.
Coalesce jobs can also fail to run if the SR does not have enough free space. Check the problematic SR and make sure it has enough free space; generally 30% or more free is recommended, depending on VM size. You can check if this is the issue by searching `SMlog` with `grep -i coales /var/log/SMlog` (you may have to look at previous logs such as `SMlog.1`).
You can check if a coalesce job is currently active by running `ps axf | grep vhd` on the XenServer host and looking for a VHD process in the results (one of the resulting processes will be the grep command you just ran, ignore that one).
If you don't see any running coalesce jobs, and can't find any other reason that XenServer has not started one, you can attempt to make it start a coalesce job by rescanning the SR. This is harmless to try, but will not always result in a coalesce. Visit the problematic SR in the XOA UI, then click the "Rescan All Disks" button towards the top right: it looks like a refresh circle icon. This should begin the coalesce process - if you click the Advanced tab in the SR view, the "disks needing to be coalesced" list should become smaller and smaller.
As a last resort, migrating the VM (more specifically, its disks) to a new storage repository will also force a coalesce and solve this issue. That means migrating a VM to another host (with its own storage) and back will force the VDI chain for that VM to be coalesced, and get rid of the `VDI Chain Protection` message.
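Putting the checks above together, here is a minimal diagnostic sequence you can run from the host shell. The `xe sr-list` and `xe sr-scan` commands are an assumption based on the standard XenServer CLI; they are the command-line equivalents of checking SR free space and clicking "Rescan All Disks" in the XOA UI:
```
# Look for VDI corruption or coalesce failures in SMlog (check rotated logs too)
grep -i exception /var/log/SMlog
grep -i error /var/log/SMlog
grep -i coales /var/log/SMlog*

# Make sure the SR has enough free space (30% or more free is recommended)
xe sr-list params=name-label,physical-size,physical-utilisation

# See whether a coalesce job is currently running (ignore the grep line itself)
ps axf | grep vhd

# If nothing is running, trigger a rescan of the problematic SR
xe sr-scan uuid=<sr-uuid>
```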
### Parse Error

View File

@@ -1,5 +1,7 @@
# Backups
> Watch our [introduction video](https://www.youtube.com/watch?v=FfUqIwT8KzI) (45m) to Backup in Xen Orchestra!
This section is dedicated to all existing methods of rolling back or backing up your VMs in Xen Orchestra.
There are several ways to protect your VMs:
@@ -8,6 +10,7 @@ There are several ways to protect your VMs:
* [Rolling Snapshots](rolling_snapshots.md) [*Starter Edition*]
* [Delta Backups](delta_backups.md) (best of both previous ones) [*Enterprise Edition*]
* [Disaster Recovery](disaster_recovery.md) [*Enterprise Edition*]
* [Metadata Backups](metadata_backup.md) [*Enterprise Edition*]
* [Continuous Replication](continuous_replication.md) [*Premium Edition*]
* [File Level Restore](file_level_restore.md) [*Premium Edition*]
@@ -39,7 +42,7 @@ Each backups' job execution is identified by a `runId`. You can find this `runId
All backup types rely on snapshots. But what about data consistency? By default, Xen Orchestra will try to take a **quiesced snapshot** every time a snapshot is done (and fall back to normal snapshots if it's not possible).
Snapshots of Windows VMs can be quiesced (especially MS SQL or Exchange services) after you have installed Xen Tools in your VMs. However, [there is an extra step to install the VSS provider on Windows](quiesce). A quiesced snapshot means the operating system will be notified and the cache will be flushed to disks. This way, your backups will always be consistent.
Snapshots of Windows VMs can be quiesced (especially MS SQL or Exchange services) after you have installed Xen Tools in your VMs. However, [there is an extra step to install the VSS provider on Windows](https://xen-orchestra.com/blog/xenserver-quiesce-snapshots/). A quiesced snapshot means the operating system will be notified and the cache will be flushed to disks. This way, your backups will always be consistent.
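If you want to check quiesce support from the host CLI before relying on it in a backup job, you can try taking a quiesced snapshot manually. This is a sketch assuming the standard `xe vm-snapshot-with-quiesce` command is available on your XenServer version; it will fail if the VSS provider is not installed in the guest:
```
# Take a quiesced snapshot of a Windows VM (requires Xen Tools + the VSS provider in the guest)
xe vm-snapshot-with-quiesce vm=<vm-name-label> new-name-label=quiesce-test
```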
To see if you have quiesced snapshots for a VM, just go into its snapshot tab, then the "info" icon means it is a quiesced snapshot:

View File

@@ -1,7 +1,5 @@
# CloudInit
> CloudInit support is available in the 4.11 release and higher
Cloud-init is a program "that handles the early initialization of a cloud instance"[^n]. In other words, you can, on a "cloud-init"-ready template VM, pass a lot of data at first boot:
* setting the hostname
@@ -18,25 +16,27 @@ So it means very easily customizing your VM when you create it from a compatible
You only need to use a template of a VM with CloudInit installed inside it. [Check this blog post to learn how to install CloudInit](https://xen-orchestra.com/blog/centos-cloud-template-for-xenserver/).
**Note:** In XOA 5.31, we changed the cloud-init config drive type from [OpenStack](https://cloudinit.readthedocs.io/en/latest/topics/datasources/configdrive.html) to the [NoCloud](https://cloudinit.readthedocs.io/en/latest/topics/datasources/nocloud.html) type. This will allow us to pass network configuration to VMs in the future. For 99% of users, including default cloud-init installs, this change will have no effect. However if you have previously modified your cloud-init installation in a VM template to only look for `openstack` drive types (for instance with the `datasource_list` setting in `/etc/cloud/cloud.cfg`) you need to modify it to also look for `nocloud`.
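If you are in that situation, a minimal fix (assuming a standard cloud-init layout with a `/etc/cloud/cloud.cfg.d/` drop-in directory; the file name below is only an example) is to add `NoCloud` back to the datasource list inside the template:
```
# Let cloud-init probe both NoCloud and OpenStack/ConfigDrive datasources
echo 'datasource_list: [ NoCloud, ConfigDrive, OpenStack, None ]' | sudo tee /etc/cloud/cloud.cfg.d/90_datasources.cfg
```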
## Usage
First, select your compatible template (CloudInit ready) and name it:
![](https://xen-orchestra.com/blog/content/images/2015/12/template_choice.png)
![](./assets/cloud-init-1.png)
Then, activate the config drive and insert your SSH key. Or you can also use a custom CloudInit configuration:
![](https://xen-orchestra.com/blog/content/images/2016/02/CloudInit.png)
![](./assets/cloud-init-2.png)
> CloudInit configuration examples are [available here](http://cloudinit.readthedocs.org/en/latest/topics/examples.html).
You can extend the disk size (**in this case, the template disk was 8 GiB originally**):
You can extend the disk size (**in this case, the template disk was 8 GiB originally**). We'll extend it to 20GiB:
![](https://xen-orchestra.com/blog/content/images/2015/12/diskedition.png)
![](./assets/cloud-init-3.png)
Finally, create the VM:
![](https://xen-orchestra.com/blog/content/images/2015/12/recap.png)
![](./assets/cloud-init-4.png)
Now start the VM and SSH to its IP:

View File

@@ -43,11 +43,19 @@ To protect the replication, we removed the possibility to boot your copied VM di
### Job creation
Create the Continuous Replication backup job, and leave it disabled for now. On the main Backup-NG page, note its identifiers, the main `backupJobId` and the ID of one on the schedules for the job, `backupScheduleId`.
Create the Continuous Replication backup job, and leave it disabled for now. On the main Backup-NG page, copy the job's `backupJobId` by hovering to the left of the shortened ID and clicking the copy to clipboard button:
![](./assets/cr-seed-1.png)
Copy it somewhere temporarily. Now we need to also copy the ID of the job schedule, `backupScheduleId`. Do this by hovering over the schedule name in the same panel as before, and clicking the copy to clipboard button. Keep it with the `backupJobId` you copied previously as we will need them all later:
![](./assets/cr-seed-2.png)
### Seed creation
Manually create a snapshot on the VM to backup, and note its UUID as `snapshotUuid` from the snapshot panel for the VM.
Manually create a snapshot on the VM being backed up, then copy this snapshot UUID, `snapshotUuid` from the snapshot panel of the VM:
![](./assets/cr-seed-3.png)
> DO NOT ever delete or alter this snapshot; feel free to rename it to make that clear.
@@ -55,7 +63,9 @@ Manually create a snapshot on the VM to backup, and note its UUID as `snapshotUu
Export this snapshot to a file, then import it on the target SR.
Note the UUID of this newly created VM as `targetVmUuid`.
We need to copy the UUID of this newly created VM as well, `targetVmUuid`:
![](./assets/cr-seed-4.png)
> DO NOT start this VM or it will break the Continuous Replication job! You can rename this VM to make this easier to remember.
@@ -66,7 +76,7 @@ The XOA backup system requires metadata to correctly associate the source snapsh
First install the tool (all the following is done from the XOA VM CLI):
```
npm i -g xo-cr-seed
sudo npm i -g --unsafe-perm @xen-orchestra/cr-seed-cli
```
Here is an example of how the utility expects the UUIDs and info passed to it:

View File

@@ -0,0 +1,27 @@
# Emergency Shutdown
If you have a UPS for your hosts, and lose power, you may have a limited amount of time to shut down all of your VM infrastructure before the batteries run out. If you find yourself in this situation, or any other situation requiring the fast shutdown of everything, you can use the **Emergency Shutdown** feature.
## How to activate
On the host view, clicking on this button will trigger the _Emergency Shutdown_ procedure:
![](./assets/e-shutdown-1.png)
1. **All running VMs will be suspended** (think of it like "hibernate" on your laptop: the RAM will be stored in the storage repository).
2. Only after this is complete will the host be halted.
Here, you can see the running VMs are being suspended:
![](./assets/e-shutdown-2.png)
And finally, that's it: the VMs are cleanly suspended, with their RAM saved to disk so they can be resumed later:
![](./assets/e-shutdown-3.png)
Now the host is halted automatically.
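For reference, the manual equivalent from the host CLI would look roughly like the sketch below. This is an assumption based on standard `xe` commands; XO performs the same steps for you through XAPI when you click the button:
```
# Suspend every running VM on the host (their RAM is written to the storage repository)
for uuid in $(xe vm-list power-state=running is-control-domain=false --minimal | tr ',' ' '); do
  xe vm-suspend uuid=$uuid
done

# Then disable and shut down the host itself
xe host-disable host=<hostname>
xe host-shutdown host=<hostname>
```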
## Powering back on
When the power outage is over, all you need to do is:
1. Start your host.
2. Resume your VMs: their RAM was preserved, so they will be in the exact same state they were in before the power outage.

View File

@@ -6,9 +6,9 @@
> Please take time to read this guide carefully.
This installation has been validated against a fresh Debian 8 (Jessie) x64 install. It should be nearly the same on other dpkg systems. For RPM based OS's, it should be close, as most of our dependencies come from NPM and not the OS itself.
This installation has been validated against a fresh Debian 9 (Stretch) x64 install. It should be nearly the same on other dpkg systems. For RPM based OS's, it should be close, as most of our dependencies come from NPM and not the OS itself.
As you may have seen,in other parts of the documentation, XO is composed of two parts: [xo-server](https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-server/) and [xo-web](https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-web/). They can be installed separately, even on different machines, but for the sake of simplicity we will set them up together.
As you may have seen in other parts of the documentation, XO is composed of two parts: [xo-server](https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-server/) and [xo-web](https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-web/). They can be installed separately, even on different machines, but for the sake of simplicity we will set them up together.
## Packages and Pre-requisites
@@ -38,7 +38,7 @@ XO needs the following packages to be installed. Redis is used as a database by
For example, on Debian:
```
apt-get install build-essential redis-server libpng-dev git python-minimal libvhdi-utils lvm2
apt-get install build-essential redis-server libpng-dev git python-minimal libvhdi-utils lvm2 cifs-utils
```
## Fetching the Code
@@ -49,13 +49,14 @@ You need to use the `git` source code manager to fetch the code. Ideally, you sh
git clone -b master http://github.com/vatesfr/xen-orchestra
```
> Note: xo-server and xo-web have been migrated to the [xen-orchestra](https://github.com/vatesfr/xen-orchestra) mono-repository.
> Note: xo-server and xo-web have been migrated to the [xen-orchestra](https://github.com/vatesfr/xen-orchestra) mono-repository - so you only need the single clone command above
## Installing dependencies
Once you have it, use `yarn`, as the non-root (or root) user owning the fetched code, to install the other dependencies. Enter the `xen-orchestra` directory and run the following commands:
Now that you have the code, you can enter the `xen-orchestra` directory and use `yarn` to install other dependencies. Then finally build it using `yarn build`. Be sure to run `yarn` commands as the same user you will be using to run Xen Orchestra:
```
$ cd xen-orchestra
$ yarn
$ yarn build
```
@@ -86,7 +87,7 @@ WebServer listening on localhost:80
## Running XO
The only part you need to launch is xo-server which is quite easy to do. From the `xen-orchestra/packages/xo-server` directory, run the following:
The only part you need to launch is xo-server, which is quite easy to do. From the `xen-orchestra/packages/xo-server` directory, run the following:
```
$ yarn start

docs/metadata_backup.md (new file)
View File

@@ -0,0 +1,31 @@
# Metadata backup
> WARNING: Metadata backup is an experimental feature. Restore is not yet available and some unexpected issues may occur.
## Introduction
XCP-ng and Citrix Hypervisor (XenServer) hosts use a database to store metadata about VMs and their associated resources such as storage and networking. This metadata forms the complete view of all VMs available on your pool. Backing up the metadata of your pool allows you to recover from a physical hardware failure scenario in which you lose your hosts without losing your storage (SAN, NAS...).
In Xen Orchestra, Metadata backup is divided into two different options:
* Pool metadata backup
* XO configuration backup
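The pool metadata in question is the XAPI database itself. To see what such a backup contains outside of XO, you can produce a dump manually with the standard `xe` command below; this is only an illustration, as XO drives the equivalent XAPI call for you and stores the result on your backup remote:
```
# Dump the pool database (VM, SR and network metadata) to a file on the pool master
xe pool-dump-database file-name=/root/pool-metadata-backup
```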
### How to use metadata backup
In the backup job section, when creating a new backup job, you will now have a choice between backing up VMs and backing up Metadata.
![](https://user-images.githubusercontent.com/21563339/53413921-bd636f00-39cd-11e9-8a3c-d4f893135fa4.png)
When you select Metadata backup, you will have a new backup job screen, letting you choose between a pool metadata backup and an XO configuration backup (or both at the same time):
![](https://user-images.githubusercontent.com/21563339/52416838-d2de2b00-2aea-11e9-8da0-340fcb2767db.png)
Define the name and retention for the job.
![](https://user-images.githubusercontent.com/21563339/52471527-65390a00-2b91-11e9-8019-600a4d9eeafb.png)
Once created, the job is displayed with the other classic jobs.
![](https://user-images.githubusercontent.com/21563339/52416802-c0fc8800-2aea-11e9-8ef0-b0c1bd0e48b8.png)
> Restore for metadata backup jobs should be available in XO 5.33

View File

@@ -4,10 +4,10 @@
"@babel/register": "^7.0.0",
"babel-core": "^7.0.0-0",
"babel-eslint": "^10.0.1",
"babel-jest": "^23.0.1",
"babel-jest": "^24.1.0",
"benchmark": "^2.1.4",
"eslint": "^5.1.0",
"eslint-config-prettier": "^3.3.0",
"eslint-config-prettier": "^4.1.0",
"eslint-config-standard": "12.0.0",
"eslint-config-standard-jsx": "^6.0.2",
"eslint-plugin-import": "^2.8.0",
@@ -16,13 +16,13 @@
"eslint-plugin-react": "^7.6.1",
"eslint-plugin-standard": "^4.0.0",
"exec-promise": "^0.7.0",
"flow-bin": "^0.90.0",
"flow-bin": "^0.95.1",
"globby": "^9.0.0",
"husky": "^1.2.1",
"jest": "^23.0.1",
"jest": "^24.1.0",
"lodash": "^4.17.4",
"prettier": "^1.10.2",
"promise-toolbox": "^0.11.0",
"promise-toolbox": "^0.12.1",
"sorted-object": "^2.0.1"
},
"engines": {
@@ -34,7 +34,6 @@
}
},
"jest": {
"timers": "fake",
"collectCoverage": true,
"projects": [
"<rootDir>"

View File

@@ -7,6 +7,7 @@
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/complex-matcher",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "packages/complex-matcher",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},

View File

@@ -7,6 +7,7 @@
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/value-matcher",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "packages/value-matcher",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},

View File

@@ -1,12 +1,13 @@
{
"name": "vhd-cli",
"version": "0.2.0",
"version": "0.3.0",
"license": "ISC",
"description": "",
"keywords": [],
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/vhd-cli",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "packages/vhd-cli",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
@@ -26,12 +27,12 @@
"node": ">=6"
},
"dependencies": {
"@xen-orchestra/fs": "^0.6.0",
"@xen-orchestra/fs": "^0.8.0",
"cli-progress": "^2.0.0",
"exec-promise": "^0.7.0",
"getopts": "^2.2.3",
"struct-fu": "^1.2.0",
"vhd-lib": "^0.5.1"
"vhd-lib": "^0.6.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
@@ -41,7 +42,7 @@
"cross-env": "^5.1.3",
"execa": "^1.0.0",
"index-modules": "^0.3.0",
"promise-toolbox": "^0.11.0",
"promise-toolbox": "^0.12.1",
"rimraf": "^2.6.1",
"tmp": "^0.0.33"
},

View File

@@ -0,0 +1,33 @@
import { asCallback, fromCallback, fromEvent } from 'promise-toolbox'
import { getHandler } from '@xen-orchestra/fs'
import { relative } from 'path'
import { start as createRepl } from 'repl'
import Vhd, * as vhdLib from 'vhd-lib'
export default async args => {
const cwd = process.cwd()
const handler = getHandler({ url: 'file://' + cwd })
await handler.sync()
try {
const repl = createRepl({
prompt: 'vhd> ',
})
Object.assign(repl.context, vhdLib)
repl.context.handler = handler
repl.context.open = path => new Vhd(handler, relative(cwd, path))
// Make the REPL wait for promise completion.
repl.eval = (evaluate => (cmd, context, filename, cb) => {
asCallback.call(
fromCallback(cb => {
evaluate.call(repl, cmd, context, filename, cb)
}).then(value => (Array.isArray(value) ? Promise.all(value) : value)),
cb
)
})(repl.eval)
await fromEvent(repl, 'exit')
} finally {
await handler.forget()
}
}

View File

@@ -2,6 +2,8 @@
import execPromise from 'exec-promise'
import pkg from '../package.json'
import commands from './commands'
function runCommand(commands, [command, ...args]) {
@@ -18,7 +20,9 @@ function runCommand(commands, [command, ...args]) {
${Object.keys(commands)
.filter(command => command !== 'help')
.map(command => ` ${this.command} ${command}`)
.join('\n\n')}`
.join('\n\n')}
vhd-cli ${pkg.version}`
}
throw `invalid command ${command}` // eslint-disable-line no-throw-literal

View File

@@ -1,38 +1,40 @@
/* eslint-env jest */
import asyncIteratorToStream from 'async-iterator-to-stream'
import execa from 'execa'
import fs from 'fs-extra'
import getStream from 'get-stream'
import rimraf from 'rimraf'
import tmp from 'tmp'
import { fromEvent, pFromCallback } from 'promise-toolbox'
import { getHandler } from '@xen-orchestra/fs'
import { pFromCallback } from 'promise-toolbox'
import { pipeline } from 'readable-stream'
import { randomBytes } from 'crypto'
import Vhd, { chainVhd, createSyntheticStream, mergeVhd as vhdMerge } from './'
import { SECTOR_SIZE } from './src/_constants'
const initialDir = process.cwd()
let tempDir = null
jest.setTimeout(60000)
beforeEach(async () => {
const dir = await pFromCallback(cb => tmp.dir(cb))
process.chdir(dir)
tempDir = await pFromCallback(cb => tmp.dir(cb))
})
afterEach(async () => {
const tmpDir = process.cwd()
process.chdir(initialDir)
await pFromCallback(cb => rimraf(tmpDir, cb))
await pFromCallback(cb => rimraf(tempDir, cb))
})
async function createRandomFile(name, sizeMb) {
await execa('bash', [
'-c',
`< /dev/urandom tr -dc "\\t\\n [:alnum:]" | head -c ${sizeMb}M >${name}`,
])
async function createRandomFile(name, sizeMB) {
const createRandomStream = asyncIteratorToStream(function*(size) {
while (size-- > 0) {
yield Buffer.from([Math.floor(Math.random() * 256)])
}
})
const input = createRandomStream(sizeMB * 1024 * 1024)
await pFromCallback(cb => pipeline(input, fs.createWriteStream(name), cb))
}
async function checkFile(vhdName) {
@@ -53,31 +55,35 @@ async function convertFromRawToVhd(rawName, vhdName) {
test('blocks can be moved', async () => {
const initalSize = 4
await createRandomFile('randomfile', initalSize)
await convertFromRawToVhd('randomfile', 'randomfile.vhd')
const handler = getHandler({ url: 'file://' + process.cwd() })
const originalSize = await handler.getSize('randomfile')
const newVhd = new Vhd(handler, 'randomfile.vhd')
const rawFileName = `${tempDir}/randomfile`
await createRandomFile(rawFileName, initalSize)
const vhdFileName = `${tempDir}/randomfile.vhd`
await convertFromRawToVhd(rawFileName, vhdFileName)
const handler = getHandler({ url: 'file://' })
const originalSize = await handler.getSize(rawFileName)
const newVhd = new Vhd(handler, vhdFileName)
await newVhd.readHeaderAndFooter()
await newVhd.readBlockAllocationTable()
await newVhd._freeFirstBlockSpace(8000000)
await recoverRawContent('randomfile.vhd', 'recovered', originalSize)
expect(await fs.readFile('recovered')).toEqual(
await fs.readFile('randomfile')
const recoveredFileName = `${tempDir}/recovered`
await recoverRawContent(vhdFileName, recoveredFileName, originalSize)
expect(await fs.readFile(recoveredFileName)).toEqual(
await fs.readFile(rawFileName)
)
})
test('the BAT MSB is not used for sign', async () => {
const randomBuffer = await pFromCallback(cb => randomBytes(SECTOR_SIZE, cb))
await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', '1.8T'])
const handler = getHandler({ url: 'file://' + process.cwd() })
const vhd = new Vhd(handler, 'empty.vhd')
const emptyFileName = `${tempDir}/empty.vhd`
await execa('qemu-img', ['create', '-fvpc', emptyFileName, '1.8T'])
const handler = getHandler({ url: 'file://' })
const vhd = new Vhd(handler, emptyFileName)
await vhd.readHeaderAndFooter()
await vhd.readBlockAllocationTable()
// we want the bit 31 to be on, to prove it's not been used for sign
const hugeWritePositionSectors = Math.pow(2, 31) + 200
await vhd.writeData(hugeWritePositionSectors, randomBuffer)
await checkFile('empty.vhd')
await checkFile(emptyFileName)
// here we are moving the first sector very far in the VHD to prove the BAT doesn't use signed int32
const hugePositionBytes = hugeWritePositionSectors * SECTOR_SIZE
await vhd._freeFirstBlockSpace(hugePositionBytes)
@@ -85,9 +91,10 @@ test('the BAT MSB is not used for sign', async () => {
// we recover the data manually for speed reasons.
// fs.write() with offset is way faster than qemu-img when there is a 1.5To
// hole before the block of data
const recoveredFile = await fs.open('recovered', 'w')
const recoveredFileName = `${tempDir}/recovered`
const recoveredFile = await fs.open(recoveredFileName, 'w')
try {
const vhd2 = new Vhd(handler, 'empty.vhd')
const vhd2 = new Vhd(handler, emptyFileName)
await vhd2.readHeaderAndFooter()
await vhd2.readBlockAllocationTable()
for (let i = 0; i < vhd.header.maxTableEntries; i++) {
@@ -107,7 +114,7 @@ test('the BAT MSB is not used for sign', async () => {
fs.close(recoveredFile)
}
const recovered = await getStream.buffer(
await fs.createReadStream('recovered', {
await fs.createReadStream(recoveredFileName, {
start: hugePositionBytes,
end: hugePositionBytes + randomBuffer.length - 1,
})
@@ -117,27 +124,33 @@ test('the BAT MSB is not used for sign', async () => {
test('writeData on empty file', async () => {
const mbOfRandom = 3
await createRandomFile('randomfile', mbOfRandom)
await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', mbOfRandom + 'M'])
const randomData = await fs.readFile('randomfile')
const handler = getHandler({ url: 'file://' + process.cwd() })
const originalSize = await handler.getSize('randomfile')
const newVhd = new Vhd(handler, 'empty.vhd')
const rawFileName = `${tempDir}/randomfile`
const emptyFileName = `${tempDir}/empty.vhd`
await createRandomFile(rawFileName, mbOfRandom)
await execa('qemu-img', ['create', '-fvpc', emptyFileName, mbOfRandom + 'M'])
const randomData = await fs.readFile(rawFileName)
const handler = getHandler({ url: 'file://' })
const originalSize = await handler.getSize(rawFileName)
const newVhd = new Vhd(handler, emptyFileName)
await newVhd.readHeaderAndFooter()
await newVhd.readBlockAllocationTable()
await newVhd.writeData(0, randomData)
await recoverRawContent('empty.vhd', 'recovered', originalSize)
expect(await fs.readFile('recovered')).toEqual(randomData)
const recoveredFileName = `${tempDir}/recovered`
await recoverRawContent(emptyFileName, recoveredFileName, originalSize)
expect(await fs.readFile(recoveredFileName)).toEqual(randomData)
})
test('writeData in 2 non-overlaping operations', async () => {
const mbOfRandom = 3
await createRandomFile('randomfile', mbOfRandom)
await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', mbOfRandom + 'M'])
const randomData = await fs.readFile('randomfile')
const handler = getHandler({ url: 'file://' + process.cwd() })
const originalSize = await handler.getSize('randomfile')
const newVhd = new Vhd(handler, 'empty.vhd')
const rawFileName = `${tempDir}/randomfile`
const emptyFileName = `${tempDir}/empty.vhd`
const recoveredFileName = `${tempDir}/recovered`
await createRandomFile(rawFileName, mbOfRandom)
await execa('qemu-img', ['create', '-fvpc', emptyFileName, mbOfRandom + 'M'])
const randomData = await fs.readFile(rawFileName)
const handler = getHandler({ url: 'file://' })
const originalSize = await handler.getSize(rawFileName)
const newVhd = new Vhd(handler, emptyFileName)
await newVhd.readHeaderAndFooter()
await newVhd.readBlockAllocationTable()
const splitPointSectors = 2
@@ -146,18 +159,21 @@ test('writeData in 2 non-overlaping operations', async () => {
splitPointSectors,
randomData.slice(splitPointSectors * 512)
)
await recoverRawContent('empty.vhd', 'recovered', originalSize)
expect(await fs.readFile('recovered')).toEqual(randomData)
await recoverRawContent(emptyFileName, recoveredFileName, originalSize)
expect(await fs.readFile(recoveredFileName)).toEqual(randomData)
})
test('writeData in 2 overlaping operations', async () => {
const mbOfRandom = 3
await createRandomFile('randomfile', mbOfRandom)
await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', mbOfRandom + 'M'])
const randomData = await fs.readFile('randomfile')
const handler = getHandler({ url: 'file://' + process.cwd() })
const originalSize = await handler.getSize('randomfile')
const newVhd = new Vhd(handler, 'empty.vhd')
const rawFileName = `${tempDir}/randomfile`
const emptyFileName = `${tempDir}/empty.vhd`
const recoveredFileName = `${tempDir}/recovered`
await createRandomFile(rawFileName, mbOfRandom)
await execa('qemu-img', ['create', '-fvpc', emptyFileName, mbOfRandom + 'M'])
const randomData = await fs.readFile(rawFileName)
const handler = getHandler({ url: 'file://' })
const originalSize = await handler.getSize(rawFileName)
const newVhd = new Vhd(handler, emptyFileName)
await newVhd.readHeaderAndFooter()
await newVhd.readBlockAllocationTable()
const endFirstWrite = 3
@@ -167,119 +183,138 @@ test('writeData in 2 overlaping operations', async () => {
startSecondWrite,
randomData.slice(startSecondWrite * 512)
)
await recoverRawContent('empty.vhd', 'recovered', originalSize)
expect(await fs.readFile('recovered')).toEqual(randomData)
await recoverRawContent(emptyFileName, recoveredFileName, originalSize)
expect(await fs.readFile(recoveredFileName)).toEqual(randomData)
})
test('BAT can be extended and blocks moved', async () => {
const initalSize = 4
await createRandomFile('randomfile', initalSize)
await convertFromRawToVhd('randomfile', 'randomfile.vhd')
const handler = getHandler({ url: 'file://' + process.cwd() })
const originalSize = await handler.getSize('randomfile')
const newVhd = new Vhd(handler, 'randomfile.vhd')
const rawFileName = `${tempDir}/randomfile`
const recoveredFileName = `${tempDir}/recovered`
const vhdFileName = `${tempDir}/randomfile.vhd`
await createRandomFile(rawFileName, initalSize)
await convertFromRawToVhd(rawFileName, vhdFileName)
const handler = getHandler({ url: 'file://' })
const originalSize = await handler.getSize(rawFileName)
const newVhd = new Vhd(handler, vhdFileName)
await newVhd.readHeaderAndFooter()
await newVhd.readBlockAllocationTable()
await newVhd.ensureBatSize(2000)
await recoverRawContent('randomfile.vhd', 'recovered', originalSize)
expect(await fs.readFile('recovered')).toEqual(
await fs.readFile('randomfile')
await recoverRawContent(vhdFileName, recoveredFileName, originalSize)
expect(await fs.readFile(recoveredFileName)).toEqual(
await fs.readFile(rawFileName)
)
})
test('coalesce works with empty parent files', async () => {
const mbOfRandom = 2
await createRandomFile('randomfile', mbOfRandom)
await convertFromRawToVhd('randomfile', 'randomfile.vhd')
const rawFileName = `${tempDir}/randomfile`
const emptyFileName = `${tempDir}/empty.vhd`
const vhdFileName = `${tempDir}/randomfile.vhd`
const recoveredFileName = `${tempDir}/recovered`
await createRandomFile(rawFileName, mbOfRandom)
await convertFromRawToVhd(rawFileName, vhdFileName)
await execa('qemu-img', [
'create',
'-fvpc',
'empty.vhd',
emptyFileName,
mbOfRandom + 1 + 'M',
])
await checkFile('randomfile.vhd')
await checkFile('empty.vhd')
const handler = getHandler({ url: 'file://' + process.cwd() })
const originalSize = await handler._getSize('randomfile')
await chainVhd(handler, 'empty.vhd', handler, 'randomfile.vhd', true)
await checkFile('randomfile.vhd')
await checkFile('empty.vhd')
await vhdMerge(handler, 'empty.vhd', handler, 'randomfile.vhd')
await recoverRawContent('empty.vhd', 'recovered', originalSize)
expect(await fs.readFile('recovered')).toEqual(
await fs.readFile('randomfile')
await checkFile(vhdFileName)
await checkFile(emptyFileName)
const handler = getHandler({ url: 'file://' })
const originalSize = await handler._getSize(rawFileName)
await chainVhd(handler, emptyFileName, handler, vhdFileName, true)
await checkFile(vhdFileName)
await checkFile(emptyFileName)
await vhdMerge(handler, emptyFileName, handler, vhdFileName)
await recoverRawContent(emptyFileName, recoveredFileName, originalSize)
expect(await fs.readFile(recoveredFileName)).toEqual(
await fs.readFile(rawFileName)
)
})
test('coalesce works in normal cases', async () => {
const mbOfRandom = 5
await createRandomFile('randomfile', mbOfRandom)
await createRandomFile('small_randomfile', Math.ceil(mbOfRandom / 2))
const randomFileName = `${tempDir}/randomfile`
const random2FileName = `${tempDir}/randomfile2`
const smallRandomFileName = `${tempDir}/small_randomfile`
const parentFileName = `${tempDir}/parent.vhd`
const child1FileName = `${tempDir}/child1.vhd`
const child2FileName = `${tempDir}/child2.vhd`
const recoveredFileName = `${tempDir}/recovered`
await createRandomFile(randomFileName, mbOfRandom)
await createRandomFile(smallRandomFileName, Math.ceil(mbOfRandom / 2))
await execa('qemu-img', [
'create',
'-fvpc',
'parent.vhd',
parentFileName,
mbOfRandom + 1 + 'M',
])
await convertFromRawToVhd('randomfile', 'child1.vhd')
const handler = getHandler({ url: 'file://' + process.cwd() })
await execa('vhd-util', ['snapshot', '-n', 'child2.vhd', '-p', 'child1.vhd'])
const vhd = new Vhd(handler, 'child2.vhd')
await convertFromRawToVhd(randomFileName, child1FileName)
const handler = getHandler({ url: 'file://' })
await execa('vhd-util', [
'snapshot',
'-n',
child2FileName,
'-p',
child1FileName,
])
const vhd = new Vhd(handler, child2FileName)
await vhd.readHeaderAndFooter()
await vhd.readBlockAllocationTable()
vhd.footer.creatorApplication = 'xoa'
await vhd.writeFooter()
const originalSize = await handler._getSize('randomfile')
await chainVhd(handler, 'parent.vhd', handler, 'child1.vhd', true)
await execa('vhd-util', ['check', '-t', '-n', 'child1.vhd'])
await chainVhd(handler, 'child1.vhd', handler, 'child2.vhd', true)
await execa('vhd-util', ['check', '-t', '-n', 'child2.vhd'])
const smallRandom = await fs.readFile('small_randomfile')
const newVhd = new Vhd(handler, 'child2.vhd')
const originalSize = await handler._getSize(randomFileName)
await chainVhd(handler, parentFileName, handler, child1FileName, true)
await execa('vhd-util', ['check', '-t', '-n', child1FileName])
await chainVhd(handler, child1FileName, handler, child2FileName, true)
await execa('vhd-util', ['check', '-t', '-n', child2FileName])
const smallRandom = await fs.readFile(smallRandomFileName)
const newVhd = new Vhd(handler, child2FileName)
await newVhd.readHeaderAndFooter()
await newVhd.readBlockAllocationTable()
await newVhd.writeData(5, smallRandom)
await checkFile('child2.vhd')
await checkFile('child1.vhd')
await checkFile('parent.vhd')
await vhdMerge(handler, 'parent.vhd', handler, 'child1.vhd')
await checkFile('parent.vhd')
await chainVhd(handler, 'parent.vhd', handler, 'child2.vhd', true)
await checkFile('child2.vhd')
await vhdMerge(handler, 'parent.vhd', handler, 'child2.vhd')
await checkFile('parent.vhd')
await recoverRawContent(
'parent.vhd',
'recovered_from_coalescing',
originalSize
)
await execa('cp', ['randomfile', 'randomfile2'])
const fd = await fs.open('randomfile2', 'r+')
await checkFile(child2FileName)
await checkFile(child1FileName)
await checkFile(parentFileName)
await vhdMerge(handler, parentFileName, handler, child1FileName)
await checkFile(parentFileName)
await chainVhd(handler, parentFileName, handler, child2FileName, true)
await checkFile(child2FileName)
await vhdMerge(handler, parentFileName, handler, child2FileName)
await checkFile(parentFileName)
await recoverRawContent(parentFileName, recoveredFileName, originalSize)
await execa('cp', [randomFileName, random2FileName])
const fd = await fs.open(random2FileName, 'r+')
try {
await fs.write(fd, smallRandom, 0, smallRandom.length, 5 * SECTOR_SIZE)
} finally {
await fs.close(fd)
}
expect(await fs.readFile('recovered_from_coalescing')).toEqual(
await fs.readFile('randomfile2')
expect(await fs.readFile(recoveredFileName)).toEqual(
await fs.readFile(random2FileName)
)
})
test('createSyntheticStream passes vhd-util check', async () => {
test.only('createSyntheticStream passes vhd-util check', async () => {
const initalSize = 4
const expectedVhdSize = 4197888
await createRandomFile('randomfile', initalSize)
await convertFromRawToVhd('randomfile', 'randomfile.vhd')
const handler = getHandler({ url: 'file://' + process.cwd() })
const stream = await createSyntheticStream(handler, 'randomfile.vhd')
expect(stream.length).toEqual(expectedVhdSize)
await fromEvent(
stream.pipe(await fs.createWriteStream('recovered.vhd')),
'finish'
const rawFileName = `${tempDir}/randomfile`
const vhdFileName = `${tempDir}/randomfile.vhd`
const recoveredVhdFileName = `${tempDir}/recovered.vhd`
await createRandomFile(rawFileName, initalSize)
await convertFromRawToVhd(rawFileName, vhdFileName)
await checkFile(vhdFileName)
const handler = getHandler({ url: 'file://' })
const stream = await createSyntheticStream(handler, vhdFileName)
const expectedVhdSize = (await fs.stat(vhdFileName)).size
expect(stream.length).toEqual((await fs.stat(vhdFileName)).size)
await pFromCallback(cb =>
pipeline(stream, fs.createWriteStream(recoveredVhdFileName), cb)
)
await checkFile('recovered.vhd')
const stats = await fs.stat('recovered.vhd')
await checkFile(recoveredVhdFileName)
const stats = await fs.stat(recoveredVhdFileName)
expect(stats.size).toEqual(expectedVhdSize)
await execa('qemu-img', ['compare', 'recovered.vhd', 'randomfile'])
await execa('qemu-img', ['compare', recoveredVhdFileName, rawFileName])
})

View File

@@ -1,12 +1,13 @@
{
"name": "vhd-lib",
"version": "0.5.1",
"version": "0.6.0",
"license": "AGPL-3.0",
"description": "Primitives for VHD file handling",
"keywords": [],
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/vhd-lib",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "packages/vhd-lib",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
@@ -21,11 +22,11 @@
},
"dependencies": {
"async-iterator-to-stream": "^1.0.2",
"core-js": "3.0.0-beta.3",
"core-js": "3.0.0",
"from2": "^2.3.0",
"fs-extra": "^7.0.0",
"limit-concurrency-decorator": "^0.4.0",
"promise-toolbox": "^0.11.0",
"promise-toolbox": "^0.12.1",
"struct-fu": "^1.2.0",
"uuid": "^3.0.1"
},
@@ -34,13 +35,14 @@
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"@babel/preset-flow": "^7.0.0",
"@xen-orchestra/fs": "^0.6.0",
"@xen-orchestra/fs": "^0.8.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^5.1.3",
"execa": "^1.0.0",
"fs-promise": "^2.0.0",
"get-stream": "^4.0.0",
"index-modules": "^0.3.0",
"readable-stream": "^3.0.6",
"rimraf": "^2.6.2",
"tmp": "^0.0.33"
},

View File

@@ -0,0 +1,20 @@
import assert from 'assert'
import {
DISK_TYPE_DIFFERENCING,
DISK_TYPE_DYNAMIC,
FILE_FORMAT_VERSION,
FOOTER_COOKIE,
FOOTER_SIZE,
} from './_constants'
export default footer => {
assert.strictEqual(footer.cookie, FOOTER_COOKIE)
assert.strictEqual(footer.dataOffset, FOOTER_SIZE)
assert.strictEqual(footer.fileFormatVersion, FILE_FORMAT_VERSION)
assert(footer.originalSize <= footer.currentSize)
assert(
footer.diskType === DISK_TYPE_DIFFERENCING ||
footer.diskType === DISK_TYPE_DYNAMIC
)
}

View File

@@ -0,0 +1,14 @@
import assert from 'assert'
import { HEADER_COOKIE, HEADER_VERSION, SECTOR_SIZE } from './_constants'
export default (header, footer) => {
assert.strictEqual(header.cookie, HEADER_COOKIE)
assert.strictEqual(header.dataOffset, undefined)
assert.strictEqual(header.headerVersion, HEADER_VERSION)
assert(Number.isInteger(Math.log2(header.blockSize / SECTOR_SIZE)))
if (footer !== undefined) {
assert(header.maxTableEntries >= footer.currentSize / header.blockSize)
}
}

View File

@@ -0,0 +1,47 @@
import assert from 'assert'
import { BLOCK_UNUSED } from './_constants'
// get the identifiers and first sectors of the first and last block
// in the file
export default bat => {
const n = bat.length
assert.notStrictEqual(n, 0)
assert.strictEqual(n % 4, 0)
let i = 0
let j = 0
let first, firstSector, last, lastSector
// get first allocated block for initialization
while ((firstSector = bat.readUInt32BE(j)) === BLOCK_UNUSED) {
i += 1
j += 4
if (j === n) {
const error = new Error('no allocated block found')
error.noBlock = true
throw error
}
}
lastSector = firstSector
first = last = i
while (j < n) {
const sector = bat.readUInt32BE(j)
if (sector !== BLOCK_UNUSED) {
if (sector < firstSector) {
first = i
firstSector = sector
} else if (sector > lastSector) {
last = i
lastSector = sector
}
}
i += 1
j += 4
}
return { first, firstSector, last, lastSector }
}

View File

@@ -0,0 +1,50 @@
export default async function readChunk(stream, n) {
if (n === 0) {
return Buffer.alloc(0)
}
return new Promise((resolve, reject) => {
const chunks = []
let i = 0
function clean() {
stream.removeListener('readable', onReadable)
stream.removeListener('end', onEnd)
stream.removeListener('error', onError)
}
function resolve2() {
clean()
resolve(Buffer.concat(chunks, i))
}
function onEnd() {
resolve2()
clean()
}
function onError(error) {
reject(error)
clean()
}
function onReadable() {
const chunk = stream.read(n - i)
if (chunk === null) {
return // wait for more data
}
i += chunk.length
chunks.push(chunk)
if (i >= n) {
resolve2()
}
}
stream.on('end', onEnd)
stream.on('error', onError)
stream.on('readable', onReadable)
if (stream.readable) {
onReadable()
}
})
}

View File

@@ -0,0 +1,93 @@
/* eslint-env jest */
import asyncIteratorToStream from 'async-iterator-to-stream'
import execa from 'execa'
import fs from 'fs-extra'
import rimraf from 'rimraf'
import getStream from 'get-stream'
import tmp from 'tmp'
import { createReadStream, createWriteStream } from 'fs'
import { pFromCallback } from 'promise-toolbox'
import { pipeline } from 'readable-stream'
import { createVhdStreamWithLength } from '.'
import { FOOTER_SIZE } from './_constants'
let tempDir = null
beforeEach(async () => {
tempDir = await pFromCallback(cb => tmp.dir(cb))
})
afterEach(async () => {
await pFromCallback(cb => rimraf(tempDir, cb))
})
async function convertFromRawToVhd(rawName, vhdName) {
await execa('qemu-img', ['convert', '-f', 'raw', '-Ovpc', rawName, vhdName])
}
async function createRandomFile(name, size) {
const createRandomStream = asyncIteratorToStream(function*(size) {
while (size-- > 0) {
yield Buffer.from([Math.floor(Math.random() * 256)])
}
})
const input = await createRandomStream(size)
await pFromCallback(cb => pipeline(input, fs.createWriteStream(name), cb))
}
test('createVhdStreamWithLength can extract length', async () => {
const initialSize = 4 * 1024
const rawFileName = `${tempDir}/randomfile`
const vhdName = `${tempDir}/randomfile.vhd`
const outputVhdName = `${tempDir}/output.vhd`
await createRandomFile(rawFileName, initialSize)
await convertFromRawToVhd(rawFileName, vhdName)
const vhdSize = fs.statSync(vhdName).size
const result = await createVhdStreamWithLength(
await createReadStream(vhdName)
)
expect(result.length).toEqual(vhdSize)
const outputFileStream = await createWriteStream(outputVhdName)
await pFromCallback(cb => pipeline(result, outputFileStream, cb))
const outputSize = fs.statSync(outputVhdName).size
expect(outputSize).toEqual(vhdSize)
})
test('createVhdStreamWithLength can skip blank after last block and before footer', async () => {
const initialSize = 4 * 1024
const rawFileName = `${tempDir}/randomfile`
const vhdName = `${tempDir}/randomfile.vhd`
const outputVhdName = `${tempDir}/output.vhd`
await createRandomFile(rawFileName, initialSize)
await convertFromRawToVhd(rawFileName, vhdName)
const vhdSize = fs.statSync(vhdName).size
// read file footer
const footer = await getStream.buffer(
createReadStream(vhdName, { start: vhdSize - FOOTER_SIZE })
)
// we'll override the footer
const endOfFile = await createWriteStream(vhdName, {
flags: 'r+',
start: vhdSize - FOOTER_SIZE,
})
// write a blank over the previous footer
await pFromCallback(cb => endOfFile.write(Buffer.alloc(FOOTER_SIZE), cb))
// write the footer after the new blank
await pFromCallback(cb => endOfFile.end(footer, cb))
const longerSize = fs.statSync(vhdName).size
// check input file has been lengthened
expect(longerSize).toEqual(vhdSize + FOOTER_SIZE)
const result = await createVhdStreamWithLength(
await createReadStream(vhdName)
)
expect(result.length).toEqual(vhdSize)
const outputFileStream = await createWriteStream(outputVhdName)
await pFromCallback(cb => pipeline(result, outputFileStream, cb))
const outputSize = fs.statSync(outputVhdName).size
// check out file has been shortened again
expect(outputSize).toEqual(vhdSize)
await execa('qemu-img', ['compare', outputVhdName, vhdName])
})

View File

@@ -0,0 +1,80 @@
import assert from 'assert'
import { pipeline, Transform } from 'readable-stream'
import checkFooter from './_checkFooter'
import checkHeader from './_checkHeader'
import noop from './_noop'
import getFirstAndLastBlocks from './_getFirstAndLastBlocks'
import readChunk from './_readChunk'
import { FOOTER_SIZE, HEADER_SIZE, SECTOR_SIZE } from './_constants'
import { fuFooter, fuHeader } from './_structs'
class EndCutterStream extends Transform {
constructor(footerOffset, footerBuffer) {
super()
this._footerOffset = footerOffset
this._footerBuffer = footerBuffer
this._position = 0
this._done = false
}
_transform(data, encoding, callback) {
if (!this._done) {
if (this._position + data.length >= this._footerOffset) {
this._done = true
const difference = this._footerOffset - this._position
data = data.slice(0, difference)
this.push(data)
this.push(this._footerBuffer)
} else {
this.push(data)
}
this._position += data.length
}
callback()
}
}
export default async function createVhdStreamWithLength(stream) {
const readBuffers = []
let streamPosition = 0
async function readStream(length) {
const chunk = await readChunk(stream, length)
assert.strictEqual(chunk.length, length)
streamPosition += chunk.length
readBuffers.push(chunk)
return chunk
}
const footerBuffer = await readStream(FOOTER_SIZE)
const footer = fuFooter.unpack(footerBuffer)
checkFooter(footer)
const header = fuHeader.unpack(await readStream(HEADER_SIZE))
checkHeader(header, footer)
await readStream(header.tableOffset - streamPosition)
const table = await readStream(header.maxTableEntries * 4)
readBuffers.reverse()
for (const buf of readBuffers) {
stream.unshift(buf)
}
const footerOffset =
getFirstAndLastBlocks(table).lastSector * SECTOR_SIZE +
Math.ceil(header.blockSize / SECTOR_SIZE / 8 / SECTOR_SIZE) * SECTOR_SIZE +
header.blockSize
// ignore any data after footerOffset and push footerBuffer
//
// this is necessary to ignore any blank space between the last block and the
// final footer which would invalidate the size we computed
const newStream = new EndCutterStream(footerOffset, footerBuffer)
pipeline(stream, newStream, noop)
newStream.length = footerOffset + FOOTER_SIZE
return newStream
}

View File

@@ -11,3 +11,6 @@ export {
} from './createReadableSparseStream'
export { default as createSyntheticStream } from './createSyntheticStream'
export { default as mergeVhd } from './merge'
export {
default as createVhdStreamWithLength,
} from './createVhdStreamWithLength'

View File

@@ -1,19 +1,16 @@
import assert from 'assert'
import { fromEvent } from 'promise-toolbox'
import checkFooter from './_checkFooter'
import checkHeader from './_checkHeader'
import constantStream from './_constant-stream'
import getFirstAndLastBlocks from './_getFirstAndLastBlocks'
import { fuFooter, fuHeader, checksumStruct, unpackField } from './_structs'
import { set as mapSetBit, test as mapTestBit } from './_bitmap'
import {
BLOCK_UNUSED,
DISK_TYPE_DIFFERENCING,
DISK_TYPE_DYNAMIC,
FILE_FORMAT_VERSION,
FOOTER_COOKIE,
FOOTER_SIZE,
HEADER_COOKIE,
HEADER_SIZE,
HEADER_VERSION,
PARENT_LOCATOR_ENTRIES,
PLATFORM_NONE,
PLATFORM_W2KU,
@@ -170,21 +167,10 @@ export default class Vhd {
}
const footer = (this.footer = fuFooter.unpack(bufFooter))
assert.strictEqual(footer.cookie, FOOTER_COOKIE, 'footer cookie')
assert.strictEqual(footer.dataOffset, FOOTER_SIZE)
assert.strictEqual(footer.fileFormatVersion, FILE_FORMAT_VERSION)
assert(footer.originalSize <= footer.currentSize)
assert(
footer.diskType === DISK_TYPE_DIFFERENCING ||
footer.diskType === DISK_TYPE_DYNAMIC
)
checkFooter(footer)
const header = (this.header = fuHeader.unpack(bufHeader))
assert.strictEqual(header.cookie, HEADER_COOKIE)
assert.strictEqual(header.dataOffset, undefined)
assert.strictEqual(header.headerVersion, HEADER_VERSION)
assert(header.maxTableEntries >= footer.currentSize / header.blockSize)
assert(Number.isInteger(Math.log2(header.blockSize / SECTOR_SIZE)))
checkHeader(header, footer)
// Compute the number of sectors in one block.
// Default: One block contains 4096 sectors of 512 bytes.
@@ -242,49 +228,6 @@ export default class Vhd {
)
}
// get the identifiers and first sectors of the first and last block
// in the file
//
_getFirstAndLastBlocks() {
const n = this.header.maxTableEntries
const bat = this.blockTable
let i = 0
let j = 0
let first, firstSector, last, lastSector
// get first allocated block for initialization
while ((firstSector = bat.readUInt32BE(j)) === BLOCK_UNUSED) {
i += 1
j += 4
if (i === n) {
const error = new Error('no allocated block found')
error.noBlock = true
throw error
}
}
lastSector = firstSector
first = last = i
while (i < n) {
const sector = bat.readUInt32BE(j)
if (sector !== BLOCK_UNUSED) {
if (sector < firstSector) {
first = i
firstSector = sector
} else if (sector > lastSector) {
last = i
lastSector = sector
}
}
i += 1
j += 4
}
return { first, firstSector, last, lastSector }
}
// =================================================================
// Write functions.
// =================================================================
@@ -311,7 +254,9 @@ export default class Vhd {
async _freeFirstBlockSpace(spaceNeededBytes) {
try {
const { first, firstSector, lastSector } = this._getFirstAndLastBlocks()
const { first, firstSector, lastSector } = getFirstAndLastBlocks(
this.blockTable
)
const tableOffset = this.header.tableOffset
const { batSize } = this
const newMinSector = Math.ceil(

View File

@@ -4,22 +4,20 @@ import rimraf from 'rimraf'
import tmp from 'tmp'
import { createWriteStream, readFile } from 'fs-promise'
import { fromEvent, pFromCallback } from 'promise-toolbox'
import { pipeline } from 'readable-stream'
import { createReadableRawStream, createReadableSparseStream } from './'
import { createFooter } from './src/_createFooterHeader'
const initialDir = process.cwd()
let tempDir = null
beforeEach(async () => {
const dir = await pFromCallback(cb => tmp.dir(cb))
process.chdir(dir)
tempDir = await pFromCallback(cb => tmp.dir(cb))
})
afterEach(async () => {
const tmpDir = process.cwd()
process.chdir(initialDir)
await pFromCallback(cb => rimraf(tmpDir, cb))
await pFromCallback(cb => rimraf(tempDir, cb))
})
test('createFooter() does not crash', () => {
@@ -55,9 +53,10 @@ test('ReadableRawVHDStream does not crash', async () => {
}
const fileSize = 1000
const stream = createReadableRawStream(fileSize, mockParser)
const pipe = stream.pipe(createWriteStream('output.vhd'))
await fromEvent(pipe, 'finish')
await execa('vhd-util', ['check', '-t', '-i', '-n', 'output.vhd'])
await pFromCallback(cb =>
pipeline(stream, createWriteStream(`${tempDir}/output.vhd`), cb)
)
await execa('vhd-util', ['check', '-t', '-i', '-n', `${tempDir}/output.vhd`])
})
test('ReadableRawVHDStream detects when blocks are out of order', async () => {
@@ -87,9 +86,9 @@ test('ReadableRawVHDStream detects when blocks are out of order', async () => {
new Promise((resolve, reject) => {
const stream = createReadableRawStream(100000, mockParser)
stream.on('error', reject)
const pipe = stream.pipe(createWriteStream('outputStream'))
pipe.on('finish', resolve)
pipe.on('error', reject)
pipeline(stream, createWriteStream(`${tempDir}/outputStream`), err =>
err ? reject(err) : resolve()
)
})
).rejects.toThrow('Received out of order blocks')
})
@@ -114,19 +113,19 @@ test('ReadableSparseVHDStream can handle a sparse file', async () => {
blocks
)
expect(stream.length).toEqual(4197888)
const pipe = stream.pipe(createWriteStream('output.vhd'))
const pipe = stream.pipe(createWriteStream(`${tempDir}/output.vhd`))
await fromEvent(pipe, 'finish')
await execa('vhd-util', ['check', '-t', '-i', '-n', 'output.vhd'])
await execa('vhd-util', ['check', '-t', '-i', '-n', `${tempDir}/output.vhd`])
await execa('qemu-img', [
'convert',
'-f',
'vpc',
'-O',
'raw',
'output.vhd',
'out1.raw',
`${tempDir}/output.vhd`,
`${tempDir}/out1.raw`,
])
const out1 = await readFile('out1.raw')
const out1 = await readFile(`${tempDir}/out1.raw`)
const expected = Buffer.alloc(fileSize)
blocks.forEach(b => {
b.data.copy(expected, b.offsetBytes)

View File

@@ -15,6 +15,7 @@
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/xapi-explore-sr",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "packages/xapi-explore-sr",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
@@ -40,7 +41,7 @@
"human-format": "^0.10.0",
"lodash": "^4.17.4",
"pw": "^0.0.4",
"xen-api": "^0.24.1"
"xen-api": "^0.24.6"
},
"devDependencies": {
"@babel/cli": "^7.1.5",

View File

@@ -95,7 +95,7 @@ root@xen1.company.net> xapi.pool.$master.name_label
To ease searches, `find()` and `findAll()` functions are available:
```
root@xen1.company.net> findAll({ $type: 'vm' }).length
root@xen1.company.net> findAll({ $type: 'VM' }).length
183
```

View File

@@ -4,7 +4,7 @@ const { PassThrough, pipeline } = require('readable-stream')
const humanFormat = require('human-format')
const Throttle = require('throttle')
const { isOpaqueRef } = require('../')
const isOpaqueRef = require('../dist/_isOpaqueRef').default
exports.createInputStream = path => {
if (path === undefined || path === '-') {

View File

@@ -1,6 +1,6 @@
{
"name": "xen-api",
"version": "0.24.1",
"version": "0.24.6",
"license": "ISC",
"description": "Connector to the Xen API",
"keywords": [
@@ -13,6 +13,7 @@
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/xen-api",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "packages/xen-api",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
@@ -36,16 +37,16 @@
"debug": "^4.0.1",
"event-to-promise": "^0.8.0",
"exec-promise": "^0.7.0",
"http-request-plus": "^0.7.1",
"iterable-backoff": "^0.0.0",
"jest-diff": "^23.5.0",
"http-request-plus": "^0.8.0",
"iterable-backoff": "^0.1.0",
"jest-diff": "^24.0.0",
"json-rpc-protocol": "^0.13.1",
"kindof": "^2.0.0",
"lodash": "^4.17.4",
"make-error": "^1.3.0",
"minimist": "^1.2.0",
"ms": "^2.1.1",
"promise-toolbox": "^0.11.0",
"promise-toolbox": "^0.12.1",
"pw": "0.0.4",
"xmlrpc": "^1.3.2",
"xo-collection": "^0.4.1"
@@ -53,7 +54,10 @@
"devDependencies": {
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/plugin-proposal-class-properties": "^7.3.4",
"@babel/plugin-proposal-decorators": "^7.0.0",
"@babel/plugin-proposal-nullish-coalescing-operator": "^7.2.0",
"@babel/plugin-proposal-optional-chaining": "^7.2.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^5.1.3",


@@ -0,0 +1,30 @@
import { BaseError } from 'make-error'
export default class XapiError extends BaseError {
static wrap(error) {
let code, params
if (Array.isArray(error)) {
// < XenServer 7.3
;[code, ...params] = error
} else {
code = error.message
params = error.data
if (!Array.isArray(params)) {
params = []
}
}
return new XapiError(code, params)
}
constructor(code, params) {
super(`${code}(${params.join(', ')})`)
this.code = code
this.params = params
// slots that can be assigned later
this.call = undefined
this.url = undefined
this.task = undefined
}
}
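
For reference, a minimal usage sketch of the class above; the error code and parameters are made up, and the module path follows the `./_XapiError` import used by the task-result helper later in this diff:

```js
import XapiError from './_XapiError'

// hosts older than XenServer 7.3 report errors as an array: [code, ...params]
const legacy = XapiError.wrap(['SR_BACKEND_FAILURE_44', 'not enough space'])

// newer hosts report an object carrying `message` and `data`
const modern = XapiError.wrap({
  message: 'SR_BACKEND_FAILURE_44',
  data: ['not enough space'],
})

legacy.code === modern.code // true
legacy.message // 'SR_BACKEND_FAILURE_44(not enough space)'
```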


@@ -0,0 +1,15 @@
// decorates fn so that multiple concurrent calls are coalesced into a single one
export default function coalesceCalls(fn) {
let promise
const clean = () => {
promise = undefined
}
return function() {
if (promise !== undefined) {
return promise
}
promise = fn.apply(this, arguments)
promise.then(clean, clean)
return promise
}
}


@@ -0,0 +1,26 @@
/* eslint-env jest */
import pDefer from 'promise-toolbox/defer'
import coalesceCalls from './_coalesceCalls'
describe('coalesceCalls', () => {
it('decorates an async function', async () => {
const fn = coalesceCalls(promise => promise)
const defer1 = pDefer()
const promise1 = fn(defer1.promise)
const defer2 = pDefer()
const promise2 = fn(defer2.promise)
defer1.resolve('foo')
expect(await promise1).toBe('foo')
expect(await promise2).toBe('foo')
const defer3 = pDefer()
const promise3 = fn(defer3.promise)
defer3.resolve('bar')
expect(await promise3).toBe('bar')
})
})


@@ -0,0 +1,3 @@
import debug from 'debug'
export default debug('xen-api')


@@ -0,0 +1,22 @@
import { Cancel } from 'promise-toolbox'
import XapiError from './_XapiError'
export default task => {
const { status } = task
if (status === 'cancelled') {
return Promise.reject(new Cancel('task canceled'))
}
if (status === 'failure') {
const error = XapiError.wrap(task.error_info)
error.task = task
return Promise.reject(error)
}
if (status === 'success') {
// the result might be:
// - empty string
// - an opaque reference
// - an XML-RPC value
return Promise.resolve(task.result)
}
}
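
A sketch of the three meaningful outcomes of the helper above; the module name `./_getTaskResult` is an assumption (file names are not shown in this diff) and the task records are made up:

```js
import getTaskResult from './_getTaskResult' // assumed module name
import XapiError from './_XapiError'

// still running: nothing is returned, the watcher keeps waiting
getTaskResult({ status: 'pending' }) // undefined

// success: a resolved promise carrying the raw task result
getTaskResult({ status: 'success', result: 'OpaqueRef:example' })

// failure: a rejected promise carrying a XapiError, with `error.task`
// pointing back to the task record
getTaskResult({
  status: 'failure',
  error_info: ['HANDLE_INVALID', 'task', 'OpaqueRef:example'],
}).catch(error => {
  console.log(error instanceof XapiError) // true
  console.log(error.code) // 'HANDLE_INVALID'
})
```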


@@ -0,0 +1,3 @@
const SUFFIX = '.get_all_records'
export default method => method.endsWith(SUFFIX)


@@ -0,0 +1,3 @@
const PREFIX = 'OpaqueRef:'
export default value => typeof value === 'string' && value.startsWith(PREFIX)


@@ -0,0 +1,4 @@
const RE = /^[^.]+\.get_/
export default (method, args) =>
args.length === 1 && typeof args[0] === 'string' && RE.test(method)
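
How the heuristic above classifies calls, assuming the module is exposed as `./_isReadOnlyCall` (the file name is not shown in this diff):

```js
import isReadOnlyCall from './_isReadOnlyCall' // assumed module name

// a getter invoked with a single string argument (a ref) is read-only
isReadOnlyCall('VM.get_name_label', ['OpaqueRef:example']) // true

// setters, or getters with a different argument shape, are not
isReadOnlyCall('VM.set_name_label', ['OpaqueRef:example', 'new name']) // false
isReadOnlyCall('VM.get_by_name_label', []) // false
```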


@@ -0,0 +1,8 @@
export default (setting, defaultValue) =>
setting === undefined
? () => defaultValue
: typeof setting === 'function'
? setting
: typeof setting === 'object'
? method => setting[method] ?? setting['*'] ?? defaultValue
: () => setting
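
The helper above normalizes a user setting into a per-method lookup function; a short sketch with an assumed module name and made-up values:

```js
import makeCallSetting from './_makeCallSetting' // assumed module name

// an object maps method names to values, with '*' acting as a wildcard
const timeout = makeCallSetting({ 'VM.start': 600e3, '*': 60e3 }, 30e3)
timeout('VM.start') // 600000
timeout('VM.get_record') // 60000, from the '*' entry

// a scalar applies to every method, undefined falls back to the default
makeCallSetting(120e3)('anything') // 120000
makeCallSetting(undefined, 30e3)('anything') // 30000

// a function is used as is
makeCallSetting(method => method.endsWith('.get_record'))('VM.get_record') // true
```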


@@ -0,0 +1,18 @@
const URL_RE = /^(?:(https?:)\/*)?(?:([^:]+):([^@]+)@)?([^/]+?)(?::([0-9]+))?\/?$/
export default url => {
const matches = URL_RE.exec(url)
if (matches === null) {
throw new Error('invalid URL: ' + url)
}
const [, protocol = 'https:', username, password, hostname, port] = matches
const parsedUrl = { protocol, hostname, port }
if (username !== undefined) {
parsedUrl.username = decodeURIComponent(username)
}
if (password !== undefined) {
parsedUrl.password = decodeURIComponent(password)
}
return parsedUrl
}
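
A usage sketch of the URL parser above; the module name and host values are made up:

```js
import parseUrl from './_parseUrl' // assumed module name

// bare host name: the protocol defaults to https: and the port stays undefined
parseUrl('xen1.example.net')
// → { protocol: 'https:', hostname: 'xen1.example.net', port: undefined }

// credentials and an explicit port are split out and URI-decoded
parseUrl('http://root:p%40ss@192.0.2.1:8080/')
// → { protocol: 'http:', hostname: '192.0.2.1', port: '8080',
//     username: 'root', password: 'p@ss' }

parseUrl('host/with/path') // throws: invalid URL
```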

File diff suppressed because it is too large


@@ -0,0 +1,3 @@
import makeError from 'make-error'
export default makeError('UnsupportedTransport')


@@ -0,0 +1,25 @@
// Prepare values before passing them to the XenAPI:
//
// - cast integers to strings
export default function prepare(param) {
if (Number.isInteger(param)) {
return String(param)
}
if (typeof param !== 'object' || param === null) {
return param
}
if (Array.isArray(param)) {
return param.map(prepare)
}
const values = {}
Object.keys(param).forEach(key => {
const value = param[key]
if (value !== undefined) {
values[key] = prepare(value)
}
})
return values
}
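
A sketch of what the casting above does to a call's parameters; the example values are made up, while the module name matches the `./_prepareXmlRpcParams` import used by the XML-RPC transports below:

```js
import prepareXmlRpcParams from './_prepareXmlRpcParams'

// integers become strings, as the XenAPI expects over XML-RPC;
// undefined properties are dropped, everything else passes through untouched
prepareXmlRpcParams({
  memory_static_max: 4294967296,
  name_label: 'test VM',
  tags: [1, 'prod'],
  skipped: undefined,
})
// → { memory_static_max: '4294967296', name_label: 'test VM', tags: ['1', 'prod'] }
```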


@@ -1,3 +0,0 @@
import makeError from 'make-error'
export const UnsupportedTransport = makeError('UnsupportedTransport')


@@ -1,7 +1,7 @@
import jsonRpc from './json-rpc'
import UnsupportedTransport from './_UnsupportedTransport'
import xmlRpc from './xml-rpc'
import xmlRpcJson from './xml-rpc-json'
import { UnsupportedTransport } from './_utils'
const factories = [jsonRpc, xmlRpcJson, xmlRpc]
const { length } = factories


@@ -1,8 +1,9 @@
import httpRequestPlus from 'http-request-plus'
import { format, parse } from 'json-rpc-protocol'
import { UnsupportedTransport } from './_utils'
import UnsupportedTransport from './_UnsupportedTransport'
// https://github.com/xenserver/xenadmin/blob/0df39a9d83cd82713f32d24704852a0fd57b8a64/XenModel/XenAPI/Session.cs#L403-L433
export default ({ allowUnauthorized, url }) => {
return (method, args) =>
httpRequestPlus


@@ -1,7 +1,8 @@
import { createClient, createSecureClient } from 'xmlrpc'
import { promisify } from 'promise-toolbox'
import { UnsupportedTransport } from './_utils'
import prepareXmlRpcParams from './_prepareXmlRpcParams'
import UnsupportedTransport from './_UnsupportedTransport'
const logError = error => {
if (error.res) {
@@ -71,10 +72,7 @@ const parseResult = result => {
throw new UnsupportedTransport()
}
export default ({
allowUnauthorized,
url: { hostname, path, port, protocol },
}) => {
export default ({ allowUnauthorized, url: { hostname, port, protocol } }) => {
const client = (protocol === 'https:' ? createSecureClient : createClient)({
host: hostname,
path: '/json',
@@ -83,5 +81,6 @@ export default ({
})
const call = promisify(client.methodCall, client)
return (method, args) => call(method, args).then(parseResult, logError)
return (method, args) =>
call(method, prepareXmlRpcParams(args)).then(parseResult, logError)
}


@@ -1,6 +1,8 @@
import { createClient, createSecureClient } from 'xmlrpc'
import { promisify } from 'promise-toolbox'
import prepareXmlRpcParams from './_prepareXmlRpcParams'
const logError = error => {
if (error.res) {
console.error(
@@ -30,10 +32,7 @@ const parseResult = result => {
return result.Value
}
export default ({
allowUnauthorized,
url: { hostname, path, port, protocol },
}) => {
export default ({ allowUnauthorized, url: { hostname, port, protocol } }) => {
const client = (protocol === 'https:' ? createSecureClient : createClient)({
host: hostname,
port,
@@ -41,5 +40,6 @@ export default ({
})
const call = promisify(client.methodCall, client)
return (method, args) => call(method, args).then(parseResult, logError)
return (method, args) =>
call(method, prepareXmlRpcParams(args)).then(parseResult, logError)
}


@@ -7,6 +7,7 @@
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-acl-resolver",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "packages/xo-acl-resolver",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},


@@ -12,6 +12,7 @@
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-cli",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "packages/xo-cli",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
@@ -33,7 +34,7 @@
"chalk": "^2.2.0",
"exec-promise": "^0.7.0",
"fs-promise": "^2.0.3",
"http-request-plus": "^0.7.1",
"http-request-plus": "^0.8.0",
"human-format": "^0.10.0",
"l33teral": "^3.0.3",
"lodash": "^4.17.4",
@@ -42,7 +43,7 @@
"nice-pipe": "0.0.0",
"pretty-ms": "^4.0.0",
"progress-stream": "^2.0.0",
"promise-toolbox": "^0.11.0",
"promise-toolbox": "^0.12.1",
"pump": "^3.0.0",
"pw": "^0.0.4",
"strip-indent": "^2.0.0",


@@ -7,6 +7,7 @@
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-collection",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "packages/xo-collection",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},


@@ -1,7 +1,7 @@
import kindOf from 'kindof'
import { BaseError } from 'make-error'
import { EventEmitter } from 'events'
import { forEach } from 'lodash'
import { forOwn } from 'lodash'
import isEmpty from './is-empty'
import isObject from './is-object'
@@ -10,6 +10,7 @@ import isObject from './is-object'
const {
create: createObject,
keys,
prototype: { hasOwnProperty },
} = Object
@@ -63,6 +64,16 @@ export class NoSuchItem extends BaseError {
// -------------------------------------------------------------------
const assertValidKey = key => {
if (!isValidKey(key)) {
throw new InvalidKey(key)
}
}
const isValidKey = key => typeof key === 'number' || typeof key === 'string'
// -------------------------------------------------------------------
export default class Collection extends EventEmitter {
constructor() {
super()
@@ -71,7 +82,7 @@ export default class Collection extends EventEmitter {
this._buffering = 0
this._indexes = createObject(null)
this._indexedItems = createObject(null)
this._items = {} // createObject(null)
this._items = createObject(null)
this._size = 0
}
@@ -113,7 +124,7 @@ export default class Collection extends EventEmitter {
}
clear() {
forEach(this._items, (_, key) => this._remove(key))
keys(this._items).forEach(this._remove, this)
}
remove(keyOrObjectWithId) {
@@ -176,8 +187,7 @@ export default class Collection extends EventEmitter {
return defaultValue
}
// Throws a NoSuchItem.
this._assertHas(key)
throw new NoSuchItem(key)
}
has(key) {
@@ -189,7 +199,7 @@ export default class Collection extends EventEmitter {
// -----------------------------------------------------------------
createIndex(name, index) {
const { _indexes: indexes } = this
const indexes = this._indexes
if (hasOwnProperty.call(indexes, name)) {
throw new DuplicateIndex(name)
}
@@ -201,7 +211,7 @@ export default class Collection extends EventEmitter {
}
deleteIndex(name) {
const { _indexes: indexes } = this
const indexes = this._indexes
if (!hasOwnProperty.call(indexes, name)) {
throw new NoSuchIndex(name)
}
@@ -218,7 +228,7 @@ export default class Collection extends EventEmitter {
// -----------------------------------------------------------------
*[Symbol.iterator]() {
const { _items: items } = this
const items = this._items
for (const key in items) {
yield [key, items[key]]
@@ -226,7 +236,7 @@ export default class Collection extends EventEmitter {
}
*keys() {
const { _items: items } = this
const items = this._items
for (const key in items) {
yield key
@@ -234,7 +244,7 @@ export default class Collection extends EventEmitter {
}
*values() {
const { _items: items } = this
const items = this._items
for (const key in items) {
yield items[key]
@@ -255,11 +265,11 @@ export default class Collection extends EventEmitter {
}
called = true
if (--this._buffering) {
if (--this._buffering !== 0) {
return
}
const { _buffer: buffer } = this
const buffer = this._buffer
// Due to deduplication there could be nothing in the buffer.
if (isEmpty(buffer)) {
@@ -276,7 +286,7 @@ export default class Collection extends EventEmitter {
data[buffer[key]][key] = this._items[key]
}
forEach(data, (items, action) => {
forOwn(data, (items, action) => {
if (!isEmpty(items)) {
this.emit(action, items)
}
@@ -306,16 +316,6 @@ export default class Collection extends EventEmitter {
}
}
_assertValidKey(key) {
if (!this._isValidKey(key)) {
throw new InvalidKey(key)
}
}
_isValidKey(key) {
return typeof key === 'number' || typeof key === 'string'
}
_remove(key) {
delete this._items[key]
this._size--
@@ -324,17 +324,17 @@ export default class Collection extends EventEmitter {
_resolveItem(keyOrObjectWithId, valueIfKey = undefined) {
if (valueIfKey !== undefined) {
this._assertValidKey(keyOrObjectWithId)
assertValidKey(keyOrObjectWithId)
return [keyOrObjectWithId, valueIfKey]
}
if (this._isValidKey(keyOrObjectWithId)) {
if (isValidKey(keyOrObjectWithId)) {
return [keyOrObjectWithId]
}
const key = this.getKey(keyOrObjectWithId)
this._assertValidKey(key)
assertValidKey(key)
return [key, keyOrObjectWithId]
}
@@ -347,7 +347,7 @@ export default class Collection extends EventEmitter {
}
if (action === ACTION_ADD) {
this._buffer[key] = this._buffer[key] ? ACTION_UPDATE : ACTION_ADD
this._buffer[key] = key in this._buffer ? ACTION_UPDATE : ACTION_ADD
} else if (action === ACTION_REMOVE) {
if (this._buffer[key] === ACTION_ADD) {
delete this._buffer[key]
@@ -356,7 +356,7 @@ export default class Collection extends EventEmitter {
}
} else {
// update
if (!this._buffer[key]) {
if (!(key in this._buffer)) {
this._buffer[key] = ACTION_UPDATE
}
}


@@ -7,6 +7,7 @@
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-common",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "packages/xo-common",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},


@@ -16,6 +16,7 @@
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-import-servers-csv",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "packages/xo-import-servers-csv",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
@@ -42,7 +43,7 @@
"xo-lib": "^0.9.0"
},
"devDependencies": {
"@types/node": "^10.12.2",
"@types/node": "^11.11.4",
"@types/through2": "^2.0.31",
"tslint": "^5.9.1",
"tslint-config-standard": "^8.0.1",


@@ -1,8 +1,3 @@
declare module 'csv-parser' {
function csvParser(opts?: Object): any
export = csvParser
}
declare module 'exec-promise' {
function execPromise(cb: (args: string[]) => any): void
export = execPromise


@@ -11,6 +11,7 @@
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-lib",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "packages/xo-lib",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},


@@ -7,6 +7,7 @@
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-remote-parser",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "packages/xo-remote-parser",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},


@@ -12,6 +12,7 @@
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-server-auth-github",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "packages/xo-server-auth-github",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},


@@ -1,6 +1,6 @@
{
"name": "xo-server-auth-google",
"version": "0.2.0",
"version": "0.2.1",
"license": "AGPL-3.0",
"description": "Google authentication plugin for XO-Server",
"keywords": [
@@ -15,6 +15,7 @@
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-server-auth-google",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "packages/xo-server-auth-google",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
@@ -32,7 +33,7 @@
"node": ">=6"
},
"dependencies": {
"passport-google-oauth20": "^1.0.0"
"passport-google-oauth20": "^2.0.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",


@@ -14,6 +14,7 @@
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-server-auth-ldap",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "packages/xo-server-auth-ldap",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
@@ -38,7 +39,7 @@
"inquirer": "^6.0.0",
"ldapjs": "^1.0.1",
"lodash": "^4.17.4",
"promise-toolbox": "^0.11.0"
"promise-toolbox": "^0.12.1"
},
"devDependencies": {
"@babel/cli": "^7.0.0",


@@ -1,6 +1,6 @@
{
"name": "xo-server-auth-saml",
"version": "0.5.2",
"version": "0.5.3",
"license": "AGPL-3.0",
"description": "SAML authentication plugin for XO-Server",
"keywords": [
@@ -15,6 +15,7 @@
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-server-auth-saml",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "packages/xo-server-auth-saml",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},


@@ -42,7 +42,12 @@ class AuthSamlXoPlugin {
configure({ usernameField, ...conf }) {
this._usernameField = usernameField
this._conf = conf
this._conf = {
...conf,
// must match the callback URL
path: '/signin/saml/callback',
}
}
load() {


@@ -18,6 +18,7 @@
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-server-backup-reports",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "packages/xo-server-backup-reports",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},


@@ -154,6 +154,10 @@ class BackupReportsXoPlugin {
}
_wrapper(status, job, schedule, runJobId) {
if (job.type === 'metadataBackup') {
return
}
return new Promise(resolve =>
resolve(
job.type === 'backup'


@@ -14,6 +14,7 @@
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-server-cloud",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "packages/xo-server-cloud",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
@@ -31,7 +32,7 @@
"node": ">=6"
},
"dependencies": {
"http-request-plus": "^0.7.1",
"http-request-plus": "^0.8.0",
"jsonrpc-websocket-client": "^0.4.1"
},
"devDependencies": {

View File

@@ -13,6 +13,7 @@
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-server-load-balancer",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "packages/xo-server-load-balancer",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},


@@ -7,6 +7,7 @@
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-server-perf-alert",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "packages/xo-server-perf-alert",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},


@@ -673,8 +673,9 @@ ${entry.listItem}
}
}
async getRrd(xoObject, secondsAgo) {
const host = xoObject.$type === 'host' ? xoObject : xoObject.$resident_on
async getRrd(xapiObject, secondsAgo) {
const host =
xapiObject.$type === 'host' ? xapiObject : xapiObject.$resident_on
if (host == null) {
return null
}
@@ -685,13 +686,13 @@ ${entry.listItem}
host,
query: {
cf: 'AVERAGE',
host: (xoObject.$type === 'host').toString(),
host: (xapiObject.$type === 'host').toString(),
json: 'true',
start: serverTimestamp - secondsAgo,
},
}
if (xoObject.$type === 'vm') {
payload['vm_uuid'] = xoObject.uuid
if (xapiObject.$type === 'VM') {
payload['vm_uuid'] = xapiObject.uuid
}
// JSON is not well formed, can't use the default node parser
return JSON5.parse(

Some files were not shown because too many files have changed in this diff