Compare commits

..

230 Commits

Author SHA1 Message Date
Julien Fontanet
8e9946b645 feat(complex-matcher): support datetime comparison
```
<-2w
```
2020-10-23 12:19:24 +02:00
Julien Fontanet
4911bbe3a2 chore: normalize packages 2020-10-23 10:21:52 +02:00
Julien Fontanet
e0b6ab3f8a feat(value-matcher/README): document patterns 2020-10-23 10:21:52 +02:00
Rajaa.BARHTAOUI
8736c2cf9a feat(xo-server,xo-web/VIF): require network ACLs to change VIF locking mode (#5283)
See xoa-support#2929
2020-10-23 10:18:29 +02:00
Rajaa.BARHTAOUI
d825c33b55 feat(xo-server,xo-web/networks): ability to change a VIF's network for self users (#5203)
Fixes #5020
2020-10-23 10:05:03 +02:00
badrAZ
171ecaaf62 feat(xo-server, xo-web/host): display installed certificates (#5319)
See #5134
2020-10-21 14:36:55 +02:00
Julien Fontanet
5e6d5d4eb0 docs(xo-server): file restoration (#5336) 2020-10-20 15:35:16 +02:00
Rajaa.BARHTAOUI
3733a3c335 fix(xo-web): component names must be in PascalCase (#5322) 2020-10-20 15:19:59 +02:00
badrAZ
7fca6defd6 fix(xo-web, xo-server): fix host power state (#5288)
Fixes #4919
2020-10-20 10:01:48 +02:00
Julien Fontanet
2a270b399e fix(xo-server/getRemoteHandler): synchronize to avoid race conds during sync 2020-10-19 22:35:13 +02:00
Julien Fontanet
64109aee05 chore(vhd-lib/Vhd#_createBlock): assert block not already allocated 2020-10-19 22:33:58 +02:00
Julien Fontanet
e1d9395128 feat(vhd-lib/Vhd#_createBlock): dont initialize block
It's unnecessary, the block is written right after creation.
2020-10-19 22:30:57 +02:00
Julien Fontanet
32eec95c26 feat(@xen-orchestra/backups-cli): 0.2.1 2020-10-19 17:15:00 +02:00
badrAZ
f41cca45aa feat(audit-core, xo-server-audit): ability to delete a range of records (#5277) 2020-10-19 13:09:08 +02:00
Rajaa.BARHTAOUI
48eeab974c feat(xo-web/backup/restore): load backups in parallel (#5278)
See #5176
2020-10-19 11:47:33 +02:00
Pierre Donias
eed44156ae feat: release 5.51.1 (#5328) 2020-10-14 16:12:37 +02:00
Pierre Donias
1177d9bdd8 feat: technical release (#5327) 2020-10-14 15:23:00 +02:00
badrAZ
d151a94285 feat(xo-server, xo-web/proxy): ability to update http proxy cfg (#5148) 2020-10-14 14:43:33 +02:00
badrAZ
a7fe6453ee feat(xo-server#_migrateVmWithStorageMotion): improve migration error (#5306)
Fixes #5282
2020-10-14 13:17:55 +02:00
Rajaa.BARHTAOUI
313eb136f4 fix(xo-web): some methods call throw 'not enough permissions' (#5303)
See #5285
2020-10-13 15:30:06 +02:00
Mathieu
98591ff83d fix(xo-server/deleteResourceSet): detach VMs from resource set (#5312)
Fixes #4797
2020-10-12 11:33:29 +02:00
Mathieu
0b9d78560b feat(xo-web/home): better sort of bulk VM migrate selector (#5308)
Fixes #4462
2020-10-12 10:22:24 +02:00
Rajaa.BARHTAOUI
32a930e598 feat(xo-web/vm/network): remove unused method (#5316)
Introduced by c4d96fbc49
2020-10-12 10:05:57 +02:00
Rajaa.BARHTAOUI
edd8512196 feat(xo-web/vm): hide backup tab for non-admin users (#5317)
Fixes #5309
2020-10-12 10:02:08 +02:00
Mathieu
7a6aec34ae fix(xo-web/ips): fix the range formatting (#5314)
Fixes #3170
2020-10-09 17:15:50 +02:00
badrAZ
009a0c5703 feat(xo-web/backup/overview): add link from backup job/schedule to corresponding logs (#5260)
Fixes #4564
2020-10-09 14:54:23 +02:00
badrAZ
a99086b6bd chore(xo-web/log-alert-body): remove unused subscription (#5292)
Introduced By 3370014ddf
2020-10-07 15:17:29 +02:00
badrAZ
a186672447 feat(xo-web/sorted-table): ability to collapse actions (#5311)
See #5148
2020-10-07 11:13:10 +02:00
badrAZ
0b8a7c0d09 feat(xo-web/backup/new): enable created schedules by default (#5280)
See xoa-support#2921
2020-10-07 10:36:10 +02:00
BenjiReis
1990bf3d7a fix(sdn-controller): use correct bridge address to create tunnels (#5281)
Fixes xoa-support#2919
2020-10-06 23:54:02 +02:00
Julien Fontanet
ea74a7e401 fix(backups-cli clean-vms): fix limit-concurrency-decorator import 2020-10-06 18:35:35 +02:00
Nicolas Raynaud
bf12c3ff74 fix(xo-server/backup-ng): use getRemoteWithCredentials (#5315)
Introduced in #4907

Fix #5253

Otherwise the handler will be incorrectly defined due to the obfuscation of credentials.
2020-10-06 18:13:27 +02:00
Rajaa.BARHTAOUI
9d261aae76 fix(xo-web/home): hide backup filter for non-admin users (#5287)
See #5285
2020-10-06 16:47:20 +02:00
badrAZ
3d8c8fd745 feat(xo-web/backup-reports): hide merge task when no merge (#5263) 2020-10-06 16:08:12 +02:00
Rajaa.BARHTAOUI
6ad7db522a fix(xo-web/vm/disks): fix "not enough permissions" error (#5299)
Introduced by 1116530a6b
2020-10-06 15:16:27 +02:00
Rajaa.BARHTAOUI
385984b1d8 fix(xo-web/vm/disks): VDI disappears after migration (#5296)
Related to 1116530a6b
2020-10-06 10:38:33 +02:00
Mathieu
4f3d4b06b5 fix(xo-web/user): fix custom filters in default filter select (#5298) 2020-10-02 14:40:17 +02:00
badrAZ
2291986e2c fix(xo-web/new/network): omit bond slave PIFs from selection (#5262)
See xcp-ng.org/forum/topic/3524/old-networks-not-removed-after-creating-bond/8
2020-10-02 14:12:53 +02:00
Nicolas Raynaud
fc81cf4d70 fix(xo-web/remotes): fix S3 secret key edit UI (#5305)
Fixes #5233
2020-10-02 11:38:18 +02:00
Pierre Donias
fdeab86a87 fix(xo-web/xoa): don't show expired notifications (#5304) 2020-10-01 16:40:27 +02:00
Julien Fontanet
3616b7a67b feat(xo-server/sensitive-values): make obfuscated value obvious 2020-10-01 15:01:21 +02:00
Julien Fontanet
83ea57d825 feat(xo-server/vm.migrate): ensure original error is logged
Fixes https://github.com/vatesfr/xen-orchestra/pull/4364/files#r488539823
2020-10-01 14:52:42 +02:00
Mathieu
24a69bcade feat(xo-server,xo-web/host/advanced): add IOMMU state (#5294)
Fixes #4936
2020-10-01 10:40:10 +02:00
badrAZ
58dc3244be feat: release 5.51.0 (#5295) 2020-09-30 14:19:35 +02:00
badrAZ
61e580b992 feat: technical release (#5293) 2020-09-29 16:40:26 +02:00
Rajaa.BARHTAOUI
1116530a6b feat(xo-web/vm/disks): ability to migrate VDIs to other SRs within resource set (#5201)
See #5020
2020-09-29 16:07:10 +02:00
Pierre Donias
8cfaabedeb feat(xo-server-auth-ldap): import LDAP groups (#5279)
See #1884

When a user logs into XO using LDAP:

- Create a XO user if it doesn't already exist
- Mark it as being provided by LDAP and bind it to the LDAP user with an ID
- If group synchronization is enabled:
  - Fetch all the LDAP groups based on the Base and Filter (plugin config)
  - Create and delete the corresponding XO groups based on the LDAP groups that
    were found
  - Add and remove the XO users from the XO groups based on the LDAP data
2020-09-29 15:35:30 +02:00
Rajaa.BARHTAOUI
66ba05dcd0 feat(xo-web/tasks): display linked objects (#5267)
Fixes #4275
2020-09-29 15:13:56 +02:00
Julien Fontanet
d1db616d1e feat(backups-cli/clean-vms): limit VHD merging concurrency to 1 2020-09-29 10:43:48 +02:00
Nicolas Raynaud
aed09b152a fix(import/ova): speedup the import of gziped vmdk disks nested in .ova (#5275)
This is the follow-up to #5085

Avoid unzipping the entire file from the beginning before each read.
The test case went from 10min down to 26 seconds.

When reading a block from the gzipped file, we keep the current state in memory; if the next read happens at an offset greater than the previous read, we just carry on decompressing the file until the desired position.

The previous code would decompress from the start of the file for every read operation.
2020-09-28 15:42:55 +02:00
Nicolas Raynaud
f755365e23 fix(xo-web/remotes): fix editing bucket and directory for S3 (#5276) 2020-09-28 15:33:06 +02:00
badrAZ
ccd34c1610 fix(CHANGELOG): update advanced filter entry (#5290)
Introduced by 45fe70f0fa
2020-09-28 12:12:15 +02:00
badrAZ
f9104e6cc9 fix(xo-web/messages): remove unused messages (#5289)
Introduced by 45fe70f0fa
2020-09-28 12:10:21 +02:00
Nicolas Raynaud
4bb702fe89 fix(fs/S3): support 50GB+ files (#5242) 2020-09-27 20:49:41 +02:00
Julien Fontanet
511a04dad5 feat(xo-server-auth-ldap/configuration): add titles and reorder settings 2020-09-27 20:37:00 +02:00
Nicolas Raynaud
f3527a44d7 fix(vm/import): make the UI respond instantly to .ova file drop (#5274)
When dropping a file on the import zone, the UI didn't acknowledge the file until the vmdk tables were parsed.

Now the UI parses the XML instantly, displays it on the UI, and starts parsing the tables in the background. If the user clicks the "import" button, the system will finish parsing the tables and start the upload in one fell swoop, hiding the parsing time in the upload time.
2020-09-27 19:45:32 +02:00
Julien Fontanet
fdbe84cb1e chore(xo-server-test): format with Prettier 2020-09-27 19:42:35 +02:00
badrAZ
45fe70f0fa feat(xo-web/logs/backup-ng): advanced filter (#5208)
See #4406
2020-09-25 16:58:33 +02:00
badrAZ
2aed2fd534 feat(xo-web/logs/backup-ng): log tasks pagination (#5209)
See #4406
2020-09-24 16:36:18 +02:00
Julien Fontanet
a523fa9733 feat(@xen-orchestra/backups-cli): 0.2.0 2020-09-24 15:14:07 +02:00
Julien Fontanet
0f42f032e4 feat(backups-cli/info): compute the used space per job 2020-09-24 15:12:38 +02:00
Dom Del Nano
4575b98fd5 fix(xo-server#removeSubjectToResourceSet): rename to removeSubjectFromResourceSet (#5266) 2020-09-21 17:50:55 +02:00
Pierre Donias
3a0cc0d6f6 fix(xo-server/subjects/addToArraySet): dont erase previous values (#5269)
Prior to this change, adding a value to an existing set that already contains
that value would replace the whole set with a new one containing only that
value.
2020-09-21 12:14:50 +02:00
Pierre Donias
626e2fcb12 fix(xo-server/users): serialize properties on user create as well (#5273)
This didn't break anything because we usually don't assign `groups` and/or
`preferences` (which are the only 2 properties that need serialization) on user
creation.

This also prepares a minimal change to add a `authProviders` object property on
users.
2020-09-18 12:11:20 +02:00
Pierre Donias
592feb54b7 fix(xo-server/_authenticateUser): remove broken/unused provider API (#5270) 2020-09-18 10:54:20 +02:00
badrAZ
9c6b63e7e4 feat: release 5.50.3 (#5272) 2020-09-17 16:19:30 +02:00
badrAZ
4364a74b7a feat: technical release (patch) (#5271) 2020-09-17 16:07:58 +02:00
badrAZ
00f13102f8 feat(xo-server-audit): API method to clean DB (#5150) 2020-09-17 15:03:03 +02:00
badrAZ
3f17389871 fix(xo-server-audit): remove unused variable (#5268) 2020-09-17 10:51:09 +02:00
Julien Fontanet
726ba287b1 chore(xo-server-audit/_uploadLastHash): simplify conditions 2020-09-17 10:07:32 +02:00
Julien Fontanet
42ee29cb3c fix(xo-server-audit/_uploadLastHash): dont hide errors 2020-09-17 10:06:28 +02:00
badrAZ
8a98b6b012 feat(xo-server-audit/_uploadLastHash): check integrity sequentially (#5250)
- implementation is simpler
- stop on first error
2020-09-17 09:35:42 +02:00
Julien Fontanet
14ab694804 fix(xo-cli): mkdirp must not be promisified
Introduced by d622f7a65
2020-09-16 15:56:38 +02:00
Nicolas Raynaud
14b8cda543 fix(xo-vmdk-to-vhd/grabTables): read each entry independently (#5255)
Reading all entries at once causes problems on some VMDKs (those generated by VirtualBox) because they appear to be distributed throughout the VMDK, thus making the buffer not fit in memory.

See https://xcp-ng.org/forum/topic/3374/cannot-import-ova-from-virtualbox/14?_=1599689219209
2020-09-16 11:46:10 +02:00
Julien Fontanet
4264e34ffd feat(xo-web/createSubscription): support lazy subscribers (#5158)
These subscribers follow the value of the subscription but do not make the
subscription refresh itself.

A lazy subscriber triggers an initial fetch if no value is available.
2020-09-16 10:49:54 +02:00
Pierre Donias
bd9bf55e43 feat(xo-web/groups): bulk deletion (#5264) 2020-09-16 10:46:35 +02:00
Albin Hedman
7c802bbd33 feat(xo-web/dashboard/health): add 'too many snapshots' section (#5238) 2020-09-14 10:45:37 +02:00
Julien Fontanet
9e37f3f586 feat(xo-web/new VM): hide missing VDIs
See #5222

Related to 15bc30a2d
2020-09-11 11:11:25 +02:00
Nicolas Raynaud
1d4f5d068a fix(xo-web/VM import): make description optional (#5258) 2020-09-11 08:45:10 +02:00
badrAZ
5be5eb80e8 feat: release 5.50.2 (#5257) 2020-09-10 17:03:26 +02:00
Julien Fontanet
12c774a34a feat(travis-tests): dont stop on first failure on master 2020-09-10 16:42:56 +02:00
badrAZ
14c3fa4378 feat: technical release (patch) (#5256) 2020-09-10 16:36:25 +02:00
badrAZ
2f17420721 feat(xo-web/backup/overview): add link from log to its job (#5202)
See #4564
2020-09-10 16:05:36 +02:00
badrAZ
8d7f8d156f fix(xo-server,xo-web/orphan VDIs): ignore irrelevant VDI types (#5249)
Fixes #5248
2020-09-10 16:00:20 +02:00
Julien Fontanet
38248d8c35 fix(test): auto map @xen-orchestra/*, @vates/* and xo-* 2020-09-10 15:32:08 +02:00
badrAZ
edaae02892 fix(xo-web,xo-server#probeIscsiLuns): handle undefined lun size (#5212)
See xoa-support#2815
See https://xcp-ng.org/forum/topic/3409/getting-error-when-trying-to-mount-iscsi-lun
2020-09-09 12:11:50 +02:00
Rajaa.BARHTAOUI
846eff4984 feat(xo-web/vm/networks): improve tooltip messages (#5227)
See https://github.com/vatesfr/xen-orchestra/issues/4713#issuecomment-667655321
2020-09-09 11:59:54 +02:00
badrAZ
481adf3a1e feat(xo-server-audit): don't save last hash when it doesn't change (#5251) 2020-09-08 14:35:19 +02:00
Julien Fontanet
d622f7a65c chore: update dependencies 2020-09-07 10:26:51 +02:00
badrAZ
a479501aef feat: release 5.50.1 (#5246) 2020-09-04 12:04:55 +02:00
badrAZ
2456374e5a feat: technical release (patch) (#5247) 2020-09-04 11:55:01 +02:00
badrAZ
c77016ea44 feat(xo-server-usage-report): ignore replicated VMs (#5241)
Fixes #4778
2020-09-04 11:42:32 +02:00
badrAZ
6fd45a37e2 feat: technical release (patch) (#5245) 2020-09-04 11:16:45 +02:00
badrAZ
9be56d3ab8 fix(xo-server-audit): handle non-existent XOA plugin (#5239)
See 38de5048bc (commitcomment-41875481)
2020-09-04 10:31:46 +02:00
Julien Fontanet
24b264b6c9 fix(xo-server): TX checksumming is enabled by default
Introduced by fe2de9c1154c5a0b183c6d7897f1d9c376fa4031

Fixes #5234
2020-08-28 16:01:26 +02:00
tonyuh
7f9130470b feat(docs/backup_troubleshooting): SR_OPERATION_NOT_SUPPORTED (#5232)
Add SR_OPERATION_NOT_SUPPORTED error troubleshooting.
2020-08-28 11:49:47 +02:00
Julien Fontanet
b82aa1daa5 feat: release 5.50 2020-08-27 15:06:49 +02:00
Julien Fontanet
53cb325974 feat: technical release 2020-08-27 14:46:33 +02:00
Julien Fontanet
1256c320e3 feat(xo-web/host/network): button to scan PIFs
Fixes #5230
2020-08-27 11:42:28 +02:00
Julien Fontanet
15bc30a2d5 feat(xo-web/iso-device): hide missing VDIs
Fixes #5222
2020-08-27 11:13:59 +02:00
Julien Fontanet
fc3bc8468f feat(xo-server/backup): add proxyId to job log 2020-08-27 10:54:44 +02:00
Julien Fontanet
b4e068f630 chore(xo-server/backup executor): proxyId = job.proxy 2020-08-26 16:50:33 +02:00
Pierre Donias
08eef80673 feat(xo-web/orphan VDIs): show VDI(-snapshot)s that don't have VBDs (#5228)
In the Dashboard > Health > Orphan VDIs table, show non-ISO VDIs and
VDI-snapshot that don't have any VBDs.
2020-08-25 09:32:21 +02:00
Pierre Donias
152f73ebf0 feat: technical release (#5226) 2020-08-20 15:15:34 +02:00
badrAZ
38de5048bc feat(xo-server-audit): backup last hash (#5077) 2020-08-20 12:32:22 +02:00
Rajaa.BARHTAOUI
c4d96fbc49 feat(xo-web/vm/network): ability to change VIF locking mode (#5188)
See #4713
2020-08-20 09:48:50 +02:00
BenjiReis
ff25d402c1 fix(sdn-controller): host.$PIFs can have undefined element (#5217) 2020-08-20 09:25:41 +02:00
Julien Fontanet
f957024605 chore(xo-web): use js-cookie instead of cookies-js (#5224)
See #5223

`cookies-js` is no longer maintained.
2020-08-19 17:18:03 +02:00
badrAZ
006e54e2fd feat(xo-web, xo-server/proxy): improve proxy health check errors (#5191)
Fixes #5161
2020-08-19 16:20:05 +02:00
badrAZ
5f7bc58788 fix(xo-server/sensitive-values): obfuscate params containing "password" (#5220)
Fixes #5219
2020-08-19 10:56:40 +02:00
Fabian Untermoser
bdd93603aa fix(docs/backups): fix typo (#5225) 2020-08-18 21:46:30 +02:00
Rajaa.BARHTAOUI
8392a17cb2 fix(xo-server/authentication): add missing 'createPredicate' (#5221)
Fixes #5218
Introduced by 9ded2641a7
2020-08-18 16:15:35 +02:00
Julien Fontanet
5f7f0b777e fix(xo-server): use http.cookies config everywhere 2020-08-17 11:06:45 +02:00
Julien Fontanet
3f574606d9 feat(backups-cli/clean-vms): display merge progress 2020-08-17 11:06:45 +02:00
badrAZ
45f0f93895 feat(xo-server,xo-web/VM): ability to set VIF TX checksumming (#5182)
Fixes #5095
See xoa-support#2619
2020-08-14 17:14:48 +02:00
badrAZ
af2710135b fix(xo-web/proxies): remove upgrade button style (#5216)
See https://github.com/vatesfr/xen-orchestra/pull/5167/files?file-filters%5B%5D=.md#r469763760

...when the proxy is already up to date
2020-08-13 16:52:29 +02:00
Rajaa.BARHTAOUI
95ed6094fe fix(xo-web/vm/snapshot): fix redirection when copying a VM (#5213)
Introduced by d9211053ce
2020-08-13 11:12:56 +02:00
BenjiReis
6af8ce9eeb feat(sdn-controller): specify tunnel protocol at its creation (#5210) 2020-08-13 11:11:11 +02:00
badrAZ
3ff37f00fe fix(xo-web/deploy-proxy): throw error on trial start failure (#5196)
Introduced by 902953a1fa
2020-08-12 15:11:49 +02:00
Pierre Donias
ed5b066cbe fix(CHANGELOG.unreleased): remove sdn-controller package (#5214)
Introduced by cec5593c70
2020-08-12 12:21:18 +02:00
Mark Martin
cec5593c70 feat(xo-web/vm): protect from accidental shutdown (#5107)
Fixes #5090
2020-08-12 12:17:30 +02:00
Julien Fontanet
04924884ad feat(@xen-orchestra/backups-cli): 0.1.0 2020-08-11 09:57:01 +02:00
Julien Fontanet
3ccf64fcd3 feat(backups-cli/clean-vms): merge single children 2020-08-11 09:56:25 +02:00
Julien Fontanet
8eb7f9b91c fix(xo-server-sdn-controller): remove deprecated uuid import 2020-08-10 11:15:40 +02:00
Julien Fontanet
f25c50c629 chore: update dev deps 2020-08-10 11:15:40 +02:00
Rajaa.BARHTAOUI
e524a1b865 feat: release 5.49.1 (#5207) 2020-08-05 14:46:12 +02:00
Rajaa.BARHTAOUI
ac15e3355e feat: technical release (#5206)
* feat(xo-server-sdn-controller): 1.0.2

* feat(xo-web): 5.67.0

* chore(CHANGELOG): update next
2020-08-05 13:39:50 +02:00
Julien Fontanet
0930a37819 fix(CHANGELOG.unreleased): add xo-server patch
Due to 2789ead99
2020-08-05 12:33:30 +02:00
Rajaa.BARHTAOUI
d62f91a9e6 feat(xo-web/sr/advanced): show thin/thick provisioning for missing SR types (#5204) 2020-08-05 11:15:55 +02:00
Julien Fontanet
2789ead999 fix(xo-server/pool.listMissingPatches): really dont log errors
Previous fix (e1bf68ab3) was incorrect.
2020-08-05 11:10:35 +02:00
BenjiReis
f25fd267dd fix(sdn-controller): only admin can create private networks (#5200) 2020-08-04 14:09:33 +02:00
BenjiReis
47999f1f72 doc(sdn-controller): indicate correct version in OpenFlow requirements (#5199) 2020-08-03 16:47:13 +02:00
Pierre Donias
095bbcd15c feat: release 5.49.0 (#5195) 2020-07-31 14:14:32 +02:00
Pierre Donias
9177bb8451 feat: technical release (patch) (#5194) 2020-07-31 09:41:11 +02:00
Pierre Donias
119bf9b0ff feat(xo-web/VM/network): click on IP address to copy it (#5186)
Fixes #5185
2020-07-31 09:17:37 +02:00
Julien Fontanet
015c6037c4 fix: add prepublishOnly to replace removed prepare scripts
Introduced by 452a7e744
2020-07-30 19:07:36 +02:00
Adam Stankiewicz
452a7e7445 feat(test): remove need for prepare scripts (#5192) 2020-07-30 18:56:13 +02:00
Nicolas Raynaud
407586e2d5 feat(remotes): AWS S3 backup storage (#5037) 2020-07-30 16:47:04 +02:00
badrAZ
ffa431a3cd fix(xo-web/vm/tab-network): add default value to plugins (#5190)
Introduced by 2a74a49995

Plugins can be `undefined` on fetching which triggers the error `Cannot read property "some" of undefined`
2020-07-30 14:57:06 +02:00
Julien Fontanet
281a5ff991 chore: remove unused JSHint comments 2020-07-30 14:32:19 +02:00
BenjiReis
92db9bd284 fix(xo-server-sdn-controller): deactivate DH for TLS connections (#5187)
Fixes #5074
2020-07-30 13:17:40 +02:00
Julien Fontanet
ea8f319f45 feat(self-signed): expose days option 2020-07-30 11:20:57 +02:00
Olivier Lambert
a11e9fe04e fix(changelog): caps typo (#5184) 2020-07-29 16:58:10 +02:00
Pierre Donias
27367bd1fc fix(CHANGELOG): wrong xo-server version (#5183) 2020-07-29 16:50:16 +02:00
Pierre Donias
c6f48ae054 feat: technical release (#5181) 2020-07-29 16:32:55 +02:00
Rajaa.BARHTAOUI
7d6efe3694 feat(xo-web/vm/network): improve the VIF locking mode feedback (#5170)
See #4713
2020-07-29 15:55:59 +02:00
Rajaa.BARHTAOUI
f4aad05edc feat(xo-web/backup): show warning if min(fullBackupInterval, retention)>50 (#5144)
See https://xcp-ng.org/forum/post/27539
2020-07-29 15:30:38 +02:00
Pierre Donias
d8f7637ca0 feat(xo-web/self): ability to cancel edition of resource set (#5174)
See xoa-support#2767
2020-07-29 13:59:45 +02:00
badrAZ
f9a7bd199e fix(xo-server#createVm): change network boot priority (#5119)
Fixes #4980
2020-07-29 11:34:24 +02:00
Rajaa.BARHTAOUI
68b7ed284a feat(xo-web/backup/health): show detached VM snapshots (#5125)
Fixes #5086
2020-07-29 10:56:31 +02:00
badrAZ
e782895cf5 feat(xo-server-audit): add extension to exported records (#5180) 2020-07-29 10:52:06 +02:00
Julien Fontanet
a5935b40d5 feat(xo-server/api): user must be signed in by default (#5175)
It's a lot more secure than previous default value.
2020-07-29 10:40:17 +02:00
badrAZ
035d2cb440 fix(xo-server-audit): fix incorrect records content type (#5179) 2020-07-29 10:38:59 +02:00
BenjiReis
2a74a49995 feat(sdn-controller, xo-web): add & remove network rules to a VM's VIFs (#5177) 2020-07-29 09:56:06 +02:00
badrAZ
902953a1fa feat(xo-server, xo-web): display proxy available upgrades (#5167) 2020-07-28 17:13:01 +02:00
Pierre Donias
1ffef91b7a fix(xo-web/copyVm): correctly pass the VM type to copyVms (#5173)
Fixes xoa-support#2773

The modal uses the type to find the objects (either VMs or snapshots) and use
them to generate the names. The missing type caused the VM clones to be named
`undefined_clone`.
2020-07-28 16:29:37 +02:00
Julien Fontanet
3d13d9b0dc feat(xo-server/addApiMethod): check method props 2020-07-28 16:02:32 +02:00
Nicolas Raynaud
adcc5d5692 feat(import/ova): allow import of gzipped vmdk disks (#5085) 2020-07-28 11:52:44 +02:00
badrAZ
c49d70170e feat(xo-server,xo-web/proxy/deploy): ability to set HTTP proxy (#5145) 2020-07-28 11:51:57 +02:00
badrAZ
349a78a5bd fix(xo-web/file-restore): ignore proxy remotes (#5171)
See xoa-support#2741
2020-07-28 11:43:33 +02:00
badrAZ
48734c6896 fix(xo-web/proxies): don't open proxy VM in new tab (#5172) 2020-07-28 10:23:16 +02:00
Rajaa.BARHTAOUI
0f60a3b24d feat(xo-web/home): ability to filter by power state (#5118) 2020-07-27 16:52:07 +02:00
Pierre Donias
d3a88011a6 feat(xo-server/self): can ignore VM snapshots resources usage (#5164)
See xoa-support#2643

With the config option `selfService.ignoreVmSnapshotResources`
2020-07-27 16:08:20 +02:00
Rajaa.BARHTAOUI
9b6e4c605b feat(xo-web/schedules): ability to enable schedule when editing (#5111)
Fixes #5026

This change affects ordinary jobs only, not backup jobs
2020-07-27 15:46:21 +02:00
Nicolas Raynaud
7c91524111 fix(OVA import): allow import of .ova files generated by Red Hat (#5159)
See xoa-support#2713.

 - use <rasd:Address> when <rasd:AddressOnParent> is not available on disks
 - avoid dotfiles and pax headers in tar parser.
2020-07-27 08:24:01 +02:00
Olivier Lambert
e1573069e4 fix(docs/license): truly fix the warning tag (#5169) 2020-07-24 10:08:58 +02:00
marcpezin
f2459c964b Fixing warning closing tag (#5168)
Fixing warning closing tag
2020-07-24 09:20:36 +02:00
Pierre Donias
43aa0b815d feat(xo-web/VM/disks): sort disks by device by default (#5165)
Fixes #5163
2020-07-23 09:53:07 +02:00
marcpezin
0740630e05 fix(docs): rebind license documentation update (#5166) 2020-07-21 17:16:18 +02:00
Rajaa.BARHTAOUI
c9244b2b13 feat(xo-web): log Invalid XML-RPC message error as an unexpected response (#5138)
See xoa-support#2588
2020-07-21 15:11:57 +02:00
Pierre Donias
0d398f867f fix(xo-web/home): link to global filter section (#5157) 2020-07-16 15:32:44 +02:00
Julien Fontanet
b74ec2d7d3 fix(xo-web/backup/restore): dont fail if no backups for a VM (#5156)
Fixes xoa-support#2707

The API does not guarantee that each VM UUID will have at least one backup, and when that happens, the code failed because `first` and `last` were not properly defined.
2020-07-16 14:29:10 +02:00
Julien Fontanet
26a295c8ed chore(xo-web/createSubscription/run): add small comment 2020-07-13 16:13:33 +02:00
Julien Fontanet
2a71d3d20c chore(xo-web/createSubscription): rename loop to run 2020-07-13 16:10:48 +02:00
Pierre Donias
b79605b692 feat: release 5.48.3 (#5154) 2020-07-10 18:12:03 +02:00
Pierre Donias
ea0fc68a53 feat: technical release (patch) (#5153) 2020-07-10 16:38:41 +02:00
badrAZ
1ca5c32de3 feat(xo-web/audit): show warning in case of disabled logs record (#5152) 2020-07-10 15:32:30 +02:00
Julien Fontanet
f51bcfa05a feat(xo-server-audit): add active setting (default false) (#5151) 2020-07-10 14:29:30 +02:00
Pierre Donias
e1bf68ab38 fix(xo-server/pool.listMissingPatches): don't log errors (#5149)
Work-around to avoid generating too many logs
2020-07-10 12:49:37 +02:00
Pierre Donias
99e03b7ce5 fix(xo-web): broken doc links (#5146)
Introduced by 30d69dadbb
2020-07-10 09:14:55 +02:00
Julien Fontanet
cd70d3ea46 fix(Travis CI): use Node 12 (#5147) 2020-07-09 15:51:22 +02:00
Julien Fontanet
d387227cef fix(xo-server/backups): dont ignore proxy job result/error 2020-07-09 09:40:02 +02:00
Pierre Donias
2f4530e426 feat: release 5.48.2 (#5143) 2020-07-07 15:49:34 +02:00
badrAZ
4db181d8bf chore(xo-server-audit): document audit DB structure (#5078)
Pierre Donias
9a7a1cc752 feat: technical release (#5142) 2020-07-07 15:03:04 +02:00
Pierre Donias
59ca6c6708 feat(xo-web): prevent XO from checking time consistency of halted hosts 2020-07-07 14:51:06 +02:00
Pierre Donias
fe7901ca7f feat(xo-web): prevent XO from listing missing patches on halted XCP-ng hosts
Otherwise it triggers a lot of errors on XCP-ng
2020-07-07 14:51:06 +02:00
Pierre Donias
9351b4a5bb feat(xo-web/backup/logs): better resolution of last run log (#5141) 2020-07-07 13:47:32 +02:00
badrAZ
dfdd0a0496 fix(xo-server-test): extend timeout of deleteTempResources (#5117) 2020-07-07 13:31:13 +02:00
Pierre Donias
cda39ec256 fix(xo-web/backup/edit): tags overwritten by default ones (#5136)
Introduced by 1c042778b6
See xoa-support#2663

It was due to a race condition between the fetch of the default excluded tags
and the fetch of the job (and its schedules) which made the component populate
the form with the job's information.

Changes:
- `Edit` waits for the `job` and the `schedules` before rendering its child
- `New`'s wrapper waits for the `remotes` and the `suggestedExcludedTags` before
  rendering `New`
- `New`: `updateParams` is now called in `initialize` because we don't need to
  wait for the job and the schedules any more to be able to populate the form
2020-07-07 09:40:40 +02:00
Olivier Lambert
3720a46ff3 feat(docs/supported_hosts): no pro support for XS < 6.5 (#5137) 2020-07-06 20:03:47 +02:00
Julien Fontanet
7ea50ea41e fix(xo-server/callProxyMethod): dont use HTTP proxy 2020-07-06 17:06:37 +02:00
Pierre Donias
60a696916b feat: release 5.48.1 (#5133) 2020-07-03 15:25:39 +02:00
Pierre Donias
b6a255d96f feat: technical release (patch) (#5132) 2020-07-03 14:24:40 +02:00
marcpezin
44a0cce7f2 fix(docs/license_management): replace confusing screenshot about activation (#5131) 2020-07-03 12:03:07 +02:00
Nicolas Raynaud
f580e0d26f fix(import/OVA): fix big size parsing in OVA files (#5129) 2020-07-03 11:48:39 +02:00
Rajaa.BARHTAOUI
6beefe86e2 feat(xo-web/backup): don't open edition in new tab (#5130) 2020-07-03 11:45:06 +02:00
Julien Fontanet
cbada35788 fix(xo-server/file restore): dont fail on LVM partitions
Fixes xoa-support#2640

Introduced by 48ce7df43

The issue was due to object spreading copying only own properties but `path` is now an inherited property due to `dedupeUnmount`.
2020-07-03 11:05:04 +02:00
Julien Fontanet
44ff2f872d feat(xo-server/getBackupNgLogs): dont fail on undefined/null errors
See xoa-support#2663

This should almost never happen but if it does, it should not prevent logs from being consolidated.
2020-07-03 09:22:28 +02:00
Julien Fontanet
2198853662 feat(xo-server/logs-cli): can match on missing props 2020-07-03 09:22:28 +02:00
badrAZ
4636109081 fix(xo-web/(file-)restore-legacy): ignore proxy remotes (#5124)
Legacy restore doesn't support proxy remotes
2020-07-02 16:23:36 +02:00
Pierre Donias
1c042778b6 feat(xo-server,xo-web/smart backup): exclude XO Proxy VMs by default (#5128) 2020-07-02 15:06:47 +02:00
Rajaa.BARHTAOUI
34b5962eac fix(xo-web/backup/health): missing noop function (#5126)
Introduced by committing a suggestion https://github.com/vatesfr/xen-orchestra/pull/5062#discussion_r446135166 
without taking into account that the `noop` function was not defined there.
2020-07-02 15:05:55 +02:00
Rajaa.BARHTAOUI
fc7af59eb7 chore(xo-web/home): remove 'tags' filter from selector (#5121)
See https://github.com/vatesfr/xen-orchestra/pull/5118#discussion_r447586676
2020-07-02 14:52:59 +02:00
Olivier Lambert
7e557ca059 feat(docs/supported hosts): add CH 8.2 LTS in the list of supported hosts (#5127) 2020-07-02 09:30:16 +02:00
Julien Fontanet
1d0cea8ad0 feat(xo-server/logs-cli): add --delete command 2020-07-01 18:04:45 +02:00
Julien Fontanet
5c901d7c1e fix(xo-server/logs-cli): dont fail on non-string value 2020-07-01 18:01:34 +02:00
Julien Fontanet
1dffab0bb8 feat(xen-api): 0.29.0 2020-07-01 17:11:19 +02:00
Julien Fontanet
ae89e14ea2 feat(xo-server/getRemoteHandler): throw for proxy remotes 2020-07-01 11:46:28 +02:00
Pierre Donias
908255060c feat: release 5.48.0 (#5123) 2020-06-30 17:25:16 +02:00
Pierre Donias
88278d0041 feat: technical release (patch) (#5122) 2020-06-30 16:51:20 +02:00
Julien Fontanet
86bfd91c9d feat(xo-server/backup): logs proxy support 2020-06-30 15:45:40 +02:00
Julien Fontanet
0ee412ccb9 feat(xo-server/callProxyMethod): allow proxy.address to contain port 2020-06-30 15:20:58 +02:00
Julien Fontanet
b8bd6ea820 chore(xo-server/callProxyMethod): use parse.result to handle errors 2020-06-30 12:14:45 +02:00
Julien Fontanet
98a1ab3033 fix(xo-server/callProxyMethod): destroy lines stream in case of error 2020-06-30 12:14:45 +02:00
Julien Fontanet
e360f53a40 fix(xo-server/callProxyMethod): all lines should be JSON parsed 2020-06-30 12:14:45 +02:00
Julien Fontanet
237ec38003 fix(xo-server/callProxyMethod): lines is an object stream 2020-06-30 12:14:45 +02:00
Julien Fontanet
30ea1bbf87 feat(xo-server/callProxyMethod): expectStream as named option 2020-06-30 12:14:45 +02:00
Pierre Donias
0d0aef6014 feat: technical release (patch) (#5120) 2020-06-30 11:40:44 +02:00
badrAZ
1b7441715c feat(xo-server-perf-alert): regroup items with missing stats in one email (#4413)
Fixes #5104
2020-06-30 09:30:20 +02:00
badrAZ
e3223b6124 fix(xo-web/audit): ref not correctly forwarded (#5106)
Introduced by 9f29a047a7

The high level component (audit UI) needs to call a `SortedTable` method. The only way to do it is to have access to its ref.

All components between the high level component and the `SortedTable` should forward this ref. Unfortunately, the commit 9f29a047a7 decorates the `SortedTable` with `withRouter` without forwarding its ref, which breaks the audit check integrity feedback.
2020-06-29 15:44:33 +02:00
Julien Fontanet
41fb06187b fix(coalesce-calls/README): fix import 2020-06-29 15:39:14 +02:00
Julien Fontanet
adf0e8ae3b feat(@vates/coalesce-calls): 0.1.0 2020-06-29 15:33:53 +02:00
Rajaa.BARHTAOUI
42dd1efb41 feat(xo-web/home): remove unnecessary condition (#5116)
Introduced by f736381933

Backup filter: remove the needless condition to go to the first page.
2020-06-29 15:24:28 +02:00
badrAZ
b6a6694abf fix(xo-server-test/job with non-existent VM): logged as failed task (#5112)
Since c061505bf8, missing VMs are logged as failed tasks
2020-06-29 14:04:23 +02:00
Pierre Donias
04f2f50d6d feat: technical release (#5115) 2020-06-26 15:56:40 +02:00
Rajaa.BARHTAOUI
6d1048e5c5 feat(xo-web/backup/health): show detached backups (#5062)
See #4716
2020-06-26 14:45:01 +02:00
Pierre Donias
fe722c8b31 feat(xo-web/licenses): rebind license to this XOA (#5110)
See xoa#55
Requires xoa#58

When an XOA license is bound to another XOA, allow the user to move it to the
current XOA (thus unbinding it from the other one).
2020-06-26 13:12:53 +02:00
badrAZ
0326ce1d85 feat(xo-server-audit): ignore common methods without side effects (#5109)
Ignoring these methods reduces the number of records in the audit, which makes it easier to analyze and store.
2020-06-25 16:55:57 +02:00
Rajaa.BARHTAOUI
183ddb68d3 fix(changelog): missing a parenthesis (#5113) 2020-06-25 15:53:30 +02:00
228 changed files with 12184 additions and 3440 deletions

View File

@@ -1,7 +1,6 @@
language: node_js
node_js:
#- stable # disable for now due to an issue of indirect dep upath with Node 9
- 8
- 12
# Use containers.
# http://docs.travis-ci.com/user/workers/container-based-infrastructure/

View File

@@ -0,0 +1,46 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->
# @vates/coalesce-calls
[![Package Version](https://badgen.net/npm/v/@vates/coalesce-calls)](https://npmjs.org/package/@vates/coalesce-calls) ![License](https://badgen.net/npm/license/@vates/coalesce-calls) [![PackagePhobia](https://badgen.net/bundlephobia/minzip/@vates/coalesce-calls)](https://bundlephobia.com/result?p=@vates/coalesce-calls) [![Node compatibility](https://badgen.net/npm/node/@vates/coalesce-calls)](https://npmjs.org/package/@vates/coalesce-calls)
> Wraps an async function so that concurrent calls will be coalesced
## Install
Installation of the [npm package](https://npmjs.org/package/@vates/coalesce-calls):
```
> npm install --save @vates/coalesce-calls
```
## Usage
```js
import { coalesceCalls } from '@vates/coalesce-calls'
const connect = coalesceCalls(async function () {
// async operation
})
connect()
// the previous promise result will be returned if the operation is not
// complete yet
connect()
```
## Contributions
Contributions are _very_ welcomed, either on the documentation or on
the code.
You may:
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
you've encountered;
- fork and create a pull request.
## License
[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)

View File

@@ -0,0 +1,13 @@
```js
import { coalesceCalls } from '@vates/coalesce-calls'
const connect = coalesceCalls(async function () {
// async operation
})
connect()
// the previous promise result will be returned if the operation is not
// complete yet
connect()
```

View File

@@ -0,0 +1,14 @@
/**
 * Wraps a function so that concurrent calls are coalesced: while a call is
 * still pending, subsequent calls return the same promise instead of invoking
 * `fn` again.
 *
 * @param {Function} fn - function to wrap, usually async; `this` and all
 *   arguments are forwarded to it. Its result is normalized with
 *   `Promise.resolve`, so plain (synchronous) return values are supported as
 *   well — the previous implementation threw a TypeError on non-thenables.
 * @returns {Function} the coalescing wrapper
 */
function coalesceCalls(fn) {
  let pending

  // forget the in-flight promise as soon as it settles so that the next call
  // triggers a fresh invocation
  const reset = () => {
    pending = undefined
  }

  return function () {
    if (pending === undefined) {
      // Promise.resolve is a no-op for native promises and makes the wrapper
      // robust when fn returns a plain value
      pending = Promise.resolve(fn.apply(this, arguments))
      pending.then(reset, reset)
    }
    return pending
  }
}

// guarded so the module stays loadable outside CommonJS (e.g. ESM harnesses);
// identical behavior when loaded as a CommonJS module
if (typeof exports !== 'undefined') {
  exports.coalesceCalls = coalesceCalls
}

View File

@@ -0,0 +1,33 @@
/* eslint-env jest */

// Unit tests for coalesceCalls (./index.js).
const { coalesceCalls } = require('./')

// Creates a manually controllable promise ("deferred"): returns an object
// exposing the promise together with its resolve/reject functions.
const pDefer = () => {
const r = {}
r.promise = new Promise((resolve, reject) => {
r.reject = reject
r.resolve = resolve
})
return r
}

describe('coalesceCalls', () => {
it('decorates an async function', async () => {
// the decorated function simply returns the promise it is given, so the
// test fully controls when each "call" settles
const fn = coalesceCalls(promise => promise)

// two calls while the first is pending: both must share the first promise
const defer1 = pDefer()
const promise1 = fn(defer1.promise)
const defer2 = pDefer()
const promise2 = fn(defer2.promise)
defer1.resolve('foo')
// promise2 resolves to 'foo' (not 'bar'): the second call was coalesced
expect(await promise1).toBe('foo')
expect(await promise2).toBe('foo')

// after settlement, a new call goes through to the wrapped function again
const defer3 = pDefer()
const promise3 = fn(defer3.promise)
defer3.resolve('bar')
expect(await promise3).toBe('bar')
})
})

View File

@@ -0,0 +1,38 @@
{
"private": false,
"name": "@vates/coalesce-calls",
"description": "Wraps an async function so that concurrent calls will be coalesced",
"keywords": [
"async",
"calls",
"coalesce",
"decorate",
"decorator",
"merge",
"promise",
"wrap",
"wrapper"
],
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/coalesce-calls",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "@vates/coalesce-calls",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"files": [
"index.js"
],
"author": {
"name": "Vates SAS",
"url": "https://vates.fr"
},
"license": "ISC",
"version": "0.1.0",
"engines": {
"node": ">=8.10"
},
"scripts": {
"postversion": "npm publish --access public"
}
}

View File

@@ -46,7 +46,6 @@
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepare": "yarn run build",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
}

View File

@@ -30,6 +30,7 @@
"rimraf": "^3.0.0"
},
"dependencies": {
"@xen-orchestra/log": "^0.2.0",
"core-js": "^3.6.4",
"golike-defer": "^0.4.1",
"lodash": "^4.17.15",

View File

@@ -2,9 +2,12 @@
import 'core-js/features/symbol/async-iterator'
import assert from 'assert'
import createLogger from '@xen-orchestra/log'
import defer from 'golike-defer'
import hash from 'object-hash'
const log = createLogger('xo:audit-core')
export class Storage {
constructor() {
this._lock = Promise.resolve()
@@ -25,7 +28,7 @@ export class Storage {
//
// http://man7.org/linux/man-pages/man3/crypt.3.html#NOTES
const ID_TO_ALGORITHM = {
'5': 'sha256',
5: 'sha256',
}
export class AlteredRecordError extends Error {
@@ -65,8 +68,17 @@ export class AuditCore {
// Appends a new audit record for the given subject/event pair.
//
// `@defer` (golike-defer) injects `$defer`; callbacks registered on it run
// when the method settles, whatever the outcome.
@defer
async add($defer, subject, event, data) {
// timestamp the record at call time, before waiting for the lock
const time = Date.now()
// acquireLock() presumably resolves to a release callback which $defer
// invokes on exit, so the storage lock is held for the whole call — TODO confirm
$defer(await this._storage.acquireLock())
return this._addUnsafe({
data,
event,
subject,
time,
})
}
async _addUnsafe({ data, event, subject, time }) {
const storage = this._storage
$defer(await storage.acquireLock())
// delete "undefined" properties and normalize data with JSON.stringify
const record = JSON.parse(
@@ -139,4 +151,45 @@ export class AuditCore {
await this._storage.del(id)
}
}
// Deletes every record in the range [newest .. oldest] (records chain from
// newest to oldest via their `previousId`) and re-appends the records that
// are more recent than the range, so the record chain stays consistent.
//
// NOTE(review): re-appended records get new ids/chain links via _addUnsafe;
// failures there are logged and skipped rather than aborting the whole
// rewrite.
@defer
async deleteRangeAndRewrite($defer, newest, oldest) {
// both bounds are mandatory
assert.notStrictEqual(newest, undefined)
assert.notStrictEqual(oldest, undefined)
const storage = this._storage
// hold the storage lock for the whole operation ($defer releases it on exit)
$defer(await storage.acquireLock())
// both bounds must reference existing records
assert.notStrictEqual(await storage.get(newest), undefined)
const oldestRecord = await storage.get(oldest)
assert.notStrictEqual(oldestRecord, undefined)
const lastId = await storage.getLastId()
// collect the records more recent than `newest`: they must survive the
// deletion and be re-appended afterwards
const recentRecords = []
for await (const record of this.getFrom(lastId)) {
if (record.id === newest) {
break
}
recentRecords.push(record)
}
// delete the range itself, walking from newest to oldest (inclusive)
for await (const record of this.getFrom(newest)) {
await storage.del(record.id)
if (record.id === oldest) {
break
}
}
// rewind the chain head to just before the deleted range…
await storage.setLastId(oldestRecord.previousId)
// …then re-append the surviving recent records and drop their old copies
for (const record of recentRecords) {
try {
await this._addUnsafe(record)
await storage.del(record.id)
} catch (error) {
log.error(error)
}
}
}
}

View File

@@ -17,9 +17,10 @@ interface Record {
}
export class AuditCore {
constructor(storage: Storage) {}
public add(subject: any, event: string, data: any): Promise<Record> {}
public checkIntegrity(oldest: string, newest: string): Promise<number> {}
public getFrom(newest?: string): AsyncIterator {}
public deleteFrom(newest: string): Promise<void> {}
constructor(storage: Storage) { }
public add(subject: any, event: string, data: any): Promise<Record> { }
public checkIntegrity(oldest: string, newest: string): Promise<number> { }
public getFrom(newest?: string): AsyncIterator { }
public deleteFrom(newest: string): Promise<void> { }
public deleteRangeAndRewrite(newest: string, oldest: string): Promise<void> { }
}

View File

@@ -3,6 +3,17 @@ const { dirname } = require('path')
const fs = require('promise-toolbox/promisifyAll')(require('fs'))
module.exports = fs
// Returns the size in bytes of the file at `path`, or 0 when the file does
// not exist (ENOENT); any other stat error is propagated.
fs.getSize = async function getSize(path) {
  try {
    const stats = await fs.stat(path)
    return stats.size
  } catch (error) {
    if (error.code !== 'ENOENT') {
      throw error
    }
    return 0
  }
}
fs.mktree = async function mkdirp(path) {
try {
await fs.mkdir(path)

View File

@@ -8,9 +8,10 @@ let force
const assert = require('assert')
const flatten = require('lodash/flatten')
const getopts = require('getopts')
const limitConcurrency = require('limit-concurrency-decorator').default
const lockfile = require('proper-lockfile')
const pipe = require('promise-toolbox/pipe')
const { default: Vhd } = require('vhd-lib')
const { default: Vhd, mergeVhd } = require('vhd-lib')
const { dirname, resolve } = require('path')
const { DISK_TYPE_DIFFERENCING } = require('vhd-lib/dist/_constants')
const { isValidXva } = require('@xen-orchestra/backups/isValidXva')
@@ -26,10 +27,10 @@ const handler = require('@xen-orchestra/fs').getHandler({ url: 'file://' })
//
// the whole chain will be merged into parent, parent will be renamed to child
// and all the others will be deleted
async function mergeVhdChain(chain) {
const mergeVhdChain = limitConcurrency(1)(async function mergeVhdChain(chain) {
assert(chain.length >= 2)
const child = chain[0]
let child = chain[0]
const parent = chain[chain.length - 1]
const children = chain.slice(0, -1).reverse()
@@ -46,15 +47,36 @@ async function mergeVhdChain(chain) {
// `mergeVhd` does not work with a stream, either
// - make it accept a stream
// - or create synthetic VHD which is not a stream
return console.warn('TODO: implement merge')
// await mergeVhd(
// handler,
// parent,
// handler,
// children.length === 1
// ? child
// : await createSyntheticStream(handler, children)
// )
if (children.length !== 1) {
console.warn('TODO: implement merging multiple children')
children.length = 1
child = children[0]
}
let done, total
const handle = setInterval(() => {
if (done !== undefined) {
console.log('merging %s: %s/%s', child, done, total)
}
}, 10e3)
await mergeVhd(
handler,
parent,
handler,
child,
// children.length === 1
// ? child
// : await createSyntheticStream(handler, children),
{
onProgress({ done: d, total: t }) {
done = d
total = t
},
}
)
clearInterval(handle)
}
await Promise.all([
@@ -66,7 +88,7 @@ async function mergeVhdChain(chain) {
return force && handler.unlink(child)
}),
])
}
})
const listVhds = pipe([
vmDir => vmDir + '/vdis',

View File

@@ -0,0 +1,58 @@
const groupBy = require('lodash/groupBy')
const { createHash } = require('crypto')
const { dirname, resolve } = require('path')
const asyncMap = require('../_asyncMap')
const { readdir2, readFile, getSize } = require('../_fs')
// Hex-encoded SHA-512 digest of `str`.
const sha512 = str => {
  const hasher = createHash('sha512')
  hasher.update(str)
  return hasher.digest('hex')
}
// Sum of an array of numbers; returns 0 for an empty array — the previous
// `reduce` without an initial value threw a TypeError on [].
const sum = values => values.reduce((total, value) => total + value, 0)
// Prints, per backup job, the total on-disk size of the VM backups found in
// the given VM directories; duplicate metadata files (same SHA-512) are
// reported and skipped.
module.exports = async function info(vmDirs) {
// every *.json file directly inside each VM directory is a backup metadata file
const jsonFiles = (
await asyncMap(vmDirs, async vmDir =>
(await readdir2(vmDir)).filter(_ => _.endsWith('.json'))
)
).flat()
// hash of file content -> first file seen with that content (null prototype
// avoids collisions with Object.prototype keys)
const hashes = { __proto__: null }
const info = (
await asyncMap(jsonFiles, async jsonFile => {
try {
const jsonDir = dirname(jsonFile)
const json = await readFile(jsonFile)
const hash = sha512(json)
if (hash in hashes) {
// identical metadata already processed: report and skip
console.log(jsonFile, 'duplicate of', hashes[hash])
return
}
hashes[hash] = jsonFile
const metadata = JSON.parse(json)
return {
jsonDir,
jsonFile,
metadata,
// total size = metadata file + data files: all VHDs for delta backups,
// the single XVA otherwise (paths are relative to the metadata file)
size:
json.length +
(await (metadata.mode === 'delta'
? asyncMap(Object.values(metadata.vhds), _ =>
getSize(resolve(jsonDir, _))
).then(sum)
: getSize(resolve(jsonDir, metadata.xva)))),
}
} catch (error) {
// unreadable/unparseable metadata: report and keep going
console.error(jsonFile, error)
}
})
).filter(_ => _ !== undefined)
// aggregate and print total size per job id, in sorted order
const byJobs = groupBy(info, 'metadata.jobId')
Object.keys(byJobs)
.sort()
.forEach(jobId => {
console.log(jobId, sum(byJobs[jobId].map(_ => _.size)))
})
}

View File

@@ -13,6 +13,12 @@ require('./_composeCommands')({
},
usage: 'xo-vm-backups <field path>',
},
info: {
get main() {
return require('./commands/info')
},
usage: 'xo-vm-backups/*',
},
})(process.argv.slice(2), 'xo-backups').catch(error => {
console.error('main', error)
process.exitCode = 1

View File

@@ -7,9 +7,10 @@
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"dependencies": {
"@xen-orchestra/backups": "^0.1.1",
"@xen-orchestra/fs": "^0.10.4",
"@xen-orchestra/fs": "^0.11.1",
"filenamify": "^4.1.0",
"getopts": "^2.2.5",
"limit-concurrency-decorator": "^0.4.0",
"lodash": "^4.17.15",
"promise-toolbox": "^0.15.0",
"proper-lockfile": "^4.1.1",
@@ -32,7 +33,7 @@
"scripts": {
"postversion": "npm publish --access public"
},
"version": "0.0.0",
"version": "0.2.1",
"license": "AGPL-3.0-or-later",
"author": {
"name": "Vates SAS",

View File

@@ -16,7 +16,7 @@
"postversion": "npm publish --access public"
},
"dependencies": {
"d3-time-format": "^2.2.3",
"d3-time-format": "^3.0.0",
"fs-extra": "^9.0.0"
},
"license": "AGPL-3.0-or-later",

View File

@@ -18,7 +18,7 @@
"preferGlobal": true,
"dependencies": {
"golike-defer": "^0.4.1",
"xen-api": "^0.28.5"
"xen-api": "^0.29.0"
},
"scripts": {
"postversion": "npm publish"

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "@xen-orchestra/fs",
"version": "0.10.4",
"version": "0.11.1",
"license": "AGPL-3.0-or-later",
"description": "The File System for Xen Orchestra backups.",
"keywords": [],
@@ -25,17 +25,18 @@
"@marsaud/smb2": "^0.15.0",
"@sindresorhus/df": "^3.1.1",
"@xen-orchestra/async-map": "^0.0.0",
"aws-sdk": "^2.686.0",
"decorator-synchronized": "^0.5.0",
"execa": "^4.0.2",
"fs-extra": "^9.0.0",
"get-stream": "^5.1.0",
"get-stream": "^6.0.0",
"limit-concurrency-decorator": "^0.4.0",
"lodash": "^4.17.4",
"promise-toolbox": "^0.15.0",
"readable-stream": "^3.0.6",
"through2": "^3.0.0",
"tmp": "^0.1.0",
"xo-remote-parser": "^0.5.0"
"through2": "^4.0.2",
"tmp": "^0.2.1",
"xo-remote-parser": "^0.6.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
@@ -58,7 +59,7 @@
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run clean",
"prepare": "yarn run build",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
},
"author": {

View File

@@ -5,7 +5,7 @@ import getStream from 'get-stream'
import asyncMap from '@xen-orchestra/async-map'
import limit from 'limit-concurrency-decorator'
import path from 'path'
import path, { basename } from 'path'
import synchronized from 'decorator-synchronized'
import { fromCallback, fromEvent, ignoreErrors, timeout } from 'promise-toolbox'
import { parse } from 'xo-remote-parser'
@@ -121,6 +121,7 @@ export default class RemoteHandlerAbstract {
await this.__closeFile(fd)
}
// TODO: remove method
async createOutputStream(
file: File,
{ checksum = false, ...options }: Object = {}
@@ -221,19 +222,15 @@ export default class RemoteHandlerAbstract {
)
}
createWriteStream(
file: File,
options: { end?: number, flags?: string, start?: number } = {}
): Promise<LaxWritable> {
return timeout.call(
this._createWriteStream(
typeof file === 'string' ? normalizePath(file) : file,
{
flags: 'wx',
...options,
}
)
)
// write a stream to a file using a temporary file
async outputStream(
input: Readable | Promise<Readable>,
path: string,
{ checksum = true }: { checksum?: boolean } = {}
): Promise<void> {
path = normalizePath(path)
input = await input
return this._outputStream(await input, normalizePath(path), { checksum })
}
// Free the resources possibly dedicated to put the remote at work, when it
@@ -321,18 +318,6 @@ export default class RemoteHandlerAbstract {
return this._readFile(normalizePath(file), { flags })
}
async refreshChecksum(path: string): Promise<void> {
path = normalizePath(path)
const stream = (await this._createReadStream(path, { flags: 'r' })).pipe(
createChecksumStream()
)
stream.resume() // start reading the whole file
await this._outputFile(checksumFile(path), await stream.checksum, {
flags: 'wx',
})
}
async rename(
oldPath: string,
newPath: string,
@@ -548,6 +533,22 @@ export default class RemoteHandlerAbstract {
return this._outputFile(file, data, options)
}
async _outputStream(input, path, { checksum }) {
const tmpPath = `${dirname(path)}/.${basename(path)}`
const output = await this.createOutputStream(tmpPath, { checksum })
try {
input.pipe(output)
await fromEvent(output, 'finish')
await output.checksumWritten
// $FlowFixMe
await input.task
await this.rename(tmpPath, path, { checksum })
} catch (error) {
await this.unlink(tmpPath, { checksum })
throw error
}
}
_read(
file: File,
buffer: Buffer,

View File

@@ -42,18 +42,6 @@ describe('createOutputStream()', () => {
})
})
describe('createReadStream()', () => {
it(`throws in case of timeout`, async () => {
const testHandler = new TestHandler({
createReadStream: () => new Promise(() => {}),
})
const promise = testHandler.createReadStream('file')
jest.advanceTimersByTime(TIMEOUT)
await expect(promise).rejects.toThrowError(TimeoutError)
})
})
describe('getInfo()', () => {
it('throws in case of timeout', async () => {
const testHandler = new TestHandler({

View File

@@ -2,7 +2,6 @@
import 'dotenv/config'
import asyncIteratorToStream from 'async-iterator-to-stream'
import getStream from 'get-stream'
import { forOwn, random } from 'lodash'
import { fromCallback } from 'promise-toolbox'
import { pipeline } from 'readable-stream'
@@ -91,31 +90,6 @@ handlers.forEach(url => {
})
})
describe('#createReadStream()', () => {
beforeEach(() => handler.outputFile('file', TEST_DATA))
testWithFileDescriptor('file', 'r', async ({ file, flags }) => {
await expect(
await getStream.buffer(
await handler.createReadStream(file, { flags })
)
).toEqual(TEST_DATA)
})
})
describe('#createWriteStream()', () => {
testWithFileDescriptor('file', 'wx', async ({ file, flags }) => {
const stream = await handler.createWriteStream(file, { flags })
await fromCallback(pipeline, createTestDataStream(), stream)
await expect(await handler.readFile('file')).toEqual(TEST_DATA)
})
it('fails if parent dir is missing', async () => {
const error = await rejectionOf(handler.createWriteStream('dir/file'))
expect(error.code).toBe('ENOENT')
})
})
describe('#getInfo()', () => {
let info
beforeAll(async () => {

View File

@@ -4,6 +4,7 @@ import execa from 'execa'
import type RemoteHandler from './abstract'
import RemoteHandlerLocal from './local'
import RemoteHandlerNfs from './nfs'
import RemoteHandlerS3 from './s3'
import RemoteHandlerSmb from './smb'
import RemoteHandlerSmbMount from './smb-mount'
@@ -13,6 +14,7 @@ export type Remote = { url: string }
const HANDLERS = {
file: RemoteHandlerLocal,
nfs: RemoteHandlerNfs,
s3: RemoteHandlerS3,
}
try {

284
@xen-orchestra/fs/src/s3.js Normal file
View File

@@ -0,0 +1,284 @@
import AWS from 'aws-sdk'
import { parse } from 'xo-remote-parser'
import RemoteHandlerAbstract from './abstract'
import { createChecksumStream } from './checksum'
// endpoints https://docs.aws.amazon.com/general/latest/gr/s3.html
// limits: https://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html
const MIN_PART_SIZE = 1024 * 1024 * 5 // 5MB
const MAX_PART_SIZE = 1024 * 1024 * 1024 * 5 // 5GB
const MAX_PARTS_COUNT = 10000
const MAX_OBJECT_SIZE = 1024 * 1024 * 1024 * 1024 * 5 // 5TB
const IDEAL_FRAGMENT_SIZE = Math.ceil(MAX_OBJECT_SIZE / MAX_PARTS_COUNT) // the smallest fragment size that still allows a 5TB upload in 10000 fragments, about 524MB
// Remote handler storing files as objects in an S3-compatible store.
//
// The remote URL's first path component is the bucket; the remainder
// (this._dir) prefixes every object key.
export default class S3Handler extends RemoteHandlerAbstract {
  constructor(remote, _opts) {
    super(remote)
    const { host, path, username, password } = parse(remote.url)
    // https://www.zenko.io/blog/first-things-first-getting-started-scality-s3-server/
    this._s3 = new AWS.S3({
      accessKeyId: username,
      apiVersion: '2006-03-01',
      endpoint: host,
      s3ForcePathStyle: true,
      secretAccessKey: password,
      signatureVersion: 'v4',
    })
    const splitPath = path.split('/').filter(s => s.length)
    this._bucket = splitPath.shift()
    this._dir = splitPath.join('/')
  }

  get type() {
    return 's3'
  }

  // base request params ({ Bucket, Key }) for the object backing `file`
  _createParams(file) {
    return { Bucket: this._bucket, Key: this._dir + file }
  }

  // Uploads the `input` stream to `path`; when `checksum` is true, a checksum
  // is computed on the fly and stored in a sibling `<path>.checksum` object.
  async _outputStream(input, path, { checksum }) {
    let inputStream = input
    if (checksum) {
      const checksumStream = createChecksumStream()
      // errors on the source must reach the consumer of the piped stream
      const forwardError = error => {
        checksumStream.emit('error', error)
      }
      input.pipe(checksumStream)
      input.on('error', forwardError)
      inputStream = checksumStream
    }
    const upload = this._s3.upload(
      {
        ...this._createParams(path),
        Body: inputStream,
      },
      // smallest part size that still allows a 5TB object in 10000 parts
      { partSize: IDEAL_FRAGMENT_SIZE }
    )
    await upload.promise()
    if (checksum) {
      const checksumValue = await inputStream.checksum
      const params = {
        ...this._createParams(path + '.checksum'),
        Body: checksumValue,
      }
      await this._s3.upload(params).promise()
    }
    await input.task
  }

  async _writeFile(file, data, options) {
    // note: `options` (flags…) have no equivalent on object storage
    return this._s3
      .putObject({ ...this._createParams(file), Body: data })
      .promise()
  }

  async _createReadStream(file, options) {
    return this._s3.getObject(this._createParams(file)).createReadStream()
  }

  async _unlink(file) {
    return this._s3.deleteObject(this._createParams(file)).promise()
  }

  // Lists the entry names directly under `dir`: object stores are flat, so
  // "directories" are inferred from the key component following the prefix.
  //
  // NOTE(review): listObjectsV2 returns at most 1000 keys per call; a
  // directory with more entries would be truncated — consider paginating
  // with ContinuationToken.
  async _list(dir) {
    function splitPath(path) {
      return path.split('/').filter(d => d.length)
    }

    const prefix = [this._dir, dir].join('/')
    const splitPrefix = splitPath(prefix)
    const request = this._s3.listObjectsV2({
      Bucket: this._bucket,
      Prefix: splitPrefix.join('/'),
    })
    const result = await request.promise()
    const uniq = new Set()
    for (const entry of result.Contents) {
      const line = splitPath(entry.Key)
      if (line.length > splitPrefix.length) {
        // keep only the first path component after the prefix
        uniq.add(line[splitPrefix.length])
      }
    }
    return [...uniq]
  }

  // S3 has no rename: copy to the new key, then delete the old one
  async _rename(oldPath, newPath) {
    const params = {
      ...this._createParams(newPath),
      CopySource: `/${this._bucket}/${this._dir}${oldPath}`,
    }
    await this._s3.copyObject(params).promise()
    await this._s3.deleteObject(this._createParams(oldPath)).promise()
  }

  async _getSize(file) {
    if (typeof file !== 'string') {
      file = file.fd
    }
    const result = await this._s3.headObject(this._createParams(file)).promise()
    return +result.ContentLength
  }

  // Reads `buffer.length` bytes at `position` into `buffer` via a ranged GET
  async _read(file, buffer, position = 0) {
    if (typeof file !== 'string') {
      file = file.fd
    }
    const params = this._createParams(file)
    params.Range = `bytes=${position}-${position + buffer.length - 1}`
    const result = await this._s3.getObject(params).promise()
    result.Body.copy(buffer)
    return { bytesRead: result.Body.length, buffer }
  }

  // Writes `buffer` at `position` inside the object backing `file`.
  //
  // S3 objects are immutable: small objects (< MIN_PART_SIZE) are downloaded,
  // patched in memory and re-uploaded whole; larger ones are rebuilt
  // server-side with a multipart upload.
  async _write(file, buffer, position) {
    if (typeof file !== 'string') {
      file = file.fd
    }
    const uploadParams = this._createParams(file)
    const fileSize = +(await this._s3.headObject(uploadParams).promise())
      .ContentLength
    if (fileSize < MIN_PART_SIZE) {
      const resultBuffer = Buffer.alloc(
        Math.max(fileSize, position + buffer.length)
      )
      const fileContent = (await this._s3.getObject(uploadParams).promise())
        .Body
      fileContent.copy(resultBuffer)
      buffer.copy(resultBuffer, position)
      await this._s3
        .putObject({ ...uploadParams, Body: resultBuffer })
        .promise()
      return { buffer, bytesWritten: buffer.length }
    } else {
      // using this trick: https://stackoverflow.com/a/38089437/72637
      // multipart fragments have a minimum size of 5MB and a max of 5GB
      // unless they are the last one
      // splitting the file in 3 parts: [prefix, edit, suffix]
      // if `prefix` is bigger than 5MB, it will be sourced from uploadPartCopy()
      // otherwise it will be downloaded and concatenated to `edit`
      // `edit` will always be an upload part
      // `suffix` will always be sourced from uploadPartCopy()
      const multipartParams = await this._s3
        .createMultipartUpload(uploadParams)
        .promise()
      try {
        const parts = []
        const prefixSize = position
        let suffixOffset = prefixSize + buffer.length
        let suffixSize = Math.max(0, fileSize - suffixOffset)
        let hasSuffix = suffixSize > 0
        let editBuffer = buffer
        let editBufferOffset = position
        let partNumber = 1
        if (prefixSize < MIN_PART_SIZE) {
          // prefix too small to be its own part: prepend it to the edit buffer
          const downloadParams = {
            ...uploadParams,
            Range: `bytes=0-${prefixSize - 1}`,
          }
          const prefixBuffer =
            prefixSize > 0
              ? (await this._s3.getObject(downloadParams).promise()).Body
              : Buffer.alloc(0)
          editBuffer = Buffer.concat([prefixBuffer, buffer])
          editBufferOffset = 0
        } else {
          // copy the prefix server-side, in fragments of at most MAX_PART_SIZE
          const fragmentsCount = Math.ceil(prefixSize / MAX_PART_SIZE)
          const prefixFragmentSize = Math.ceil(prefixSize / fragmentsCount)
          let prefixPosition = 0
          for (let i = 0; i < fragmentsCount; i++) {
            // clamp the fragment end to the prefix: prefixFragmentSize is
            // rounded up, so without the clamp the last fragment's copy range
            // would overshoot into the edited region and corrupt the result
            // (the previous code left this case as an empty TODO branch)
            const fragmentEnd = Math.min(
              prefixPosition + prefixFragmentSize,
              prefixSize
            )
            const copyPrefixParams = {
              ...multipartParams,
              PartNumber: partNumber++,
              CopySource: `/${this._bucket}/${this._dir + file}`,
              CopySourceRange: `bytes=${prefixPosition}-${fragmentEnd - 1}`,
            }
            const prefixPart = (
              await this._s3.uploadPartCopy(copyPrefixParams).promise()
            ).CopyPartResult
            parts.push({
              ETag: prefixPart.ETag,
              PartNumber: copyPrefixParams.PartNumber,
            })
            prefixPosition = fragmentEnd
          }
        }
        if (hasSuffix && editBuffer.length < MIN_PART_SIZE) {
          // the edit fragment is too short and is not the last fragment
          // let's steal from the suffix fragment to reach the minimum size
          // the suffix might be too short and itself entirely absorbed in the
          // edit fragment, making it the last one.
          const complementSize = Math.min(
            MIN_PART_SIZE - editBuffer.length,
            suffixSize
          )
          const complementOffset = editBufferOffset + editBuffer.length
          suffixOffset += complementSize
          suffixSize -= complementSize
          hasSuffix = suffixSize > 0
          const prefixRange = `bytes=${complementOffset}-${
            complementOffset + complementSize - 1
          }`
          const downloadParams = { ...uploadParams, Range: prefixRange }
          const complementBuffer = (
            await this._s3.getObject(downloadParams).promise()
          ).Body
          editBuffer = Buffer.concat([editBuffer, complementBuffer])
        }
        // upload the edited region itself
        const editParams = {
          ...multipartParams,
          Body: editBuffer,
          PartNumber: partNumber++,
        }
        const editPart = await this._s3.uploadPart(editParams).promise()
        parts.push({ ETag: editPart.ETag, PartNumber: editParams.PartNumber })
        if (hasSuffix) {
          // copy the suffix server-side, in fragments of at most MAX_PART_SIZE
          // (the last range is already clamped to fileSize below)
          const suffixFragments = Math.ceil(suffixSize / MAX_PART_SIZE)
          const suffixFragmentsSize = Math.ceil(suffixSize / suffixFragments)
          let suffixFragmentOffset = suffixOffset
          for (let i = 0; i < suffixFragments; i++) {
            const fragmentEnd = suffixFragmentOffset + suffixFragmentsSize
            const suffixRange = `bytes=${suffixFragmentOffset}-${
              Math.min(fileSize, fragmentEnd) - 1
            }`
            const copySuffixParams = {
              ...multipartParams,
              PartNumber: partNumber++,
              CopySource: `/${this._bucket}/${this._dir + file}`,
              CopySourceRange: suffixRange,
            }
            const suffixPart = (
              await this._s3.uploadPartCopy(copySuffixParams).promise()
            ).CopyPartResult
            parts.push({
              ETag: suffixPart.ETag,
              PartNumber: copySuffixParams.PartNumber,
            })
            suffixFragmentOffset = fragmentEnd
          }
        }
        await this._s3
          .completeMultipartUpload({
            ...multipartParams,
            MultipartUpload: { Parts: parts },
          })
          .promise()
      } catch (e) {
        // never leave a dangling multipart upload behind
        await this._s3.abortMultipartUpload(multipartParams).promise()
        throw e
      }
    }
  }

  // no real open/close semantics on S3: the "fd" is the path itself
  async _openFile(path, flags) {
    return path
  }

  async _closeFile(fd) {}
}

View File

@@ -49,7 +49,7 @@
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepare": "yarn run build",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
}
}

View File

@@ -0,0 +1,3 @@
// Babel configuration shared across the monorepo, derived from this
// package's manifest.
const buildBabelConfig = require('../../@xen-orchestra/babel-config')

module.exports = buildBabelConfig(require('./package.json'))

View File

@@ -0,0 +1,24 @@
/benchmark/
/benchmarks/
*.bench.js
*.bench.js.map
/examples/
example.js
example.js.map
*.example.js
*.example.js.map
/fixture/
/fixtures/
*.fixture.js
*.fixture.js.map
*.fixtures.js
*.fixtures.js.map
/test/
/tests/
*.spec.js
*.spec.js.map
__snapshots__/

View File

@@ -0,0 +1,141 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->
# @xen-orchestra/openflow
[![Package Version](https://badgen.net/npm/v/@xen-orchestra/openflow)](https://npmjs.org/package/@xen-orchestra/openflow) ![License](https://badgen.net/npm/license/@xen-orchestra/openflow) [![PackagePhobia](https://badgen.net/bundlephobia/minzip/@xen-orchestra/openflow)](https://bundlephobia.com/result?p=@xen-orchestra/openflow) [![Node compatibility](https://badgen.net/npm/node/@xen-orchestra/openflow)](https://npmjs.org/package/@xen-orchestra/openflow)
> Pack and unpack OpenFlow messages
## Install
Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/openflow):
```
> npm install --save @xen-orchestra/openflow
```
## Usage
Unpacking a received OpenFlow message from a socket:
```js
import openflow from '@xen-orchestra/openflow'
import parse from '@xen-orchestra/openflow/parse-socket'
const version = openflow.versions.openFlow11
const ofProtocol = openflow.protocols[version]
function parseOpenFlowMessages(socket) {
for await (const msg of parse(socket)) {
if (msg.header !== undefined) {
const ofType = msg.header.type
switch (ofType) {
case ofProtocol.type.hello:
// Handle OFPT_HELLO
break
case ofProtocol.type.error:
// Handle OFPT_ERROR
break
case ofProtocol.type.echoRequest:
// Handle OFPT_ECHO_REQUEST
break
case ofProtocol.type.packetIn:
// Handle OFPT_PACKET_IN
break
case ofProtocol.type.featuresReply:
// Handle OFPT_FEATURES_REPLY
break
case ofProtocol.type.getConfigReply:
// Handle OFPT_GET_CONFIG_REPLY
break
case ofProtocol.type.portStatus:
// Handle OFPT_PORT_STATUS
break
case ofProtocol.type.flowRemoved:
// Handle OFPT_FLOW_REMOVED
break
default:
// Error: Invalid type
break
}
} else {
// Error: Message is unparseable
}
}
}
```
Unpacking a OpenFlow message from a buffer:
```js
import openflow from '@xen-orchestra/openflow'
const version = openflow.versions.openFlow11
const ofProtocol = openflow.protocols[version]
function processOpenFlowMessage(buf) {
const unpacked = openflow.unpack(buf)
const ofType = unpacked.header.type
switch (ofType) {
case ofProtocol.type.hello:
// Handle OFPT_HELLO
break
case ofProtocol.type.error:
// Handle OFPT_ERROR
break
case ofProtocol.type.echoRequest:
// Handle OFPT_ECHO_REQUEST
break
case ofProtocol.type.packetIn:
// Handle OFPT_PACKET_IN
break
case ofProtocol.type.featuresReply:
// Handle OFPT_FEATURES_REPLY
break
case ofProtocol.type.getConfigReply:
// Handle OFPT_GET_CONFIG_REPLY
break
case ofProtocol.type.portStatus:
// Handle OFPT_PORT_STATUS
break
case ofProtocol.type.flowRemoved:
// Handle OFPT_FLOW_REMOVED
break
default:
// Error: Invalid type
break
}
}
```
Packing an OpenFlow OFPT_HELLO message:
```js
import openflow from '@xen-orchestra/openflow'
const version = openflow.versions.openFlow11
const ofProtocol = openflow.protocols[version]
const buf = openflow.pack({
header: {
version,
type: ofProtocol.type.hello,
xid: 1,
},
})
```
## Contributions
Contributions are _very_ welcomed, either on the documentation or on
the code.
You may:
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
you've encountered;
- fork and create a pull request.
## License
[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)

View File

@@ -0,0 +1,108 @@
Unpacking a received OpenFlow message from a socket:
```js
import openflow from '@xen-orchestra/openflow'
import parse from '@xen-orchestra/openflow/parse-socket'
const version = openflow.versions.openFlow11
const ofProtocol = openflow.protocols[version]
function parseOpenFlowMessages(socket) {
for await (const msg of parse(socket)) {
if (msg.header !== undefined) {
const ofType = msg.header.type
switch (ofType) {
case ofProtocol.type.hello:
// Handle OFPT_HELLO
break
case ofProtocol.type.error:
// Handle OFPT_ERROR
break
case ofProtocol.type.echoRequest:
// Handle OFPT_ECHO_REQUEST
break
case ofProtocol.type.packetIn:
// Handle OFPT_PACKET_IN
break
case ofProtocol.type.featuresReply:
// Handle OFPT_FEATURES_REPLY
break
case ofProtocol.type.getConfigReply:
// Handle OFPT_GET_CONFIG_REPLY
break
case ofProtocol.type.portStatus:
// Handle OFPT_PORT_STATUS
break
case ofProtocol.type.flowRemoved:
// Handle OFPT_FLOW_REMOVED
break
default:
// Error: Invalid type
break
}
} else {
// Error: Message is unparseable
}
}
}
```
Unpacking a OpenFlow message from a buffer:
```js
import openflow from '@xen-orchestra/openflow'
const version = openflow.versions.openFlow11
const ofProtocol = openflow.protocols[version]
function processOpenFlowMessage(buf) {
const unpacked = openflow.unpack(buf)
const ofType = unpacked.header.type
switch (ofType) {
case ofProtocol.type.hello:
// Handle OFPT_HELLO
break
case ofProtocol.type.error:
// Handle OFPT_ERROR
break
case ofProtocol.type.echoRequest:
// Handle OFPT_ECHO_REQUEST
break
case ofProtocol.type.packetIn:
// Handle OFPT_PACKET_IN
break
case ofProtocol.type.featuresReply:
// Handle OFPT_FEATURES_REPLY
break
case ofProtocol.type.getConfigReply:
// Handle OFPT_GET_CONFIG_REPLY
break
case ofProtocol.type.portStatus:
// Handle OFPT_PORT_STATUS
break
case ofProtocol.type.flowRemoved:
// Handle OFPT_FLOW_REMOVED
break
default:
// Error: Invalid type
break
}
}
```
Packing an OpenFlow OFPT_HELLO message:
```js
import openflow from '@xen-orchestra/openflow'
const version = openflow.versions.openFlow11
const ofProtocol = openflow.protocols[version]
const buf = openflow.pack({
header: {
version,
type: ofProtocol.type.hello,
xid: 1,
},
})
```

View File

@@ -0,0 +1,40 @@
{
"description": "Pack and unpack OpenFlow messages",
"private": false,
"name": "@xen-orchestra/openflow",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/openflow",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "@xen-orchestra/openflow",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"version": "0.1.1",
"engines": {
"node": ">=8.10"
},
"main": "dist/",
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"postversion": "npm publish --access public",
"prebuild": "rimraf dist/",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build"
},
"devDependencies": {
"@babel/cli": "^7.7.4",
"@babel/core": "^7.7.4",
"@babel/preset-env": "^7.7.4",
    "cross-env": "^7.0.0",
"rimraf": "^3.0.0"
},
"dependencies": {
"@vates/read-chunk": "^0.1.0"
},
"author": {
"name": "Vates SAS",
"url": "https://vates.fr"
},
"license": "ISC"
}

View File

@@ -0,0 +1 @@
module.exports = require('./dist/parse-socket')

View File

@@ -0,0 +1,9 @@
export default {
size: 8,
offsets: {
version: 0,
type: 1,
length: 2,
xid: 4,
},
}

View File

@@ -0,0 +1,38 @@
import get from './util/get-from-map'
import ofVersion from './version'
// TODO: More openflow versions
import of11 from './openflow-11/index'
import scheme from './default-header-scheme'
// =============================================================================
const OPENFLOW = {
[ofVersion.openFlow11]: of11,
}
// =============================================================================
export default {
versions: ofVersion,
protocols: { [ofVersion.openFlow11]: of11.protocol },
// ---------------------------------------------------------------------------
pack: object => {
const version = object.header.version
return get(
OPENFLOW,
version,
`Unsupported OpenFlow version: ${version}`
).pack(object)
},
unpack: (buffer, offset = 0) => {
const version = buffer.readUInt8(offset + scheme.offsets.version)
return get(
OPENFLOW,
version,
`Unsupported OpenFlow version: ${version}`
).unpack(buffer, offset)
},
}

View File

@@ -0,0 +1,58 @@
import get from '../../util/get-from-map'
import ofOutput from './output'
import of from '../openflow-11'
// =============================================================================
const ACTION = {
[of.actionType.output]: ofOutput,
/* TODO:
[of.actionType.group]: ,
[of.actionType.setVlanId]: ,
[of.actionType.setVlanPcp]: ,
[of.actionType.setDlSrc]: ,
[of.actionType.setDlDst]: ,
[of.actionType.setNwSrc]: ,
[of.actionType.setNwDst]: ,
[of.actionType.setNwTos]: ,
[of.actionType.setNwEcn]: ,
[of.actionType.setTpSrc]: ,
[of.actionType.setTpDst]: ,
[of.actionType.copyTtlOut]: ,
[of.actionType.copyTtlIn]: ,
[of.actionType.setMplsLabel]: ,
[of.actionType.setMplsTc]: ,
[of.actionType.setMplsTtl]: ,
[of.actionType.decMplsTtl]: ,
[of.actionType.pushVlan]: ,
[of.actionType.popVlan]: ,
[of.actionType.pushMpls]: ,
[of.actionType.popMpls]: ,
[of.actionType.setQueue]: ,
[of.actionType.setNwTtl]: ,
[of.actionType.decNwTtl]: ,
[of.actionType.experimenter]:
*/
}
// =============================================================================
export default {
pack: (object, buffer = undefined, offset = 0) => {
const { type } = object
return get(ACTION, type, `Invalid action type: ${type}`).pack(
object,
buffer,
offset
)
},
unpack: (buffer, offset = 0) => {
const type = buffer.readUInt16BE(offset + of.offsets.actionHeader.type)
return get(ACTION, type, `Invalid action type: ${type}`).unpack(
buffer,
offset
)
},
}

View File

@@ -0,0 +1,45 @@
import assert from 'assert'
import of from '../openflow-11'
// =============================================================================
const OFFSETS = of.offsets.actionOutput
const PAD_LENGTH = 6
// =============================================================================
export default {
pack: (object, buffer = undefined, offset = 0) => {
assert(object.type === of.actionType.output)
object.len = of.sizes.actionOutput
buffer = buffer !== undefined ? buffer : Buffer.alloc(object.len)
buffer.writeUInt16BE(object.type, offset + OFFSETS.type)
buffer.writeUInt16BE(object.len, offset + OFFSETS.len)
buffer.writeUInt32BE(object.port, offset + OFFSETS.port)
buffer.writeUInt16BE(object.max_len, offset + OFFSETS.maxLen)
buffer.fill(0, offset + OFFSETS.pad, offset + OFFSETS.pad + PAD_LENGTH)
return buffer
},
unpack: (buffer, offset = 0) => {
const object = {}
object.type = buffer.readUInt16BE(offset + OFFSETS.type)
assert(object.type === of.actionType.output)
object.len = buffer.readUInt16BE(offset + OFFSETS.len)
assert(object.len === of.sizes.actionOutput)
object.port = buffer.readUInt32BE(offset + OFFSETS.port)
object.max_len = buffer.readUInt16BE(offset + OFFSETS.maxLen)
return object
},
}

View File

@@ -0,0 +1,49 @@
import get from '../util/get-from-map'
import echo from './message/echo'
import error from './message/error'
import hello from './message/hello'
import featuresRequest from './message/features-request'
import featuresReply from './message/features-reply'
import getConfigRequest from './message/get-config-request'
import switchConfig from './message/switch-config'
import flowMod from './message/flow-mod'
import of from './openflow-11'
// =============================================================================
const MESSAGE = {
[of.type.hello]: hello,
[of.type.error]: error,
[of.type.featuresRequest]: featuresRequest,
[of.type.featuresReply]: featuresReply,
[of.type.echoRequest]: echo,
[of.type.echoReply]: echo,
[of.type.getConfigRequest]: getConfigRequest,
[of.type.getConfigReply]: switchConfig,
[of.type.setConfig]: switchConfig,
[of.type.flowMod]: flowMod,
}
// =============================================================================
export default {
protocol: of,
// ---------------------------------------------------------------------------
pack: object => {
const type = object.header.type
return get(MESSAGE, type, `Invalid OpenFlow message type: ${type}`).pack(
object
)
},
unpack: (buffer, offset = 0) => {
const type = buffer.readUInt8(offset + of.offsets.header.type)
return get(MESSAGE, type, `Invalid OpenFlow message type: ${type}`).unpack(
buffer,
offset
)
},
}

View File

@@ -0,0 +1,102 @@
import assert from 'assert'
import get from '../../util/get-from-map'
import ofAction from '../action/action'
import of from '../openflow-11'
// =============================================================================
const SIZES = {
[of.actionType.output]: of.sizes.actionOutput,
[of.actionType.group]: of.sizes.actionGroup,
[of.actionType.setVlanId]: of.sizes.actionVlanId,
[of.actionType.setVlanPcp]: of.sizes.actionVlanPcp,
[of.actionType.setDlSrc]: of.sizes.actionDlAddr,
[of.actionType.setDlDst]: of.sizes.actionDlAddr,
[of.actionType.setNwSrc]: of.sizes.actionNwAddr,
[of.actionType.setNwDst]: of.sizes.actionNwAddr,
[of.actionType.setNwTos]: of.sizes.actionNwTos,
[of.actionType.setNwEcn]: of.sizes.actionNwEcn,
[of.actionType.setTpSrc]: of.sizes.actionTpPort,
[of.actionType.setTpDst]: of.sizes.actionTpPort,
[of.actionType.copyTtlOut]: of.sizes.actionHeader,
[of.actionType.copyTtlIn]: of.sizes.actionHeader,
[of.actionType.setMplsLabel]: of.sizes.actionMplsLabel,
[of.actionType.setMplsTc]: of.sizes.actionMplsTc,
[of.actionType.setMplsTtl]: of.sizes.actionMplsTtl,
[of.actionType.decMplsTtl]: of.sizes.actionMplsTtl,
[of.actionType.pushVlan]: of.sizes.actionPush,
[of.actionType.popVlan]: of.sizes.actionHeader,
[of.actionType.pushMpls]: of.sizes.actionPush,
[of.actionType.popMpls]: of.sizes.actionPopMpls,
[of.actionType.setQueue]: of.sizes.actionSetQueue,
[of.actionType.setNwTtl]: of.sizes.actionNwTtl,
[of.actionType.decNwTtl]: of.sizes.actionNwTtl,
}
// -----------------------------------------------------------------------------
const TYPES = [
of.instructionType.clearActions,
of.instructionType.writeActions,
of.instructionType.applyActions,
]
const OFFSETS = of.offsets.instructionActions
const PAD_LENGTH = 4
// =============================================================================
export default {
pack: (object, buffer = undefined, offset = 0) => {
const { type } = object
assert(TYPES.includes(type))
object.len = of.sizes.instructionActions
const { actions = [] } = object
actions.forEach(action => {
assert(Object.values(of.actionType).includes(action.type))
// TODO: manage experimenter
object.len += get(
SIZES,
action.type,
`Invalid action type: ${action.type}`
)
})
buffer = buffer !== undefined ? buffer : Buffer.alloc(object.len)
buffer.writeUInt16BE(type, offset + OFFSETS.type)
buffer.writeUInt16BE(object.len, offset + OFFSETS.len)
buffer.fill(0, offset + OFFSETS.pad, offset + OFFSETS.pad + PAD_LENGTH)
let actionOffset = offset + OFFSETS.actions
actions.forEach(action => {
ofAction.pack(action, buffer, actionOffset)
actionOffset += SIZES[action.type]
})
},
unpack: (buffer = undefined, offset = 0) => {
const type = buffer.readUInt16BE(offset + OFFSETS.type)
assert(TYPES.includes(type))
const object = { type }
object.len = buffer.readUInt16BE(offset + OFFSETS.len)
if (type === of.instructionType.clearActions) {
// No actions for this type
return object
}
object.actions = []
let actionOffset = offset + OFFSETS.actions
while (actionOffset < object.len) {
const action = ofAction.unpack(buffer, actionOffset)
actionOffset += action.len
object.actions.push(action)
}
return object
},
}

View File

@@ -0,0 +1,43 @@
import get from '../../util/get-from-map'
import actions from './actions'
// import goToTable from './goToTable'
import of from '../openflow-11'
// import writeMetadata from './writeMetadata'
// =============================================================================
const INSTRUCTION = {
/* TODO:
[of.instructionType.goToTable]: goToTable,
[of.instructionType.writeMetadata]: writeMetadata,
*/
[of.instructionType.writeActions]: actions,
[of.instructionType.applyActions]: actions,
[of.instructionType.clearActions]: actions,
}
// -----------------------------------------------------------------------------
const OFFSETS = of.offsets.instruction
// =============================================================================
export default {
pack: (object, buffer = undefined, offset = 0) => {
const { type } = object
return get(INSTRUCTION, type, `Invalid instruction type: ${type}`).pack(
object,
buffer,
offset
)
},
unpack: (buffer = undefined, offset = 0) => {
const type = buffer.readUInt16BE(offset + OFFSETS.type)
return get(INSTRUCTION, type, `Invalid instruction type: ${type}`).unpack(
buffer,
offset
)
},
}

View File

@@ -0,0 +1,46 @@
import assert from 'assert'
import ofHeader from './header'
import of from '../openflow-11'
// =============================================================================
const OFFSETS = of.offsets.echo
const TYPES = [of.type.echoRequest, of.type.echoReply]
// =============================================================================
export default {
pack: object => {
const { header, data } = object
assert(TYPES.includes(header.type))
const dataSize = data !== undefined ? data.length : 0
header.length = of.sizes.header + dataSize
const buffer = Buffer.alloc(header.length)
ofHeader.pack(header, buffer, OFFSETS.header)
if (dataSize > 0) {
data.copy(buffer, OFFSETS.data, 0, dataSize)
}
return buffer
},
unpack: (buffer, offset = 0) => {
const header = ofHeader.unpack(buffer, offset + OFFSETS.header)
assert(TYPES.includes(header.type))
const object = { header }
const dataSize = header.length - of.sizes.header
if (dataSize > 0) {
object.data = Buffer.alloc(dataSize)
buffer.copy(
object.data,
0,
offset + OFFSETS.data,
offset + OFFSETS.data + dataSize
)
}
return object
},
}

View File

@@ -0,0 +1,79 @@
import assert from 'assert'
import get from '../../util/get-from-map'
import ofHeader from './header'
import of from '../openflow-11'
// =============================================================================
const ERROR_CODE = {
[of.errorType.helloFailed]: of.helloFailedCode,
[of.errorType.badRequest]: of.badRequestCode,
[of.errorType.badAction]: of.badActionCode,
[of.errorType.badInstruction]: of.badInstructionCode,
[of.errorType.badMatch]: of.badMatchCode,
[of.errorType.flowModFailed]: of.flowModFailedCode,
[of.errorType.groupModFailed]: of.groupModFailedCode,
[of.errorType.portModFailed]: of.portModFailedCode,
[of.errorType.tableModFailed]: of.tableModFailedCode,
[of.errorType.queueOpFailed]: of.queueOpFailedCode,
[of.errorType.switchConfigFailed]: of.switchConfigFailedCode,
}
// -----------------------------------------------------------------------------
const OFFSETS = of.offsets.errorMsg
// =============================================================================
export default {
pack: (object, buffer = undefined, offset = 0) => {
const { header, type, code, data } = object
assert(header.type === of.type.error)
const errorCodes = get(ERROR_CODE, type, `Invalid error type: ${type}`)
assert(Object.values(errorCodes).includes(code))
object.length = of.sizes.errorMsg
if (data !== undefined) {
object.length += data.length
}
buffer = buffer !== undefined ? buffer : Buffer.alloc(object.length)
ofHeader.pack(header, buffer, offset + OFFSETS.header)
buffer.writeUInt16BE(type, offset + OFFSETS.type)
buffer.writeUInt16BE(code, offset + OFFSETS.code)
if (data !== undefined) {
data.copy(buffer, offset + OFFSETS.data, 0, data.length)
}
return buffer
},
unpack: (buffer, offset = 0) => {
const header = ofHeader.unpack(buffer, offset + OFFSETS.header)
assert(header.type === of.type.error)
const type = buffer.readUInt16BE(offset + OFFSETS.type)
const errorCodes = get(ERROR_CODE, type, `Invalid error type: ${type}`)
const code = buffer.readUInt16BE(offset + OFFSETS.code)
assert(Object.values(errorCodes).includes(code))
const object = { header, type, code }
const dataSize = header.length - of.sizes.errorMsg
if (dataSize > 0) {
object.data = Buffer.alloc(dataSize)
buffer.copy(
object.data,
0,
offset + OFFSETS.data,
offset + OFFSETS.data + dataSize
)
}
return object
},
}

View File

@@ -0,0 +1,73 @@
import assert from 'assert'
import ofHeader from './header'
import ofPort from '../struct/port'
import of from '../openflow-11'
// =============================================================================
const OFFSETS = of.offsets.switchFeatures
const PAD_LENGTH = 3
// =============================================================================
export default {
pack: object => {
const {
header,
datapath_id: did,
n_buffers: nBufs,
n_tables: nTables,
capabilities,
reserved,
ports,
} = object
assert(header.type === of.type.featuresReply)
header.length = of.sizes.switchFeatures + ports.length * of.sizes.port
const buffer = Buffer.alloc(header.length)
ofHeader.pack(header, buffer, OFFSETS.header)
buffer.writeBigUInt64BE(did, OFFSETS.datapathId)
buffer.writeUInt32BE(nBufs, OFFSETS.nBuffers)
buffer.writeUInt8(nTables, OFFSETS.nTables)
buffer.fill(0, OFFSETS.pad, OFFSETS.pad + PAD_LENGTH)
buffer.writeUInt32BE(capabilities, OFFSETS.capabilities)
buffer.writeUInt32BE(reserved, OFFSETS.reserved)
let portsOffset = 0
ports.forEach(port => {
ofPort.pack(port, buffer, OFFSETS.ports + portsOffset++ * of.sizes.port)
})
return buffer
},
unpack: (buffer, offset = 0) => {
const header = ofHeader.unpack(buffer, offset + OFFSETS.header)
assert(header.type === of.type.featuresReply)
const object = { header }
object.datapath_id = buffer.toString(
'hex',
offset + OFFSETS.datapathId,
offset + OFFSETS.datapathId + 8
)
object.n_buffers = buffer.readUInt32BE(offset + OFFSETS.nBuffers)
object.n_tables = buffer.readUInt8(offset + OFFSETS.nTables)
object.capabilities = buffer.readUInt32BE(offset + OFFSETS.capabilities)
object.reserved = buffer.readUInt32BE(offset + OFFSETS.reserved)
object.ports = []
const nPorts = (header.length - of.sizes.switchFeatures) / of.sizes.port
for (let i = 0; i < nPorts; ++i) {
object.ports.push(
ofPort.unpack(buffer, offset + OFFSETS.ports + i * of.sizes.port)
)
}
return object
},
}

View File

@@ -0,0 +1,24 @@
import assert from 'assert'
import ofHeader from './header'
import of from '../openflow-11'
// =============================================================================
export default {
pack: object => {
const { header } = object
assert(header.type === of.type.featuresRequest)
header.length = of.sizes.featuresRequest
return ofHeader.pack(header)
},
unpack: (buffer, offset = 0) => {
const header = ofHeader.unpack(buffer, offset)
assert(header.type === of.type.featuresRequest)
assert(header.length === of.sizes.featuresRequest)
return { header }
},
}

View File

@@ -0,0 +1,197 @@
import assert from 'assert'
import get from '../../util/get-from-map'
import ofInstruction from '../instruction/instruction'
import uIntHelper from '../../util/uint-helper'
import ofHeader from './header'
import of from '../openflow-11'
import ofMatch from '../struct/match/match'
// =============================================================================
const INSTRUCTION_SIZE = {
[of.instructionType.goToTable]: of.sizes.instructionWriteMetadata,
[of.instructionType.writeMetadata]: of.sizes.instructionGotoTable,
[of.instructionType.clearActions]: of.sizes.instructionActions,
[of.instructionType.writeActions]: of.sizes.instructionActions,
[of.instructionType.applyActions]: of.sizes.instructionActions,
}
const ACTION_SIZE = {
[of.actionType.output]: of.sizes.actionOutput,
[of.actionType.group]: of.sizes.actionGroup,
[of.actionType.setVlanId]: of.sizes.actionVlanId,
[of.actionType.setVlanPcp]: of.sizes.actionVlanPcp,
[of.actionType.setDlSrc]: of.sizes.actionDlAddr,
[of.actionType.setDlDst]: of.sizes.actionDlAddr,
[of.actionType.setNwSrc]: of.sizes.actionNwAddr,
[of.actionType.setNwDst]: of.sizes.actionNwAddr,
[of.actionType.setNwTos]: of.sizes.actionNwTos,
[of.actionType.setNwEcn]: of.sizes.actionNwEcn,
[of.actionType.setTpSrc]: of.sizes.actionTpPort,
[of.actionType.setTpDst]: of.sizes.actionTpPort,
[of.actionType.copyTtlOut]: of.sizes.actionHeader,
[of.actionType.copyTtlIn]: of.sizes.actionHeader,
[of.actionType.setMplsLabel]: of.sizes.actionMplsLabel,
[of.actionType.setMplsTc]: of.sizes.actionMplsTc,
[of.actionType.setMplsTtl]: of.sizes.actionMplsTtl,
[of.actionType.decMplsTtl]: of.sizes.actionMplsTtl,
[of.actionType.pushVlan]: of.sizes.actionPush,
[of.actionType.popVlan]: of.sizes.actionHeader,
[of.actionType.pushMpls]: of.sizes.actionPush,
[of.actionType.popMpls]: of.sizes.actionPopMpls,
[of.actionType.setQueue]: of.sizes.actionSetQueue,
[of.actionType.setNwTtl]: of.sizes.actionNwTtl,
[of.actionType.decNwTtl]: of.sizes.actionNwTtl,
}
// -----------------------------------------------------------------------------
const OFFSETS = of.offsets.flowMod
const COOKIE_LENGTH = 8
const PAD_LENGTH = 2
// =============================================================================
export default {
pack: (object, buffer = undefined, offset = 0) => {
const {
header,
cookie,
cookie_mask,
table_id = 0,
command,
idle_timeout = 0,
hard_timeout = 0,
priority = of.defaultPriority,
buffer_id = 0xffffffff,
out_port = of.port.any,
out_group = of.group.any,
flags = 0,
match,
instructions = [],
} = object
// fill header length
header.length = of.sizes.flowMod
instructions.forEach(instruction => {
header.length += get(
INSTRUCTION_SIZE,
instruction.type,
`Invalid instruction type: ${instruction.type}`
)
const { actions = [] } = instruction
actions.forEach(action => {
header.length += get(
ACTION_SIZE,
action.type,
`Invalid instruction type: ${action.type}`
)
})
})
buffer = buffer !== undefined ? buffer : Buffer.alloc(header.length)
ofHeader.pack(header, buffer, offset + OFFSETS.header)
if (cookie !== undefined) {
if (cookie_mask !== undefined) {
cookie_mask.copy(buffer, offset + OFFSETS.cookieMask)
} else {
buffer.fill(
0x00,
offset + OFFSETS.cookie_mask,
offset + OFFSETS.cookieMask + COOKIE_LENGTH
)
}
cookie.copy(buffer, offset + OFFSETS.cookie)
} else {
buffer.fill(
0x00,
offset + OFFSETS.cookie,
offset + OFFSETS.cookie + COOKIE_LENGTH
)
buffer.fill(
0xff,
offset + OFFSETS.cookieMask,
offset + OFFSETS.cookieMask + COOKIE_LENGTH
)
}
buffer.writeUInt8(table_id, offset + OFFSETS.tableId)
assert(Object.values(of.flowModCommand).includes(command))
buffer.writeUInt8(command, offset + OFFSETS.command)
buffer.writeUInt16BE(idle_timeout, offset + OFFSETS.idleTimeout)
buffer.writeUInt16BE(hard_timeout, offset + OFFSETS.hardTimeout)
buffer.writeUInt16BE(priority, offset + OFFSETS.priority)
buffer.writeUInt32BE(buffer_id, offset + OFFSETS.bufferId)
buffer.writeUInt32BE(out_port, offset + OFFSETS.outPort)
buffer.writeUInt32BE(out_group, offset + OFFSETS.outGroup)
buffer.writeUInt16BE(flags, offset + OFFSETS.flags)
buffer.fill(0, offset + OFFSETS.pad, offset + OFFSETS.pad + PAD_LENGTH)
ofMatch.pack(match, buffer, offset + OFFSETS.match)
let instructionOffset = offset + OFFSETS.instructions
instructions.forEach(instruction => {
ofInstruction.pack(instruction, buffer, instructionOffset)
instructionOffset += instruction.len
})
return buffer
},
unpack: (buffer, offset = 0) => {
const header = ofHeader.unpack(buffer, offset + OFFSETS.header)
assert(header.type === of.type.flowMod)
const object = { header }
object.cookie = Buffer.alloc(COOKIE_LENGTH)
buffer.copy(
object.cookie,
0,
offset + OFFSETS.cookie,
offset + OFFSETS.cookie + COOKIE_LENGTH
)
if (
!uIntHelper.isUInt64None([
buffer.readUInt32BE(offset + OFFSETS.cookieMask),
buffer.readUInt32BE(offset + OFFSETS.cookieMask + COOKIE_LENGTH / 2),
])
) {
object.cookie_mask = Buffer.alloc(COOKIE_LENGTH)
buffer.copy(
object.cookie_mask,
0,
offset + OFFSETS.cookieMask,
offset + OFFSETS.cookieMask + COOKIE_LENGTH
)
}
object.table_id = buffer.readUInt8(offset + OFFSETS.tableId)
object.command = buffer.readUInt8(offset + OFFSETS.command)
assert(Object.values(of.flowModCommand).includes(object.command))
object.idle_timeout = buffer.readUInt16BE(offset + OFFSETS.idleTimeout)
object.hard_timeout = buffer.readUInt16BE(offset + OFFSETS.hardTimeout)
object.priority = buffer.readUInt16BE(offset + OFFSETS.priority)
object.buffer_id = buffer.readUInt32BE(offset + OFFSETS.bufferId)
object.out_port = buffer.readUInt32BE(offset + OFFSETS.outPort)
object.out_group = buffer.readUInt32BE(offset + OFFSETS.outGroup)
object.flags = buffer.readUInt16BE(offset + OFFSETS.flags)
object.match = ofMatch.unpack(buffer, offset + OFFSETS.match)
object.instructions = []
let instructionOffset = offset + OFFSETS.instructions
while (instructionOffset < header.length) {
const instruction = ofInstruction.unpack(buffer, instructionOffset)
object.instructions.push(instruction)
instructionOffset += instruction.len
}
return object
},
}

View File

@@ -0,0 +1,24 @@
import assert from 'assert'
import ofHeader from './header'
import of from '../openflow-11'
// =============================================================================
export default {
pack: object => {
const { header } = object
assert(header.type === of.type.getConfigRequest)
header.length = of.sizes.header
return ofHeader.pack(header)
},
unpack: (buffer, offset = 0) => {
const header = ofHeader.unpack(buffer, offset)
assert(header.type === of.type.getConfigRequest)
assert(header.length === of.sizes.header)
return { header }
},
}

View File

@@ -0,0 +1,39 @@
import assert from 'assert'
import of from '../openflow-11'
// =============================================================================
const OFFSETS = of.offsets.header
// =============================================================================
export default {
pack: (object, buffer = undefined, offset = 0) => {
buffer = buffer !== undefined ? buffer : Buffer.alloc(of.sizes.header)
const { version, type, length, xid } = object
assert(version === of.version)
assert(Object.values(of.type).includes(type))
buffer.writeUInt8(version, offset + OFFSETS.version)
buffer.writeUInt8(type, offset + OFFSETS.type)
buffer.writeUInt16BE(length, offset + OFFSETS.length)
buffer.writeUInt32BE(xid, offset + OFFSETS.xid)
return buffer
},
unpack: (buffer, offset = 0) => {
const version = buffer.readUInt8(offset + OFFSETS.version)
assert(version === of.version)
const type = buffer.readUInt8(offset + OFFSETS.type)
assert(Object.values(of.type).includes(type))
const length = buffer.readUInt16BE(offset + OFFSETS.length)
const xid = buffer.readUInt32BE(offset + OFFSETS.xid)
return { version, type, length, xid }
},
}

View File

@@ -0,0 +1,27 @@
import assert from 'assert'
import ofHeader from './header'
import of from '../openflow-11'
// =============================================================================
const OFFSETS = of.offsets.hello
// =============================================================================
export default {
pack: object => {
const { header } = object
assert(header.type === of.type.hello)
header.length = of.sizes.hello
return ofHeader.pack(header)
},
unpack: (buffer, offset = 0) => {
const header = ofHeader.unpack(buffer, offset + OFFSETS.header)
assert(header.type === of.type.hello)
return { header }
},
}

View File

@@ -0,0 +1,38 @@
import assert from 'assert'
import ofHeader from './header'
import of from '../openflow-11'
// =============================================================================
const OFFSETS = of.offsets.switchConfig
const TYPES = [of.type.getConfigReply, of.type.setConfig]
// =============================================================================
export default {
pack: object => {
const { header, flags, miss_send_len } = object
assert(TYPES.includes(header.type))
header.length = of.sizes.switchConfig
const buffer = Buffer.alloc(header.length)
ofHeader.pack(header, buffer, OFFSETS.header)
buffer.writeUInt16BE(flags, OFFSETS.flags)
buffer.writeUInt16BE(miss_send_len, OFFSETS.missSendLen)
return buffer
},
unpack: (buffer, offset = 0) => {
const header = ofHeader.unpack(buffer, offset + OFFSETS.header)
assert(TYPES.includes(header.type))
assert(header.length === of.sizes.switchConfig)
const flags = buffer.readUInt16BE(offset + OFFSETS.flags)
const miss_send_len = buffer.readUInt16BE(offset + OFFSETS.missSendLen)
return { header, flags, miss_send_len }
},
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,374 @@
import assert from 'assert'
import addressParser from '../../../util/addrress-parser'
import uIntHelper from '../../../util/uint-helper'
import of from '../../openflow-11'
// =============================================================================
const OFFSETS = of.offsets.match
const WILDCARDS = of.flowWildcards
const IP4_ADDR_LEN = 4
const METADATA_LENGTH = 8
const PAD_LENGTH = 1
const PAD2_LENGTH = 3
// =============================================================================
export default {
pack: (object, buffer = undefined, offset = 0) => {
assert(object.type === of.matchType.standard)
object.length = of.sizes.match
buffer = buffer !== undefined ? buffer : Buffer.alloc(object.length)
buffer.writeUInt16BE(object.type, offset + OFFSETS.type)
buffer.writeUInt16BE(object.length, offset + OFFSETS.length)
let wildcards = 0
let inPort = 0
if (object.in_port !== undefined) {
inPort = object.in_port
} else {
wildcards |= WILDCARDS.inPort
}
buffer.writeUInt32BE(inPort, offset + OFFSETS.inPort)
if (object.dl_src !== undefined) {
if (object.dl_src_mask !== undefined) {
addressParser.stringToEth(
object.dl_src_mask,
buffer,
offset + OFFSETS.dlSrcMask
)
} else {
buffer.fill(
0x00,
offset + OFFSETS.dlSrcMask,
offset + OFFSETS.dlSrcMask + of.ethAddrLen
)
}
addressParser.stringToEth(object.dl_src, buffer, offset + OFFSETS.dlSrc)
} else {
buffer.fill(
0x00,
offset + OFFSETS.dlSrc,
offset + OFFSETS.dlSrc + of.ethAddrLen
)
buffer.fill(
0xff,
offset + OFFSETS.dlSrcMask,
offset + OFFSETS.dlSrcMask + of.ethAddrLen
)
}
if (object.dl_dst !== undefined) {
if (object.dl_dst_mask !== undefined) {
addressParser.stringToEth(
object.dl_dst_mask,
buffer,
offset + OFFSETS.dlDstMask
)
} else {
buffer.fill(
0x00,
offset + OFFSETS.dlDstMask,
offset + OFFSETS.dlDstMask + of.ethAddrLen
)
}
addressParser.stringToEth(object.dl_dst, buffer, offset + OFFSETS.dlDst)
} else {
buffer.fill(
0x00,
offset + OFFSETS.dlDst,
offset + OFFSETS.dlDst + of.ethAddrLen
)
buffer.fill(
0xff,
offset + OFFSETS.dlDstMask,
offset + OFFSETS.dlDstMask + of.ethAddrLen
)
}
let dlVlan = 0
if (object.dl_vlan !== undefined) {
dlVlan = object.dl_vlan
} else {
wildcards |= WILDCARDS.dlVlan
}
buffer.writeUInt16BE(dlVlan, offset + OFFSETS.dlVlan)
let dlVlanPcp = 0
if (object.dl_vlan_pcp !== undefined) {
dlVlanPcp = object.dl_vlan_pcp
} else {
wildcards |= WILDCARDS.dlVlanPcp
}
buffer.writeUInt8(dlVlanPcp, offset + OFFSETS.dlVlanPcp)
buffer.fill(0, offset + OFFSETS.pad1, offset + OFFSETS.pad1 + PAD_LENGTH)
let dlType = 0
if (object.dl_type !== undefined) {
dlType = object.dl_type
} else {
wildcards |= WILDCARDS.dlType
}
buffer.writeUInt16BE(dlType, offset + OFFSETS.dlType)
let nwTos = 0
if (object.nw_tos !== undefined) {
nwTos = object.nw_tos
} else {
wildcards |= WILDCARDS.nwTos
}
buffer.writeUInt8(nwTos, offset + OFFSETS.nwTos)
let nwProto = 0
if (object.nw_proto !== undefined) {
nwProto = object.nw_proto
} else {
wildcards |= WILDCARDS.nwProto
}
buffer.writeUInt8(nwProto, offset + OFFSETS.nwProto)
if (object.nw_src !== undefined) {
if (object.nw_src_mask !== undefined) {
addressParser.stringToip4(
object.nw_src_mask,
buffer,
offset + OFFSETS.nwSrcMask
)
} else {
buffer.fill(
0x00,
offset + OFFSETS.nwSrcMask,
offset + OFFSETS.nwSrcMask + IP4_ADDR_LEN
)
}
addressParser.stringToip4(object.nw_src, buffer, offset + OFFSETS.nwSrc)
} else {
buffer.fill(
0x00,
offset + OFFSETS.nwSrc,
offset + OFFSETS.nwSrc + IP4_ADDR_LEN
)
buffer.fill(
0xff,
offset + OFFSETS.nwSrcMask,
offset + OFFSETS.nwSrcMask + IP4_ADDR_LEN
)
}
if (object.nw_dst !== undefined) {
if (object.nw_dst_mask !== undefined) {
addressParser.stringToip4(
object.nw_dst_mask,
buffer,
offset + OFFSETS.nwDstMask
)
} else {
buffer.fill(
0x00,
offset + OFFSETS.nwDstMask,
offset + OFFSETS.nwDstMask + IP4_ADDR_LEN
)
}
addressParser.stringToip4(object.nw_dst, buffer, offset + OFFSETS.nwDst)
} else {
buffer.fill(
0x00,
offset + OFFSETS.nwDst,
offset + OFFSETS.nwDst + IP4_ADDR_LEN
)
buffer.fill(
0xff,
offset + OFFSETS.nwDstMask,
offset + OFFSETS.nwDstMask + IP4_ADDR_LEN
)
}
let tpSrc = 0
if (object.tp_src !== undefined) {
tpSrc = object.tp_src
} else {
wildcards |= WILDCARDS.tpSrc
}
buffer.writeUInt16BE(tpSrc, offset + OFFSETS.tpSrc)
let tpDst = 0
if (object.tp_dst !== undefined) {
tpDst = object.tp_dst
} else {
wildcards |= WILDCARDS.tpDst
}
buffer.writeUInt16BE(tpDst, offset + OFFSETS.tpDst)
let mplsLabel = 0
if (object.mpls_label !== undefined) {
mplsLabel = object.mpls_label
} else {
wildcards |= WILDCARDS.mplsLabel
}
buffer.writeUInt32BE(mplsLabel, offset + OFFSETS.mplsLabel)
let mplsTc = 0
if (object.mpls_tc !== undefined) {
mplsTc = object.mpls_tc
} else {
wildcards |= WILDCARDS.mplsTc
}
buffer.writeUInt8(mplsTc, offset + OFFSETS.mplsTc)
buffer.fill(0, offset + OFFSETS.pad2, offset + OFFSETS.pad2 + PAD2_LENGTH)
if (object.metadata !== undefined) {
if (object.metadata_mask !== undefined) {
buffer.copy(
object.metadata_mask,
0,
offset + OFFSETS.metadataMask,
offset + OFFSETS.metadataMask + METADATA_LENGTH
)
} else {
buffer.fill(
0x00,
offset + OFFSETS.metadataMask,
offset + OFFSETS.metadataMask + METADATA_LENGTH
)
}
buffer.copy(
object.metadata,
0,
offset + OFFSETS.metadata,
offset + OFFSETS.metadata + METADATA_LENGTH
)
} else {
buffer.fill(
0x00,
offset + OFFSETS.metadata,
offset + OFFSETS.metadata + METADATA_LENGTH
)
buffer.fill(
0xff,
offset + OFFSETS.metadataMask,
offset + OFFSETS.metadataMask + METADATA_LENGTH
)
}
buffer.writeUInt32BE(wildcards, offset + OFFSETS.wildcards)
return buffer
},
unpack: (buffer, offset = 0) => {
const object = {}
object.type = buffer.readUInt16BE(offset + OFFSETS.type)
assert(object.type === of.matchType.standard)
object.length = buffer.readUInt16BE(offset + OFFSETS.length)
assert(object.length === of.sizes.match)
// Wildcards indicate which value to use for the match.
// if `wildcards & of.wildcards.<value>` === 0 then `value` is not wildcarded and must be used.
const wildcards = (object.wildcards = buffer.readUInt32BE(
offset + OFFSETS.wildcards
))
if ((wildcards & WILDCARDS.inPort) === 0) {
object.in_port = buffer.readUInt32BE(offset + OFFSETS.inPort)
}
if (!addressParser.isEthMaskAll(buffer, offset + OFFSETS.dlSrcMask)) {
if (!addressParser.isEthMaskNone(buffer, offset + OFFSETS.dlSrcMask)) {
object.dl_src_mask = addressParser.ethToString(
buffer,
offset + OFFSETS.dlSrcMask
)
}
object.dl_src = addressParser.ethToString(buffer, offset + OFFSETS.dlSrc)
}
if (!addressParser.isEthMaskAll(buffer, offset + OFFSETS.dlDstMask)) {
if (!addressParser.isEthMaskNone(buffer, offset + OFFSETS.dlDstMask)) {
object.dl_dst_mask = addressParser.ethToString(
buffer,
offset + OFFSETS.dlDstMask
)
}
object.dl_dst = addressParser.ethToString(buffer, offset + OFFSETS.dlDst)
}
if ((wildcards & WILDCARDS.dlVlan) === 0) {
object.dl_vlan = buffer.readUInt16BE(offset + OFFSETS.dlVlan)
}
if ((wildcards & WILDCARDS.dlVlanPcp) === 0) {
object.dl_vlan_pcp = buffer.readUInt16BE(offset + OFFSETS.dlVlanPcp)
}
if ((wildcards & WILDCARDS.dlType) === 0) {
object.dl_type = buffer.readUInt16BE(offset + OFFSETS.dlType)
}
if ((wildcards & WILDCARDS.nwTos) === 0) {
object.nw_tos = buffer.readUInt8(offset + OFFSETS.nwTos)
}
if ((wildcards & WILDCARDS.nwProto) === 0) {
object.nw_proto = buffer.readUInt8(offset + OFFSETS.nwProto)
}
if (!addressParser.isIp4MaskAll(buffer, offset + OFFSETS.nwSrcMask)) {
if (!addressParser.isIp4MaskNone(buffer, offset + OFFSETS.nwSrcMask)) {
object.nw_src_mask = addressParser.ip4ToString(
buffer,
offset + OFFSETS.nwSrcMask
)
}
object.nw_src = addressParser.ip4ToString(buffer, offset + OFFSETS.nwSrc)
}
if (!addressParser.isIp4MaskAll(buffer, offset + OFFSETS.nwDstMask)) {
if (!addressParser.isIp4MaskNone(buffer, offset + OFFSETS.nwDstMask)) {
object.nw_dst_mask = addressParser.ip4ToString(
buffer,
offset + OFFSETS.nwDstMask
)
}
object.nw_dst = addressParser.ip4ToString(buffer, offset + OFFSETS.nwDst)
}
if ((wildcards & WILDCARDS.tpSrc) === 0) {
object.tp_src = buffer.readUInt16BE(offset + OFFSETS.tpSrc)
}
if ((wildcards & WILDCARDS.tpDst) === 0) {
object.tp_dst = buffer.readUInt16BE(offset + OFFSETS.tpDst)
}
if ((wildcards & WILDCARDS.mplsLabel) === 0) {
object.mpls_label = buffer.readUInt32BE(offset + OFFSETS.mplsLabel)
}
if ((wildcards & WILDCARDS.mplsTc) === 0) {
object.mpls_tc = buffer.readUInt32BE(offset + OFFSETS.mplsTc)
}
const metadataMask = [
buffer.readUInt32BE(offset + OFFSETS.metadataMask),
buffer.readUInt32BE(offset + OFFSETS.metadataMask + METADATA_LENGTH / 2),
]
if (!uIntHelper.isUInt64All(metadataMask)) {
if (!uIntHelper.isUInt64None(metadataMask)) {
object.metadata_mask = Buffer.alloc(METADATA_LENGTH)
buffer.copy(
object.metadata_mask,
0,
offset + OFFSETS.metadataMask,
offset + OFFSETS.metadataMask + METADATA_LENGTH
)
}
object.metadata = Buffer.alloc(METADATA_LENGTH)
buffer.copy(
object.metadata,
0,
offset + OFFSETS.metadata,
offset + OFFSETS.metadata + METADATA_LENGTH
)
}
return object
},
}

View File

@@ -0,0 +1,79 @@
import of from '../openflow-11'
import addressParser from '../../util/addrress-parser'
// =============================================================================
const OFFSETS = of.offsets.port
const PAD_LENGTH = 4
const PAD2_LENGTH = 2
// =============================================================================
// Pack/unpack helpers for the OpenFlow 1.1 port description structure.
export default {
  // Serialize `object` into `buffer` at `offset` (a buffer of `of.sizes.port`
  // bytes is allocated when none is given) and return the buffer.
  pack: (object, buffer = undefined, offset = 0) => {
    buffer = buffer !== undefined ? buffer : Buffer.alloc(of.sizes.port)
    const {
      port_no: portNo,
      hw_addr: hwAddr,
      name,
      config,
      state,
      curr,
      advertised,
      supported,
      peer,
      curr_speed: currSpeed,
      max_speed: maxSpeed,
    } = object

    buffer.writeUInt32BE(portNo, offset + OFFSETS.portNo)
    buffer.fill(0, offset + OFFSETS.pad, offset + OFFSETS.pad + PAD_LENGTH)
    addressParser.stringToEth(hwAddr, buffer, offset + OFFSETS.hwAddr)
    // FIX: the second padding area lives at its own offset; re-zeroing
    // OFFSETS.pad here left the actual pad2 bytes uninitialized (assumes
    // `of.offsets.port.pad2` is defined — TODO confirm).
    buffer.fill(0, offset + OFFSETS.pad2, offset + OFFSETS.pad2 + PAD2_LENGTH)

    // The name lives in a fixed-size field; zero-fill the tail when the
    // string is shorter than the field.
    buffer.write(name, offset + OFFSETS.name, of.maxPortNameLen)
    if (name.length < of.maxPortNameLen) {
      buffer.fill(
        0,
        offset + OFFSETS.name + name.length,
        offset + OFFSETS.name + of.maxPortNameLen
      )
    }

    buffer.writeUInt32BE(config, offset + OFFSETS.config)
    buffer.writeUInt32BE(state, offset + OFFSETS.state)
    buffer.writeUInt32BE(curr, offset + OFFSETS.curr)
    buffer.writeUInt32BE(advertised, offset + OFFSETS.advertised)
    buffer.writeUInt32BE(supported, offset + OFFSETS.supported)
    buffer.writeUInt32BE(peer, offset + OFFSETS.peer)
    buffer.writeUInt32BE(currSpeed, offset + OFFSETS.currSpeed)
    buffer.writeUInt32BE(maxSpeed, offset + OFFSETS.maxSpeed)
    return buffer
  },

  // Deserialize a port structure from `buffer` at `offset` into a plain
  // object using the wire field names (snake_case).
  unpack: (buffer, offset = 0) => {
    const body = {}
    body.port_no = buffer.readUInt32BE(offset + OFFSETS.portNo)
    body.hw_addr = addressParser.ethToString(buffer, offset + OFFSETS.hwAddr)
    const name = buffer.toString(
      'utf8',
      offset + OFFSETS.name,
      offset + OFFSETS.name + of.maxPortNameLen
    )
    body.name = name.substr(0, name.indexOf('\0')) // Remove useless 0 if name.length < of.maxPortNameLen
    body.config = buffer.readUInt32BE(offset + OFFSETS.config)
    body.state = buffer.readUInt32BE(offset + OFFSETS.state)
    body.curr = buffer.readUInt32BE(offset + OFFSETS.curr)
    body.advertised = buffer.readUInt32BE(offset + OFFSETS.advertised)
    body.supported = buffer.readUInt32BE(offset + OFFSETS.supported)
    body.peer = buffer.readUInt32BE(offset + OFFSETS.peer)
    body.curr_speed = buffer.readUInt32BE(offset + OFFSETS.currSpeed)
    body.max_speed = buffer.readUInt32BE(offset + OFFSETS.maxSpeed)
    return body
  },
}

View File

@@ -0,0 +1,45 @@
import assert from 'assert'
import of from './index'
import scheme from './default-header-scheme'
import { readChunk } from '@vates/read-chunk'
// =============================================================================
// Async generator: reads complete OpenFlow messages from `socket` and yields
// each one unpacked. Terminates when the socket produces no more headers.
//
// NOTE(review): the same backing `buffer` is reused (and grown) across
// iterations, so a yielded message must not retain references into it —
// presumably `of.unpack` copies what it keeps; confirm.
export default async function* parse(socket) {
let buffer = Buffer.alloc(1024)
let data
// Read the header
while ((data = await readChunk(socket, scheme.size)) !== null) {
// Read OpenFlow message size from its header
const msgSize = data.readUInt16BE(scheme.offsets.length)
data.copy(buffer, 0, 0, scheme.size)
// Grow the reusable buffer (capacity doubles) when the message won't fit.
if (buffer.length < msgSize) {
buffer = resize(buffer, msgSize)
}
// Read the rest of the openflow message
if (msgSize > scheme.size) {
data = await readChunk(socket, msgSize - scheme.size)
// EOF in the middle of a message body is a protocol error.
assert.notStrictEqual(data, null)
data.copy(buffer, scheme.size, 0, msgSize - scheme.size)
}
yield of.unpack(buffer)
}
}
// -----------------------------------------------------------------------------
// Return a zero-initialized buffer of at least `size` bytes whose leading
// bytes are a copy of `buffer`. The new capacity is obtained by repeatedly
// doubling the current length, so the result is always larger than the input.
function resize(buffer, size) {
  let capacity = buffer.length * 2
  while (capacity < size) {
    capacity *= 2
  }
  const grown = Buffer.alloc(capacity)
  buffer.copy(grown)
  return grown
}

View File

@@ -0,0 +1,64 @@
import assert from 'assert'
import util from 'util'
// =============================================================================
// Helpers converting Ethernet / IPv4 addresses between their string form and
// their packed representation inside an OpenFlow buffer, plus mask predicates.
//
// The validation regexes are compiled once at module load instead of on every
// call (they used to be re-created inside each function).
const ETH_RE = /^([0-9A-Fa-f]{2}):([0-9A-Fa-f]{2}):([0-9A-Fa-f]{2}):([0-9A-Fa-f]{2}):([0-9A-Fa-f]{2}):([0-9A-Fa-f]{2})$/
const IP4_RE = /^([1-9]?\d|1\d\d|2[0-4]\d|25[0-5])\.([1-9]?\d|1\d\d|2[0-4]\d|25[0-5])\.([1-9]?\d|1\d\d|2[0-4]\d|25[0-5])\.([1-9]?\d|1\d\d|2[0-4]\d|25[0-5])$/

const addressParser = {
  // True when the 6-byte Ethernet mask at `offset` is all zeros.
  isEthMaskNone: (buffer, offset) =>
    buffer.readUInt32BE(offset) === 0x00000000 &&
    buffer.readUInt16BE(offset + 4) === 0x0000,
  // True when the 6-byte Ethernet mask at `offset` is all ones.
  isEthMaskAll: (buffer, offset) =>
    buffer.readUInt32BE(offset) === 0xffffffff &&
    buffer.readUInt16BE(offset + 4) === 0xffff,
  // Same predicates for 4-byte IPv4 masks.
  isIp4MaskNone: (buffer, offset) => buffer.readUInt32BE(offset) === 0x00000000,
  isIp4MaskAll: (buffer, offset) => buffer.readUInt32BE(offset) === 0xffffffff,

  // Read 6 bytes at `offset` and format them as 'aa:bb:cc:dd:ee:ff'.
  ethToString: (buffer, offset) => {
    const parts = []
    for (let i = 0; i < 6; ++i) {
      parts.push(buffer.toString('hex', offset + i, offset + i + 1))
    }
    return parts.join(':')
  },

  // Parse 'aa:bb:cc:dd:ee:ff' and write the 6 bytes into `buffer` at `offset`.
  // Throws an AssertionError when `string` is not a valid Ethernet address.
  stringToEth: (string, buffer, offset) => {
    const eth = ETH_RE.exec(string)
    assert(eth !== null)
    for (let i = 0; i < 6; ++i) {
      buffer.writeUInt8(parseInt(eth[i + 1], 16), offset + i)
    }
  },

  // Read 4 bytes at `offset` and format them as dotted decimal, e.g. '10.0.0.1'.
  ip4ToString: (buffer, offset) =>
    util.format(
      '%d.%d.%d.%d',
      buffer.readUInt8(offset),
      buffer.readUInt8(offset + 1),
      buffer.readUInt8(offset + 2),
      buffer.readUInt8(offset + 3)
    ),

  // Parse a dotted-decimal IPv4 string and write its 4 bytes into `buffer`
  // at `offset`. Throws an AssertionError on invalid input (octets > 255,
  // leading zeros, wrong arity).
  stringToip4: (string, buffer, offset) => {
    const ip = IP4_RE.exec(string)
    assert(ip !== null)
    for (let i = 0; i < 4; ++i) {
      buffer.writeUInt8(parseInt(ip[i + 1], 10), offset + i)
    }
  },
}
export default addressParser

View File

@@ -0,0 +1,11 @@
import assert from 'assert'
// Look up `key` in `map` (the key is coerced to a string first) and return
// the associated value. Throws an AssertionError when the key is absent,
// using `errorMsg` when provided or a default "<key> is invalid" message.
export default function get(map, key, errorMsg = undefined) {
  const message = errorMsg !== undefined ? errorMsg : `${key} is invalid`
  const value = map[String(key)]
  assert.notStrictEqual(value, undefined, message)
  return value
}

View File

@@ -0,0 +1,10 @@
// Predicates over 64-bit unsigned integers represented as an array of two
// 32-bit halves (as read with two readUInt32BE calls).
const UINT32_MIN = 0x00000000
const UINT32_MAX = 0xffffffff
// =============================================================================
const uIntHelper = {
  // True when both 32-bit halves are zero.
  isUInt64None: n => n[0] === UINT32_MIN && n[1] === UINT32_MIN,
  // True when both 32-bit halves are all ones.
  isUInt64All: n => n[0] === UINT32_MAX && n[1] === UINT32_MAX,
}
export default uIntHelper

View File

@@ -0,0 +1,9 @@
// OpenFlow protocol versions supported by this library, keyed by name.
export default {
  // TODO: support more versions — only OpenFlow 1.1 is implemented for now.
  // openFlow10: 0x01,
  openFlow11: 0x02,
  // openFlow12: 0x03,
  // openFlow13: 0x04,
  // openFlow14: 0x05,
  // openFlow15: 0x06,
}

View File

@@ -19,7 +19,14 @@ Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/self-
```js
import { genSelfSigned } from '@xen-orchestra/self-signed'
console.log(await genSelfSigned())
console.log(
await genSelfSigned({
// Number of days this certificate will be valid.
//
// Default: 360
days: 600,
})
)
// {
// cert: '-----BEGIN CERTIFICATE-----\n' +
// // content…

View File

@@ -1,7 +1,14 @@
```js
import { genSelfSigned } from '@xen-orchestra/self-signed'
console.log(await genSelfSigned())
console.log(
await genSelfSigned({
// Number of days this certificate will be valid.
//
// Default: 360
days: 600,
})
)
// {
// cert: '-----BEGIN CERTIFICATE-----\n' +
// // content…

View File

@@ -10,12 +10,12 @@ const openssl = (cmd, args, { input, ...opts } = {}) =>
}
})
exports.genSelfSignedCert = async () => {
exports.genSelfSignedCert = async ({ days = 360 } = {}) => {
const key = await openssl('genrsa', ['2048'])
return {
cert: await openssl(
'req',
['-batch', '-new', '-key', '-', '-x509', '-days', '360', '-nodes'],
['-batch', '-new', '-key', '-', '-x509', '-days', String(days), '-nodes'],
{
input: key,
}

View File

@@ -33,23 +33,23 @@
"node": ">=8.10"
},
"dependencies": {
"chalk": "^2.2.0",
"chalk": "^4.1.0",
"exec-promise": "^0.7.0",
"fs-extra": "^9.0.0",
"fs-promise": "^2.0.3",
"get-stream": "^4.1.0",
"get-stream": "^6.0.0",
"http-request-plus": "^0.8.0",
"human-format": "^0.10.0",
"human-format": "^0.11.0",
"l33teral": "^3.0.3",
"lodash": "^4.17.4",
"nice-pipe": "0.0.0",
"pretty-ms": "^4.0.0",
"pretty-ms": "^7.0.0",
"progress-stream": "^2.0.0",
"pw": "^0.0.4",
"strip-indent": "^2.0.0",
"xdg-basedir": "^3.0.0",
"strip-indent": "^3.0.0",
"xdg-basedir": "^4.0.0",
"xo-lib": "^0.9.0",
"xo-vmdk-to-vhd": "^1.2.0"
"xo-vmdk-to-vhd": "^1.3.1"
},
"devDependencies": {
"@babel/cli": "^7.0.0",

View File

@@ -1,9 +1,309 @@
# ChangeLog
## **5.47.1** (2020-06-02)
## **5.51.1** (2020-10-14)
![Channel: latest](https://badgen.net/badge/channel/latest/yellow)
### Enhancements
- [Host/Advanced] Add the field `IOMMU` if it is defined (PR [#5294](https://github.com/vatesfr/xen-orchestra/pull/5294))
- [Backup logs/report] Hide merge task when no merge is done (PR [#5263](https://github.com/vatesfr/xen-orchestra/pull/5263))
- [New backup] Enable created schedules by default (PR [#5280](https://github.com/vatesfr/xen-orchestra/pull/5280))
- [Backup/overview] Link backup jobs/schedules to their corresponding logs [#4564](https://github.com/vatesfr/xen-orchestra/issues/4564) (PR [#5260](https://github.com/vatesfr/xen-orchestra/pull/5260))
- [VM] Hide backup tab for non-admin users [#5309](https://github.com/vatesfr/xen-orchestra/issues/5309) (PR [#5317](https://github.com/vatesfr/xen-orchestra/pull/5317))
- [VM/Bulk migrate] Sort hosts in the select so that the hosts on the same pool are shown first [#4462](https://github.com/vatesfr/xen-orchestra/issues/4462) (PR [#5308](https://github.com/vatesfr/xen-orchestra/pull/5308))
- [Proxy] Ability to update HTTP proxy configuration on XOA proxy (PR [#5148](https://github.com/vatesfr/xen-orchestra/pull/5148))
### Bug fixes
- [XOA/Notifications] Don't show expired notifications (PR [#5304](https://github.com/vatesfr/xen-orchestra/pull/5304))
- [Backup/S3] Fix secret key edit form [#5233](https://github.com/vatesfr/xen-orchestra/issues/5233) (PR[#5305](https://github.com/vatesfr/xen-orchestra/pull/5305))
- [New network] Remove the possibility of creating a network on a bond member interface (PR [#5262](https://github.com/vatesfr/xen-orchestra/pull/5262))
- [User] Fix custom filters not showing up when selecting a default filter for templates (PR [#5298](https://github.com/vatesfr/xen-orchestra/pull/5298))
- [Self/VDI migration] Fix hidden VDI after migration (PR [#5296](https://github.com/vatesfr/xen-orchestra/pull/5296))
- [Self/VDI migration] Fix `not enough permissions` error (PR [#5299](https://github.com/vatesfr/xen-orchestra/pull/5299))
- [Home] Hide backup filter for non-admin users [#5285](https://github.com/vatesfr/xen-orchestra/issues/5285) (PR [#5264](https://github.com/vatesfr/xen-orchestra/pull/5264))
- [Backup/S3] Fix request signature error [#5253](https://github.com/vatesfr/xen-orchestra/issues/5253) (PR[#5315](https://github.com/vatesfr/xen-orchestra/pull/5315))
- [SDN Controller] Fix tunnel traffic going on the wrong NIC: see https://xcp-ng.org/forum/topic/3544/mtu-problems-with-vxlan. (PR [#5281](https://github.com/vatesfr/xen-orchestra/pull/5281))
- [Settings/IP Pools] Fix some IP ranges being split into multiple ranges in the UI [#3170](https://github.com/vatesfr/xen-orchestra/issues/3170) (PR [#5314](https://github.com/vatesfr/xen-orchestra/pull/5314))
- [Self/Delete] Detach VMs and remove their ACLs on removing a resource set [#4797](https://github.com/vatesfr/xen-orchestra/issues/4797) (PR [#5312](https://github.com/vatesfr/xen-orchestra/pull/5312))
- Fix `not enough permissions` error when accessing some pages as a Self Service user (PR [#5303](https://github.com/vatesfr/xen-orchestra/pull/5303))
- [VM] Explicit error when VM migration failed due to unset default SR on destination pool [#5282](https://github.com/vatesfr/xen-orchestra/issues/5282) (PR [#5306](https://github.com/vatesfr/xen-orchestra/pull/5306))
### Packages to release
- xo-server-sdn-controller 1.0.4
- xo-server-backup-reports 0.16.7
- xo-server 5.68.0
- xo-web 5.72.0
## **5.51.0** (2020-09-30)
### Highlights
- [Self/VDI migration] Ability to migrate VDIs to other SRs within a resource set [#5020](https://github.com/vatesfr/xen-orchestra/issues/5020) (PR [#5201](https://github.com/vatesfr/xen-orchestra/pull/5201))
- [LDAP] Ability to import LDAP groups to XO [#1884](https://github.com/vatesfr/xen-orchestra/issues/1884) (PR [#5279](https://github.com/vatesfr/xen-orchestra/pull/5279))
- [Tasks] Show XO objects linked to pending/finished tasks [#4275](https://github.com/vatesfr/xen-orchestra/issues/4275) (PR [#5267](https://github.com/vatesfr/xen-orchestra/pull/5267))
- [Backup logs] Ability to filter by VM/pool name [#4406](https://github.com/vatesfr/xen-orchestra/issues/4406) (PR [#5208](https://github.com/vatesfr/xen-orchestra/pull/5208))
- [Backup/logs] Log's tasks pagination [#4406](https://github.com/vatesfr/xen-orchestra/issues/4406) (PR [#5209](https://github.com/vatesfr/xen-orchestra/pull/5209))
### Enhancements
- [VM Import] Make the `Description` field optional (PR [#5258](https://github.com/vatesfr/xen-orchestra/pull/5258))
- [New VM] Hide missing ISOs in selector [#5222](https://github.com/vatesfr/xen-orchestra/issues/5222)
- [Dashboard/Health] Show VMs that have too many snapshots [#5238](https://github.com/vatesfr/xen-orchestra/pull/5238)
- [Groups] Ability to delete multiple groups at once (PR [#5264](https://github.com/vatesfr/xen-orchestra/pull/5264))
### Bug fixes
- [Import VMDK] Fix `No position specified for vmdisk1` error (PR [#5255](https://github.com/vatesfr/xen-orchestra/pull/5255))
- [API] Fix `this.removeSubjectFromResourceSet is not a function` error on calling `resourceSet.removeSubject` via `xo-cli` [#5265](https://github.com/vatesfr/xen-orchestra/issues/5265) (PR [#5266](https://github.com/vatesfr/xen-orchestra/pull/5266))
- [Import OVA] Fix frozen UI when dropping a big OVA on the page (PR [#5274](https://github.com/vatesfr/xen-orchestra/pull/5274))
- [Remotes/S3] Fix S3 backup of 50GB+ files [#5197](https://github.com/vatesfr/xen-orchestra/issues/5197) (PR[ #5242](https://github.com/vatesfr/xen-orchestra/pull/5242) )
- [Import OVA] Improve import speed of embedded gzipped VMDK disks (PR [#5275](https://github.com/vatesfr/xen-orchestra/pull/5275))
- [Remotes] Fix editing bucket and directory for S3 remotes [#5233](https://github.com/vatesfr/xen-orchestra/issues/5233) (PR [#5276](https://github.com/vatesfr/xen-orchestra/pull/5276))
### Packages to release
- xo-server-auth-ldap 0.9.0
- @xen-orchestra/fs 0.11.1
- xo-vmdk-to-vhd 1.3.1
- xo-server 5.67.0
- xo-web 5.71.0
## **5.50.3** (2020-09-17)
![Channel: stable](https://badgen.net/badge/channel/stable/green)
### Packages to release
- xo-server-audit 0.8.0
## **5.50.2** (2020-09-10)
### Enhancements
- [VM/network] VIF's locking mode: improve tooltip messages [#4713](https://github.com/vatesfr/xen-orchestra/issues/4713) (PR [#5227](https://github.com/vatesfr/xen-orchestra/pull/5227))
- [Backup/overview] Link log entry to its job [#4564](https://github.com/vatesfr/xen-orchestra/issues/4564) (PR [#5202](https://github.com/vatesfr/xen-orchestra/pull/5202))
### Bug fixes
- [New SR] Fix `Cannot read property 'trim' of undefined` error (PR [#5212](https://github.com/vatesfr/xen-orchestra/pull/5212))
- [Dashboard/Health] Fix suspended VDIs considered as orphans [#5248](https://github.com/vatesfr/xen-orchestra/issues/5248) (PR [#5249](https://github.com/vatesfr/xen-orchestra/pull/5249))
### Packages to release
- xo-server-audit 0.7.2
- xo-web 5.70.0
- xo-server 5.66.2
## **5.50.1** (2020-09-04)
### Enhancements
- [Usage report] Exclude replicated VMs from the VMs evolution [#4778](https://github.com/vatesfr/xen-orchestra/issues/4778) (PR [#5241](https://github.com/vatesfr/xen-orchestra/pull/5241))
### Bug fixes
- [VM/Network] Fix TX checksumming [#5234](https://github.com/vatesfr/xen-orchestra/issues/5234)
### Packages to release
- xo-server-usage-report 0.9.0
- xo-server-audit 0.7.1
- xo-server 5.66.1
## **5.50.0** (2020-08-27)
### Highlights
- [Health/Orphan VDIs] Improve heuristic and list both VDI snapshots and normal VDIs (PR [#5228](https://github.com/vatesfr/xen-orchestra/pull/5228))
- [[Audit] Regularly save fingerprints on remote server for better tampering detection](https://xen-orchestra.com/blog/xo-audit/) [#4844](https://github.com/vatesfr/xen-orchestra/issues/4844) (PR [#5077](https://github.com/vatesfr/xen-orchestra/pull/5077))
- [VM/Network] Ability to change a VIF's locking mode [#4713](https://github.com/vatesfr/xen-orchestra/issues/4713) (PR [#5188](https://github.com/vatesfr/xen-orchestra/pull/5188))
- [VM/Network] Ability to set VIF TX checksumming [#5095](https://github.com/vatesfr/xen-orchestra/issues/5095) (PR [#5182](https://github.com/vatesfr/xen-orchestra/pull/5182))
- [Host/Network] Button to refresh the list of physical interfaces [#5230](https://github.com/vatesfr/xen-orchestra/issues/5230)
- [VM] Ability to protect VM from accidental shutdown [#4773](https://github.com/vatesfr/xen-orchestra/issues/4773)
### Enhancements
- [Proxy] Improve health check error messages [#5161](https://github.com/vatesfr/xen-orchestra/issues/5161) (PR [#5191](https://github.com/vatesfr/xen-orchestra/pull/5191))
- [VM/Console] Hide missing ISOs in selector [#5222](https://github.com/vatesfr/xen-orchestra/issues/5222)
### Bug fixes
- [Proxy/deploy] Fix `no such proxy ok` error on a failure trial start (PR [#5196](https://github.com/vatesfr/xen-orchestra/pull/5196))
- [VM/snapshots] Fix redirection when creating a VM from a snapshot (PR [#5213](https://github.com/vatesfr/xen-orchestra/pull/5213))
- [User] Fix `Incorrect password` error when changing password [#5218](https://github.com/vatesfr/xen-orchestra/issues/5218) (PR [#5221](https://github.com/vatesfr/xen-orchestra/pull/5221))
- [Audit] Obfuscate sensitive data in `user.changePassword` action's records [#5219](https://github.com/vatesfr/xen-orchestra/issues/5219) (PR [#5220](https://github.com/vatesfr/xen-orchestra/pull/5220))
- [SDN Controller] Fix `Cannot read property '$network' of undefined` error at the network creation (PR [#5217](https://github.com/vatesfr/xen-orchestra/pull/5217))
### Packages to release
- xo-server-audit 0.7.0
- xo-server-sdn-controller 1.0.3
- xo-server 5.66.0
- xo-web 5.69.0
## **5.49.1** (2020-08-05)
### Enhancements
- [SR/advanced] Show thin/thick provisioning for missing SR types (PR [#5204](https://github.com/vatesfr/xen-orchestra/pull/5204))
### Bug fixes
- [Patches] Don't log errors related to missing patches listing (Previous fix in 5.48.3 was not working)
### Packages to release
- xo-server 5.64.1
- xo-server-sdn-controller 1.0.2
- xo-web 5.67.0
## **5.49.0** (2020-07-31)
### Highlights
- [Home/VM, host] Ability to filter by power state (PR [#5118](https://github.com/vatesfr/xen-orchestra/pull/5118))
- [Proxy/deploy] Ability to set HTTP proxy configuration (PR [#5145](https://github.com/vatesfr/xen-orchestra/pull/5145))
- [Import/OVA] Allow for VMDK disks inside .ova files to be gzipped (PR [#5085](https://github.com/vatesfr/xen-orchestra/pull/5085))
- [Proxy] Show pending upgrades (PR [#5167](https://github.com/vatesfr/xen-orchestra/pull/5167))
- [SDN Controller] Add/Remove network traffic rules for a VM's VIFs (PR [#5135](https://github.com/vatesfr/xen-orchestra/pull/5135))
- [Backup/health] Show VM snapshots with missing jobs, schedules or VMs [#5086](https://github.com/vatesfr/xen-orchestra/issues/5086) (PR [#5125](https://github.com/vatesfr/xen-orchestra/pull/5125))
- [New delta backup] Show a warning icon when the advanced full backup interval setting and the backup retention are higher than 50 (PR (https://github.com/vatesfr/xen-orchestra/pull/5144))
- [VM/network] Improve the network locking mode feedback [#4713](https://github.com/vatesfr/xen-orchestra/issues/4713) (PR [#5170](https://github.com/vatesfr/xen-orchestra/pull/5170))
- [Remotes] Add AWS S3 as a backup storage
- [New VM] Only make network boot option first when the VM has no disks or when the network installation is chosen [#4980](https://github.com/vatesfr/xen-orchestra/issues/4980) (PR [#5119](https://github.com/vatesfr/xen-orchestra/pull/5119))
### Enhancements
- Log the `Invalid XML-RPC message` error as an unexpected response (PR [#5138](https://github.com/vatesfr/xen-orchestra/pull/5138))
- [VM/disks] By default, sort disks by their device position instead of their name [#5163](https://github.com/vatesfr/xen-orchestra/issues/5163) (PR [#5165](https://github.com/vatesfr/xen-orchestra/pull/5165))
- [Schedule/edit] Ability to enable/disable an ordinary job's schedule [#5026](https://github.com/vatesfr/xen-orchestra/issues/5026) (PR [#5111](https://github.com/vatesfr/xen-orchestra/pull/5111))
- [New schedule] Enable 'Enable immediately after creation' by default (PR [#5111](https://github.com/vatesfr/xen-orchestra/pull/5111))
- [Self Service] Ability to globally ignore snapshots in resource set quotas (PR [#5164](https://github.com/vatesfr/xen-orchestra/pull/5164))
- [Self] Ability to cancel a resource set edition without saving it (PR [#5174](https://github.com/vatesfr/xen-orchestra/pull/5174))
- [VIF] Ability to click an IP address to copy it to the clipboard [#5185](https://github.com/vatesfr/xen-orchestra/issues/5185) (PR [#5186](https://github.com/vatesfr/xen-orchestra/pull/5186))
### Bug fixes
- [Backup/Restore] Fixes `an error has occurred` when all backups for a specific VM have been deleted (PR [#5156](https://github.com/vatesfr/xen-orchestra/pull/5156))
- [OVA Import] Fix import of Red Hat generated .ova files (PR [#5159](https://github.com/vatesfr/xen-orchestra/pull/5159))
- [Fast clone] Fix bug where the name of the created VM would be "undefined_clone" (PR [#5173](https://github.com/vatesfr/xen-orchestra/pull/5173))
- [Audit] Fix unreadable exported records format (PR [#5179](https://github.com/vatesfr/xen-orchestra/pull/5179))
- [SDN Controller] Fixes TLS error `dh key too small` [#5074](https://github.com/vatesfr/xen-orchestra/issues/5074) (PR [#5187](https://github.com/vatesfr/xen-orchestra/pull/5187))
### Released packages
- xo-server-audit 0.6.1
- @xen-orchestra/openflow 0.1.1
- xo-server-sdn-controller 1.0.1
- xo-vmdk-to-vhd 1.3.0
- xo-remote-parser 0.6.0
- @xen-orchestra/fs 0.11.0
- xo-server 5.64.0
- xo-web 5.66.0
## **5.48.3** (2020-07-10)
### Enhancements
- [Audit] Logging user actions is now opt-in (PR [#5151](https://github.com/vatesfr/xen-orchestra/pull/5151))
- [Settings/Audit] Warn if logging is inactive (PR [#5152](https://github.com/vatesfr/xen-orchestra/pull/5152))
### Bug fixes
- [Proxy] Don't use configured HTTP proxy to connect to XO proxy
- [Backup with proxy] Correctly log job-level errors
- [XO] Fix a few broken documentation links (PR [#5146](https://github.com/vatesfr/xen-orchestra/pull/5146))
- [Patches] Don't log errors related to missing patches listing (PR [#5149](https://github.com/vatesfr/xen-orchestra/pull/5149))
### Released packages
- xo-server-audit 0.6.0
- xo-web 5.64.0
- xo-server 5.62.1
## **5.48.2** (2020-07-07)
### Enhancements
- [Backup] Better resolution of the "last run log" quick access (PR [#5141](https://github.com/vatesfr/xen-orchestra/pull/5141))
- [Patches] Don't check patches on halted XCP-ng hosts (PR [#5140](https://github.com/vatesfr/xen-orchestra/pull/5140))
- [XO] Don't check time consistency on halted hosts (PR [#5140](https://github.com/vatesfr/xen-orchestra/pull/5140))
### Bug fixes
- [Smart backup/edit] Fix "Excluded VMs tags" being reset to the default ones (PR [#5136](https://github.com/vatesfr/xen-orchestra/pull/5136))
### Released packages
- xo-web 5.63.0
## **5.48.1** (2020-07-03)
### Enhancements
- [Home] Remove 'tags' filter from the filter selector since tags have their own selector (PR [#5121](https://github.com/vatesfr/xen-orchestra/pull/5121))
- [Backup/New] Add "XOA Proxy" to the excluded tags by default (PR [#5128](https://github.com/vatesfr/xen-orchestra/pull/5128))
- [Backup/overview] Don't open backup job edition in a new tab (PR [#5130](https://github.com/vatesfr/xen-orchestra/pull/5130))
### Bug fixes
- [Restore legacy, File restore legacy] Fix mount error in case of existing proxy remotes (PR [#5124](https://github.com/vatesfr/xen-orchestra/pull/5124))
- [File restore] Don't fail with `TypeError [ERR_INVALID_ARG_TYPE]` on LVM partitions
- [Import/OVA] Fix import of bigger OVA files (>8GB .vmdk disk) (PR [#5129](https://github.com/vatesfr/xen-orchestra/pull/5129))
### Released packages
- xo-vmdk-to-vhd 1.2.1
- xo-server 5.62.0
- xo-web 5.62.0
## **5.48.0** (2020-06-30)
### Highlights
- [VM/Network] Show IP addresses in front of their VIFs [#4882](https://github.com/vatesfr/xen-orchestra/issues/4882) (PR [#5003](https://github.com/vatesfr/xen-orchestra/pull/5003))
- [Home/Template] Ability to copy/clone VM templates [#4734](https://github.com/vatesfr/xen-orchestra/issues/4734) (PR [#5006](https://github.com/vatesfr/xen-orchestra/pull/5006))
- [VM] Ability to protect VM from accidental deletion [#4773](https://github.com/vatesfr/xen-orchestra/issues/4773) (PR [#5045](https://github.com/vatesfr/xen-orchestra/pull/5045))
- [VM] Differentiate PV drivers detection from management agent detection [#4783](https://github.com/vatesfr/xen-orchestra/issues/4783) (PR [#5007](https://github.com/vatesfr/xen-orchestra/pull/5007))
- [SR/Advanced, SR selector] Show thin/thick provisioning [#2208](https://github.com/vatesfr/xen-orchestra/issues/2208) (PR [#5081](https://github.com/vatesfr/xen-orchestra/pull/5081))
- [Backup/health] Show VM backups with missing jobs, schedules and VMs [#4716](https://github.com/vatesfr/xen-orchestra/issues/4716) (PR [#5062](https://github.com/vatesfr/xen-orchestra/pull/5062))
### Enhancements
- [Plugin] Disable test plugin action when the plugin is not loaded (PR [#5038](https://github.com/vatesfr/xen-orchestra/pull/5038))
- [VM/bulk copy] Add fast clone option (PR [#5006](https://github.com/vatesfr/xen-orchestra/pull/5006))
- [Home/VM] Homogenize the list of backed up VMs with the normal list (PR [#5046](https://github.com/vatesfr/xen-orchestra/pull/5046))
- [SR/Disks] Add tooltip for disabled migration (PR [#4884](https://github.com/vatesfr/xen-orchestra/pull/4884))
- [Licenses] Ability to move a license from another XOA to the current XOA (PR [#5110](https://github.com/vatesfr/xen-orchestra/pull/5110))
### Bug fixes
- [VM/Creation] Fix `insufficient space` which could happened when moving and resizing disks (PR [#5044](https://github.com/vatesfr/xen-orchestra/pull/5044))
- [VM/General] Fix displayed IPV6 instead of IPV4 in case of an old version of XenServer (PR [#5036](https://github.com/vatesfr/xen-orchestra/pull/5036))
- [Host/Load-balancer] Fix VM migration condition: free memory in the destination host must be greater or equal to used VM memory (PR [#5054](https://github.com/vatesfr/xen-orchestra/pull/5054))
- [Home] Broken "Import VM" link [#5055](https://github.com/vatesfr/xen-orchestra/issues/5055) (PR [#5056](https://github.com/vatesfr/xen-orchestra/pull/5056))
- [Home/SR] Fix inability to edit SRs' name [#5057](https://github.com/vatesfr/xen-orchestra/issues/5057) (PR [#5058](https://github.com/vatesfr/xen-orchestra/pull/5058))
- [Backup] Fix huge logs in case of Continuous Replication or Disaster Recovery errors (PR [#5069](https://github.com/vatesfr/xen-orchestra/pull/5069))
- [Notification] Fix same notification showing again as unread (PR [#5067](https://github.com/vatesfr/xen-orchestra/pull/5067))
- [SDN Controller] Fix broken private network creation when specifying a preferred center [#5076](https://github.com/vatesfr/xen-orchestra/issues/5076) (PRs [#5079](https://github.com/vatesfr/xen-orchestra/pull/5079) & [#5080](https://github.com/vatesfr/xen-orchestra/pull/5080))
- [Import/VMDK] Import of VMDK disks has been broken since 5.45.0 (PR [#5087](https://github.com/vatesfr/xen-orchestra/pull/5087))
- [Remotes] Fix not displayed used/total disk (PR [#5093](https://github.com/vatesfr/xen-orchestra/pull/5093))
- [Perf alert] Regroup items with missing stats in one email [#3137](https://github.com/vatesfr/xen-orchestra/issues/3137) (PR [#4413](https://github.com/vatesfr/xen-orchestra/pull/4413))
### Released packages
- xo-server-perf-alert 0.2.3
- xo-server-audit 0.5.0
- xo-server-sdn-controller 0.4.3
- xo-server-load-balancer 0.3.3
- xo-server 5.61.1
- xo-web 5.61.1
## **5.47.1** (2020-06-02)
### Bug fixes
- [auth-ldap] Sign in was broken in XO 5.47.0 (PR [#5039](https://github.com/vatesfr/xen-orchestra/pull/5039))
@@ -60,8 +360,6 @@
## **5.46.0** (2020-04-30)
![Channel: stable](https://badgen.net/badge/channel/stable/green)
### Highlights
- [Internationalization] Italian translation (Thanks [@infodavide](https://github.com/infodavide)!) [#4908](https://github.com/vatesfr/xen-orchestra/issues/4908) (PRs [#4931](https://github.com/vatesfr/xen-orchestra/pull/4931) [#4932](https://github.com/vatesfr/xen-orchestra/pull/4932))
@@ -318,7 +616,7 @@
- [Backup NG] Make report recipients configurable in the backup settings [#4581](https://github.com/vatesfr/xen-orchestra/issues/4581) (PR [#4646](https://github.com/vatesfr/xen-orchestra/pull/4646))
- [Host] Advanced Live Telemetry (PR [#4680](https://github.com/vatesfr/xen-orchestra/pull/4680))
- [Plugin][web hooks](https://xen-orchestra.com/docs/web-hooks.html) [#1946](https://github.com/vatesfr/xen-orchestra/issues/1946) (PR [#3155](https://github.com/vatesfr/xen-orchestra/pull/3155))
- [Plugin][web hooks](https://xen-orchestra.com/docs/advanced.html#web-hooks) [#1946](https://github.com/vatesfr/xen-orchestra/issues/1946) (PR [#3155](https://github.com/vatesfr/xen-orchestra/pull/3155))
### Enhancements

View File

@@ -7,30 +7,15 @@
> Users must be able to say: “Nice enhancement, I'm eager to test it”
- [VM/Network] Show IP addresses in front of their VIFs [#4882](https://github.com/vatesfr/xen-orchestra/issues/4882) (PR [#5003](https://github.com/vatesfr/xen-orchestra/pull/5003))
- [VM] Ability to protect VM from accidental deletion [#4773](https://github.com/vatesfr/xen-orchestra/issues/4773)
- [Plugin] Disable test plugin action when the plugin is not loaded (PR [#5038](https://github.com/vatesfr/xen-orchestra/pull/5038))
- [Home/Template] Ability to copy/clone VM templates [#4734](https://github.com/vatesfr/xen-orchestra/issues/4734) (PR [#5006](https://github.com/vatesfr/xen-orchestra/pull/5006))
- [VM/bulk copy] Add fast clone option (PR [#5006](https://github.com/vatesfr/xen-orchestra/pull/5006))
- [VM] Differentiate PV drivers detection from management agent detection [#4783](https://github.com/vatesfr/xen-orchestra/issues/4783) (PR [#5007](https://github.com/vatesfr/xen-orchestra/pull/5007))
- [Home/VM] Homogenize the list of backed up VMs with the normal list (PR [#5046](https://github.com/vatesfr/xen-orchestra/pull/5046))
- [SR/Disks] Add tooltip for disabled migration (PR [#4884](https://github.com/vatesfr/xen-orchestra/pull/4884))
- [SR/Advanced, SR selector] Show thin/thick provisioning [#2208](https://github.com/vatesfr/xen-orchestra/issues/2208) (PR [#5081](https://github.com/vatesfr/xen-orchestra/pull/5081))
- [Host/Advanced] Display installed certificates [#5134](https://github.com/vatesfr/xen-orchestra/issues/5134) (PR [#5319](https://github.com/vatesfr/xen-orchestra/pull/5319))
- [VM/network] Allow Self Service users to change a VIF's network [#5020](https://github.com/vatesfr/xen-orchestra/issues/5020) (PR [#5203](https://github.com/vatesfr/xen-orchestra/pull/5203))
### Bug fixes
> Users must be able to say: “I had this issue, happy to know it's fixed”
- [VM/Creation] Fix `insufficient space` which could happen when moving and resizing disks (PR [#5044](https://github.com/vatesfr/xen-orchestra/pull/5044))
- [VM/General] Fix displayed IPV6 instead of IPV4 in case of an old version of XenServer (PR [#5036](https://github.com/vatesfr/xen-orchestra/pull/5036))
- [Host/Load-balancer] Fix VM migration condition: free memory in the destination host must be greater or equal to used VM memory (PR [#5054](https://github.com/vatesfr/xen-orchestra/pull/5054))
- [Home] Broken "Import VM" link [#5055](https://github.com/vatesfr/xen-orchestra/issues/5055) (PR [#5056](https://github.com/vatesfr/xen-orchestra/pull/5056))
- [Home/SR] Fix inability to edit SRs' name [#5057](https://github.com/vatesfr/xen-orchestra/issues/5057) (PR [#5058](https://github.com/vatesfr/xen-orchestra/pull/5058))
- [Backup] Fix huge logs in case of Continuous Replication or Disaster Recovery errors (PR [#5069](https://github.com/vatesfr/xen-orchestra/pull/5069))
- [Notification] Fix same notification showing again as unread (PR [#5067](https://github.com/vatesfr/xen-orchestra/pull/5067))
- [SDN Controller] Fix broken private network creation when specifying a preferred center [#5076](https://github.com/vatesfr/xen-orchestra/issues/5076) (PRs [#5079](https://github.com/vatesfr/xen-orchestra/pull/5079) & [#5080](https://github.com/vatesfr/xen-orchestra/pull/5080))
- [Import/VMDK] Import of VMDK disks has been broken since 5.45.0 (PR [#5087](https://github.com/vatesfr/xen-orchestra/pull/5087))
- [Remotes] Fix not displayed used/total disk (PR [#5093](https://github.com/vatesfr/xen-orchestra/pull/5093))
- [Host] Fix power state stuck on busy after power off [#4919](https://github.com/vatesfr/xen-orchestra/issues/4919) (PR [#5288](https://github.com/vatesfr/xen-orchestra/pull/5288))
- [VM/Network] Don't allow users to change a VIF's locking mode if they don't have permissions on the network (PR [#5283](https://github.com/vatesfr/xen-orchestra/pull/5283))
### Packages to release
@@ -49,8 +34,8 @@
>
> In case of conflict, the highest (lowest in previous list) `$version` wins.
- vhd-lib minor
- @xen-orchestra/audit-core minor
- xo-server-audit minor
- xo-server-sdn-controller patch
- xo-server-load-balancer patch
- xo-server minor
- xo-web minor
- xo-server minor

Binary file not shown.

Before

Width:  |  Height:  |  Size: 14 KiB

After

Width:  |  Height:  |  Size: 21 KiB

BIN
docs/assets/add-rule.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 35 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 27 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 25 KiB

BIN
docs/assets/show-rules.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 87 KiB

View File

@@ -83,3 +83,7 @@ To check your free space, enter your XOA and run `xoa check` to check free syste
This is happening when you have a _smart backup job_ that doesn't match any VMs. For example: you created a job to backup all running VMs. If no VMs are running on backup schedule, you'll have this message. This could also happen if you lost connection with your pool master (the VMs aren't visible anymore from Xen Orchestra).
Edit your job and try to see matching VMs or check if your pool is connected to XOA.
## Error: SR_OPERATION_NOT_SUPPORTED
This error can be caused by leaving any removable device (such as USB storage) attached to the VM that you are backing up or snapshotting, detach the device and retry. This can also be caused if you created a VM disk using the [RAW format](https://xcp-ng.org/docs/storage.html#using-raw-format).

View File

@@ -72,7 +72,7 @@ Now if you do this:
It means any VMs on "Lab Pool" with the "prod" tag will be backed up.
## RAM Enabled bakcup
## RAM Enabled backup
:::tip
This feature is **only compatible** with XCP-ng 8.0 or more recent. Citrix Hypervisor hasn't merged our changes yet, even though we contributed to their code directly.

View File

@@ -14,6 +14,18 @@ This section will cover the license management system for commercial editions of
![](./assets/activate-confirm.png)
## Rebind XO license
:::warning
Once a license is bound, the only way to unbind it is to contact us with a [support ticket](https://xen-orchestra.com/#!/member/support)!
A license can only be bound to a single appliance at a time; rebinding your license will unbind it from any other appliance.
:::
To rebind your Xen Orchestra appliance, you simply need to connect to the **appliance on which you want to bind the license** and click on the rebind option (Move license to this XOA button) in the license section.
![](./assets/rebind-screen.png)
You will then have a confirmation screen
![](./assets/confirm-rebind.png)
Once it's done, you simply need to perform an upgrade on your freshly bound appliance to download the correct edition packages.

View File

@@ -36,7 +36,7 @@ In the network creation view:
:::tip
- All hosts in a private network must be able to reach the other hosts' management interface.
- All hosts in a private network must be able to reach the other hosts' management interface and all hosts must be able to reach one another on the interface selected for private networks creation.
> The term management interface is used to indicate the IP-enabled NIC that carries the management traffic.
- Only 1 encrypted GRE network and 1 encrypted VxLAN network per pool can exist at a time due to Open vSwitch limitation.
:::
@@ -75,6 +75,32 @@ Encryption is not available prior to XCP-ng 8.0.
## OpenFlow rules
Please see the [devblog about OpenFlow rules](https://xen-orchestra.com/blog/vms-vif-network-traffic-control/).
This feature requires the OpenFlow port to be opened
In the VM network tab a new column has been added: _Network rules_.
- The _Add rule_ button displays a form to add a new rule, choosing to:
- enable/disable the matching traffic
  - for a specific protocol (optional)
  - on a specific port (optional)
  - matching a specific IP or IP range (optional)
- coming from the VIF / going to the VIF / both
- The _Show rules_ button allows displaying all rules for a VIF.
- When the rules are displayed, a button to delete a rule is available.
![](./assets/add-rule.png)
![](./assets/show-rules.png)
:::tip
This feature is about to be released soon. Stay tuned!
:::
- This feature requires the OpenFlow port (TCP 6653) to be opened. (See [the requirements](#openflow))
:::
### Requirements
### Openflow
- On XCP-ng prior to 8.0:
- To be able to use `OpenFlow`, the following line needs to be added, if not already present, in `/etc/sysconfig/iptables` of all the hosts where `OpenFlow` is wanted: `-A xapi-INPUT -p udp -m conntrack --ctstate NEW -m tcp --dport 6653 -j ACCEPT`

View File

@@ -16,7 +16,7 @@ Xen Orchestra should be fully functional with any version of these two virtualiz
## XCP-ng
:::tip
Xen Orchestra and XCP-ng are mainly edited by the same company ([Vates](https://vates.fr)). That's why you are sure to have the best compatibility with both XCP-ng and XO!
Xen Orchestra and XCP-ng are mainly edited by the same company ([Vates](https://vates.fr)). That's why you are sure to have the best compatibility with both XCP-ng and XO! Also, we strongly suggest people to keep using the latest XCP-ng version as far as possible (or N-1).
:::
- XCP-ng 8.1 ✅ 🚀
@@ -25,14 +25,9 @@ Xen Orchestra and XCP-ng are mainly edited by the same company ([Vates](https://
- XCP-ng 7.5 ✅ ❗
- XCP-ng 7.4 ✅ ❗
:::tip
We strongly suggest people to keep using the latest XCP-ng version as far as possible (or N-1).
:::
## Citrix Hypervisor (formerly XenServer)
Backup restore for large VM disks (>1TiB usage) is [broken on old XenServer versions](https://bugs.xenserver.org/browse/XSO-868) (except 7.1 LTS up-to-date and superior to 7.6).
- Citrix Hypervisor 8.2 LTS ✅
- Citrix Hypervisor 8.1 ✅
- Citrix Hypervisor 8.0 ✅
- XenServer 7.6 ✅ ❗
@@ -46,9 +41,14 @@ Backup restore for large VM disks (>1TiB usage) is [broken on old XenServer vers
- XenServer 6.5 ✅ ❗
- Random Delta backup issues
- XenServer 6.1 and 6.2 ❎ ❗
- No Delta backup and CR support
- **No official support** due to missing JSON-RPC (only XML, too CPU intensive)
- Not compatible with Delta backup and CR
- XenServer 5.x ❎ ❗
- Basic administration features only
- Basic administration features only, **no official support**
:::warning
Backup restore for large VM disks (>1TiB usage) is [broken on old XenServer versions](https://bugs.xenserver.org/browse/XSO-868) (except 7.1 LTS up-to-date and superior to 7.6).
:::
## Others

View File

@@ -237,7 +237,11 @@ Then, you can define quotas on this set:
- max disk usage
:::tip
Snapshotting a VM within a self-service will _not_ use the quota from the resource set. The same rule applies for backups and replication.
Replicated VMs and snapshots created by a backup job don't use quotas.
:::
:::tip
A snapshot of a Self Service VM will use as much resources as a VM would. You can disable this by setting `ignoreVmSnapshotResources` to `true` in the `selfService` section of `xo-server`'s config.
:::
When you click on create, you can see the resource set and remove or edit it:

View File

@@ -6,7 +6,7 @@
"babel-eslint": "^10.0.1",
"babel-jest": "^26.0.1",
"benchmark": "^2.1.4",
"eslint": "^6.0.1",
"eslint": "^7.6.0",
"eslint-config-prettier": "^6.0.0",
"eslint-config-standard": "^14.1.0",
"eslint-config-standard-jsx": "^8.1.0",
@@ -17,7 +17,7 @@
"eslint-plugin-react": "^7.6.1",
"eslint-plugin-standard": "^4.0.0",
"exec-promise": "^0.7.0",
"flow-bin": "^0.126.0",
"flow-bin": "^0.131.0",
"globby": "^11.0.1",
"handlebars": "^4.7.6",
"husky": "^4.2.5",
@@ -39,6 +39,15 @@
},
"jest": {
"collectCoverage": true,
"moduleNameMapper": {
"^.": "./src",
"^(@vates/[^/]+)": "$1/src",
"^(@xen-orchestra/[^/]+)": "$1/src",
"^(value-matcher)": "$1/src",
"^(vhd-cli)": "$1/src",
"^(vhd-lib)": "$1/src",
"^(xo-[^/]+)": "$1/src"
},
"projects": [
"<rootDir>"
],
@@ -65,12 +74,11 @@
"dev-test": "jest --bail --watch \"^(?!.*\\.integ\\.spec\\.js$)\"",
"docs:dev": "vuepress dev docs",
"docs:build": "vuepress build docs",
"posttest": "scripts/run-script test",
"prepare": "scripts/run-script prepare",
"pretest": "eslint --ignore-path .gitignore .",
"prettify": "prettier --ignore-path .gitignore --write '**/*.{js,jsx,md,mjs,ts,tsx}'",
"test": "jest \"^(?!.*\\.integ\\.spec\\.js$)\"",
"test": "npm run test-lint && npm run test-unit",
"test-integration": "jest \".integ\\.spec\\.js$\"",
"test-lint": "eslint --ignore-path .gitignore .",
"test-unit": "jest \"^(?!.*\\.integ\\.spec\\.js$)\" && scripts/run-script test",
"travis-tests": "scripts/travis-tests"
},
"workspaces": [

View File

@@ -564,9 +564,13 @@ const parser = P.grammar({
).map(_ => new Or(_[4])),
P.seq(P.text('!'), r.ws, r.term).map(_ => new Not(_[2])),
P.seq(P.regex(/[<>]=?/), r.rawString).map(([op, val]) => {
val = +val
if (Number.isNaN(val)) {
throw new TypeError('value must be a number')
let num = +val
if (!Number.isNaN(num)) {
num = ms(val)
if (num === undefined) {
throw new TypeError('value must be a number')
}
num += Date.now()
}
return new Comparison(op, val)
}),

View File

@@ -16,7 +16,6 @@ Installation of the [npm package](https://npmjs.org/package/value-matcher):
```js
import { createPredicate } from 'value-matcher'
;[
{ user: 'sam', age: 65, active: false },
{ user: 'barney', age: 36, active: true },
@@ -32,6 +31,53 @@ import { createPredicate } from 'value-matcher'
// ]
```
## Supported predicates
### `any`
The value must be strictly equal to the pattern.
```js
const predicate = createPredicate(42)
predicate(42) // true
predicate('foo') // false
```
### `{ [property: string]: Pattern }`
The value must be an object with all pattern properties matching.
```js
const predicate = createPredicate({ foo: 'bar' })
predicate({ foo: 'bar', baz: 42 }) // true
predicate('foo') // false
```
### `Pattern[]`
The value must be an array with some of its items matching each of pattern items.
```js
const predicate = createPredicate([42, { foo: 'bar' }])
predicate([false, { foo: 'bar', baz: 42 }, null, 42]) // true
predicate('foo') // false
```
### `{ __all: Pattern[] }`
All patterns must match.
### `{ __or: Pattern[] }`
At least one pattern must match.
### `{ __not: Pattern }`
The pattern must not match.
## Contributions
Contributions are _very_ welcomed, either on the documentation or on

View File

@@ -1,6 +1,5 @@
```js
import { createPredicate } from 'value-matcher'
;[
{ user: 'sam', age: 65, active: false },
{ user: 'barney', age: 36, active: true },
@@ -15,3 +14,50 @@ import { createPredicate } from 'value-matcher'
// { user: 'barney', age: 36, active: true },
// ]
```
## Supported predicates
### `any`
The value must be strictly equal to the pattern.
```js
const predicate = createPredicate(42)
predicate(42) // true
predicate('foo') // false
```
### `{ [property: string]: Pattern }`
The value must be an object with all pattern properties matching.
```js
const predicate = createPredicate({ foo: 'bar' })
predicate({ foo: 'bar', baz: 42 }) // true
predicate('foo') // false
```
### `Pattern[]`
The value must be an array with some of its items matching each of pattern items.
```js
const predicate = createPredicate([42, { foo: 'bar' }])
predicate([false, { foo: 'bar', baz: 42 }, null, 42]) // true
predicate('foo') // false
```
### `{ __all: Pattern[] }`
All patterns must match.
### `{ __or: Pattern[] }`
At least one pattern must match.
### `{ __not: Pattern }`
The pattern must not match.

View File

@@ -43,7 +43,6 @@
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepare": "yarn run build",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
}

View File

@@ -28,7 +28,7 @@
"node": ">=8.10"
},
"dependencies": {
"@xen-orchestra/fs": "^0.10.4",
"@xen-orchestra/fs": "^0.11.1",
"cli-progress": "^3.1.0",
"exec-promise": "^0.7.0",
"getopts": "^2.2.3",
@@ -52,7 +52,7 @@
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "rimraf dist/ && index-modules --cjs-lazy src/commands",
"predev": "yarn run prebuild",
"prepare": "yarn run build",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
}
}

View File

@@ -36,12 +36,12 @@
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"@babel/preset-flow": "^7.0.0",
"@xen-orchestra/fs": "^0.10.4",
"@xen-orchestra/fs": "^0.11.1",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^7.0.2",
"execa": "^4.0.2",
"fs-promise": "^2.0.0",
"get-stream": "^5.1.0",
"get-stream": "^6.0.0",
"index-modules": "^0.3.0",
"readable-stream": "^3.0.6",
"rimraf": "^3.0.0",
@@ -53,7 +53,7 @@
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run clean",
"prepare": "yarn run build",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
},
"author": {

View File

@@ -307,19 +307,15 @@ export default class Vhd {
return this._write(blockTable.slice(i, i + 4), this.header.tableOffset + i)
}
// Make a new empty block at vhd end.
// Update block allocation table in context and in file.
// Allocate a new uninitialized block in the BAT
async _createBlock(blockId) {
assert.strictEqual(this._getBatEntry(blockId), BLOCK_UNUSED)
const blockAddr = Math.ceil(this._getEndOfData() / SECTOR_SIZE)
debug(`create block ${blockId} at ${blockAddr}`)
await Promise.all([
// Write an empty block and addr in vhd file.
this._write(Buffer.alloc(this.fullBlockSize), sectorsToBytes(blockAddr)),
this._setBatEntry(blockId, blockAddr),
])
await this._setBatEntry(blockId, blockAddr)
return blockAddr
}

View File

@@ -37,12 +37,12 @@
},
"dependencies": {
"archy": "^1.0.0",
"chalk": "^3.0.0",
"chalk": "^4.1.0",
"exec-promise": "^0.7.0",
"human-format": "^0.10.0",
"human-format": "^0.11.0",
"lodash": "^4.17.4",
"pw": "^0.0.4",
"xen-api": "^0.28.5"
"xen-api": "^0.29.0"
},
"devDependencies": {
"@babel/cli": "^7.1.5",

View File

@@ -2,7 +2,7 @@
"dependencies": {
"getopts": "^2.2.3",
"golike-defer": "^0.4.1",
"human-format": "^0.10.1",
"human-format": "^0.11.0",
"process-top": "^1.0.0",
"progress-stream": "^2.0.0",
"promise-toolbox": "^0.13.0",

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "xen-api",
"version": "0.28.5",
"version": "0.29.0",
"license": "ISC",
"description": "Connector to the Xen API",
"keywords": [
@@ -40,7 +40,7 @@
"event-to-promise": "^0.8.0",
"exec-promise": "^0.7.0",
"http-request-plus": "^0.8.0",
"jest-diff": "^24.0.0",
"jest-diff": "^26.4.2",
"json-rpc-protocol": "^0.13.1",
"kindof": "^2.0.0",
"lodash": "^4.17.4",

View File

@@ -34,17 +34,17 @@
},
"dependencies": {
"bluebird": "^3.5.1",
"chalk": "^3.0.0",
"chalk": "^4.1.0",
"exec-promise": "^0.7.0",
"fs-promise": "^2.0.3",
"http-request-plus": "^0.9.1",
"human-format": "^0.10.0",
"human-format": "^0.11.0",
"l33teral": "^3.0.3",
"lodash": "^4.17.4",
"micromatch": "^4.0.2",
"mkdirp": "^0.5.1",
"mkdirp": "^1.0.4",
"nice-pipe": "0.0.0",
"pretty-ms": "^5.0.0",
"pretty-ms": "^7.0.0",
"progress-stream": "^2.0.0",
"promise-toolbox": "^0.15.0",
"pump": "^3.0.0",

View File

@@ -8,7 +8,7 @@ const readFile = promisify(require('fs').readFile)
const writeFile = promisify(require('fs').writeFile)
const l33t = require('l33teral')
const mkdirp = promisify(require('mkdirp'))
const mkdirp = require('mkdirp')
const xdgBasedir = require('xdg-basedir')
// ===================================================================

View File

@@ -41,7 +41,7 @@
"end-of-stream": "^1.1.0",
"exec-promise": "^0.7.0",
"highland": "^2.10.1",
"through2": "^3.0.0",
"through2": "^4.0.2",
"xo-lib": "^0.9.0"
},
"devDependencies": {

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "xo-remote-parser",
"version": "0.5.0",
"version": "0.6.0",
"license": "AGPL-3.0-or-later",
"description": "",
"keywords": [],
@@ -26,7 +26,8 @@
"node": ">=6"
},
"dependencies": {
"lodash": "^4.13.1"
"lodash": "^4.13.1",
"url-parse": "^1.4.7"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
@@ -42,7 +43,7 @@
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "rimraf dist/",
"predev": "yarn run prebuild",
"prepare": "yarn run build",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
}
}

View File

@@ -2,6 +2,7 @@ import filter from 'lodash/filter'
import map from 'lodash/map'
import trim from 'lodash/trim'
import trimStart from 'lodash/trimStart'
import Url from 'url-parse'
const NFS_RE = /^([^:]+):(?:(\d+):)?([^:]+)$/
const SMB_RE = /^([^:]+):(.+)@([^@]+)\\\\([^\0]+)(?:\0(.*))?$/
@@ -39,6 +40,13 @@ export const parse = string => {
object.domain = domain
object.username = username
object.password = password
} else if (type === 's3') {
const parsed = new Url(string)
object.type = 's3'
object.host = parsed.host
object.path = parsed.pathname
object.username = parsed.username
object.password = decodeURIComponent(parsed.password)
}
return object
}
@@ -60,6 +68,9 @@ export const format = ({
if (type === 'smb') {
string += `${username}:${password}@${domain}\\\\${host}`
}
if (type === 's3') {
string += `${username}:${encodeURIComponent(password)}@${host}`
}
path = sanitizePath(path)
if (type === 'smb') {
path = path.split('/')

View File

@@ -44,6 +44,17 @@ const data = deepFreeze({
path: '/media/nfs',
},
},
S3: {
string:
's3://AKIAS:XSuBupZ0mJlu%2B@s3-us-west-2.amazonaws.com/test-bucket/dir',
object: {
type: 's3',
host: 's3-us-west-2.amazonaws.com',
path: '/test-bucket/dir',
username: 'AKIAS',
password: 'XSuBupZ0mJlu+',
},
},
})
const parseData = deepFreeze({

View File

@@ -1,6 +1,6 @@
{
"name": "xo-server-audit",
"version": "0.4.0",
"version": "0.8.0",
"license": "AGPL-3.0-or-later",
"description": "Audit plugin for XO-Server",
"keywords": [
@@ -36,6 +36,7 @@
"devDependencies": {
"@babel/cli": "^7.7.0",
"@babel/core": "^7.7.2",
"@babel/plugin-proposal-nullish-coalescing-operator": "^7.0.0",
"@babel/preset-env": "^7.7.1",
"cross-env": "^7.0.2",
"rimraf": "^3.0.0"
@@ -49,8 +50,10 @@
},
"dependencies": {
"@xen-orchestra/audit-core": "^0.1.1",
"@xen-orchestra/cron": "^1.0.6",
"@xen-orchestra/log": "^0.2.0",
"async-iterator-to-stream": "^1.1.0",
"lodash": "^4.17.19",
"promise-toolbox": "^0.15.0",
"readable-stream": "^3.5.0",
"xo-common": "^0.5.0"

View File

@@ -2,6 +2,7 @@ import asyncIteratorToStream from 'async-iterator-to-stream'
import createLogger from '@xen-orchestra/log'
import { alteredAuditRecord, missingAuditRecord } from 'xo-common/api-errors'
import { createGzip } from 'zlib'
import { createSchedule } from '@xen-orchestra/cron'
import { fromCallback } from 'promise-toolbox'
import { pipeline } from 'readable-stream'
import {
@@ -18,23 +19,32 @@ const DEFAULT_BLOCKED_LIST = {
'acl.get': true,
'acl.getCurrentPermissions': true,
'audit.checkIntegrity': true,
'audit.clean': true,
'audit.deleteRange': true,
'audit.generateFingerprint': true,
'audit.getRecords': true,
'backup.list': true,
'backupNg.getAllJobs': true,
'backupNg.getAllLogs': true,
'backupNg.listVmBackups': true,
'cloud.getResourceCatalog': true,
'cloudConfig.getAll': true,
'group.getAll': true,
'host.isHostServerTimeConsistent': true,
'host.isHyperThreadingEnabled': true,
'host.stats': true,
'ipPool.getAll': true,
'job.getAll': true,
'log.get': true,
'metadataBackup.getAllJobs': true,
'network.getBondModes': true,
'pif.getIpv4ConfigurationModes': true,
'plugin.get': true,
'pool.listMissingPatches': true,
'proxy.getAll': true,
'remote.getAll': true,
'remote.getAllInfo': true,
'remote.list': true,
'resourceSet.getAll': true,
'role.getAll': true,
'schedule.getAll': true,
@@ -47,15 +57,36 @@ const DEFAULT_BLOCKED_LIST = {
'system.getServerTimezone': true,
'system.getServerVersion': true,
'user.getAll': true,
'vm.getHaValues': true,
'vm.stats': true,
'xo.getAllObjects': true,
'xoa.getApplianceInfo': true,
'xoa.licenses.get': true,
'xoa.licenses.getAll': true,
'xoa.licenses.getSelf': true,
'xoa.supportTunnel.getState': true,
'xosan.checkSrCurrentState': true,
'xosan.computeXosanPossibleOptions': true,
'xosan.getVolumeInfo': true,
}
const LAST_ID = 'lastId'
// interface Db {
// lastId: string
// [RecordId: string]: {
// data: object
// event: string
// id: strings
// previousId: string
// subject: {
// userId: string
// userIp: string
// userName: string
// }
// time: number
// }
// }
class Db extends Storage {
constructor(db) {
super()
@@ -85,6 +116,40 @@ class Db extends Storage {
getLastId() {
return this.get(LAST_ID)
}
async clean() {
const db = this._db
// delete first so that a new chain can be constructed even if anything else fails
await db.del(LAST_ID)
return new Promise((resolve, reject) => {
let count = 1
const cb = () => {
if (--count === 0) {
resolve()
}
}
const deleteEntry = key => {
++count
db.del(key, cb)
}
db.createKeyStream()
.on('data', deleteEntry)
.on('end', cb)
.on('error', reject)
})
}
}
export const configurationSchema = {
type: 'object',
properties: {
active: {
description: 'Whether to save user actions in the audit log',
type: 'boolean',
},
},
}
const NAMESPACE = 'audit'
@@ -97,8 +162,30 @@ class AuditXoPlugin {
this._cleaners = []
this._xo = xo
const { enabled = true, schedule: { cron = '0 6 * * *', timezone } = {} } =
staticConfig.lastHashUpload ?? {}
if (enabled) {
this._uploadLastHashJob = createSchedule(cron, timezone).createJob(() =>
this._uploadLastHash().catch(log.error)
)
}
this._auditCore = undefined
this._storage = undefined
this._listeners = {
'xo:audit': this._handleEvent.bind(this),
'xo:postCall': this._handleEvent.bind(this, 'apiCall'),
}
}
configure({ active = false }, { loaded }) {
this._active = active
if (loaded) {
this._addListeners()
}
}
async load() {
@@ -111,8 +198,7 @@ class AuditXoPlugin {
this._storage = undefined
})
this._addListener('xo:postCall', this._handleEvent.bind(this, 'apiCall'))
this._addListener('xo:audit', this._handleEvent.bind(this))
this._addListeners()
const exportRecords = this._exportRecords.bind(this)
exportRecords.permission = 'admin'
@@ -143,10 +229,31 @@ class AuditXoPlugin {
oldest: { type: 'string', optional: true },
}
const uploadLastHashJob = this._uploadLastHashJob
if (uploadLastHashJob !== undefined) {
uploadLastHashJob.start()
cleaners.push(() => uploadLastHashJob.stop())
}
const clean = this._storage.clean.bind(this._storage)
clean.permission = 'admin'
clean.description = 'Clean audit database'
const deleteRange = this._deleteRangeAndRewrite.bind(this)
deleteRange.description =
'Delete a range of records and rewrite the records chain'
deleteRange.permission = 'admin'
deleteRange.params = {
newest: { type: 'string' },
oldest: { type: 'string', optional: true },
}
cleaners.push(
this._xo.addApiMethods({
audit: {
checkIntegrity,
clean,
deleteRange,
exportRecords,
generateFingerprint,
getRecords,
@@ -156,34 +263,44 @@ class AuditXoPlugin {
}
unload() {
this._removeListeners()
this._cleaners.forEach(cleaner => cleaner())
this._cleaners.length = 0
}
_addListener(event, listener_) {
const listener = async (...args) => {
try {
await listener_(...args)
} catch (error) {
log.error(error)
}
_addListeners(event, listener_) {
this._removeListeners()
if (this._active) {
const listeners = this._listeners
Object.keys(listeners).forEach(event => {
this._xo.addListener(event, listeners[event])
})
}
const xo = this._xo
xo.on(event, listener)
this._cleaners.push(() => xo.removeListener(event, listener))
}
_handleEvent(event, { userId, userIp, userName, ...data }) {
if (event !== 'apiCall' || !this._blockedList[data.method]) {
return this._auditCore.add(
{
userId,
userIp,
userName,
},
event,
data
)
_removeListeners() {
const listeners = this._listeners
Object.keys(listeners).forEach(event => {
this._xo.removeListener(event, listeners[event])
})
}
async _handleEvent(event, { userId, userIp, userName, ...data }) {
try {
if (event !== 'apiCall' || !this._blockedList[data.method]) {
return await this._auditCore.add(
{
userId,
userIp,
userName,
},
event,
data
)
}
} catch (error) {
log.error(error)
}
}
@@ -212,7 +329,7 @@ class AuditXoPlugin {
(req, res) => {
res.set({
'content-disposition': 'attachment',
'content-type': 'application/json',
'content-type': 'application/x-gzip',
})
return fromCallback(
pipeline,
@@ -225,7 +342,7 @@ class AuditXoPlugin {
{
suffix: `/audit-records-${new Date()
.toISOString()
.replace(/:/g, '_')}.gz`,
.replace(/:/g, '_')}.ndjson.gz`,
}
)
.then($getFrom => ({
@@ -233,6 +350,60 @@ class AuditXoPlugin {
}))
}
// See www-xo#344
async _uploadLastHash() {
const xo = this._xo
// In case of non-existent XOA plugin
if (xo.audit === undefined) {
return
}
const lastRecordId = await this._storage.getLastId()
if (lastRecordId === undefined) {
return
}
const chain = await xo.audit.getLastChain()
let lastValidHash
if (chain !== null) {
const hashes = chain.hashes
lastValidHash = hashes[hashes.length - 1]
if (lastValidHash === lastRecordId) {
return
}
// check the integrity of all stored hashes
try {
for (let i = 0; i < hashes.length - 1; ++i) {
await this._checkIntegrity({
oldest: hashes[i],
newest: hashes[i + 1],
})
}
} catch (error) {
if (!missingAuditRecord.is(error) && !alteredAuditRecord.is(error)) {
throw error
}
lastValidHash = undefined
}
}
// generate a valid fingerprint of all stored records in case of a failure integrity check
const { oldest, newest, error } = await this._generateFingerprint({
oldest: lastValidHash,
})
if (lastValidHash === undefined || error !== undefined) {
await xo.audit.startNewChain({ oldest, newest })
} else {
await xo.audit.extendLastChain({ oldest, newest })
}
}
async _checkIntegrity(props) {
const { oldest = NULL_ID, newest = await this._storage.getLastId() } = props
return this._auditCore.checkIntegrity(oldest, newest).catch(error => {
@@ -251,19 +422,30 @@ class AuditXoPlugin {
try {
return {
fingerprint: `${oldest}|${newest}`,
newest,
nValid: await this._checkIntegrity({ oldest, newest }),
oldest,
}
} catch (error) {
if (missingAuditRecord.is(error) || alteredAuditRecord.is(error)) {
return {
fingerprint: `${error.data.id}|${newest}`,
nValid: error.data.nValid,
error,
fingerprint: `${error.data.id}|${newest}`,
newest,
nValid: error.data.nValid,
oldest: error.data.id,
}
}
throw error
}
}
async _deleteRangeAndRewrite({ newest, oldest = newest }) {
await this._auditCore.deleteRangeAndRewrite(newest, oldest)
if (this._uploadLastHashJob !== undefined) {
await this._uploadLastHash()
}
}
}
AuditXoPlugin.prototype._getRecordsStream = asyncIteratorToStream(

View File

@@ -1,6 +1,6 @@
{
"name": "xo-server-auth-ldap",
"version": "0.8.1",
"version": "0.9.0",
"license": "AGPL-3.0-or-later",
"description": "LDAP authentication plugin for XO-Server",
"keywords": [
@@ -34,6 +34,7 @@
"node": ">=10"
},
"dependencies": {
"@babel/plugin-proposal-optional-chaining": "^7.11.0",
"exec-promise": "^0.7.0",
"inquirer": "^7.0.0",
"ldapts": "^2.2.1",

View File

@@ -32,10 +32,12 @@ export const configurationSchema = {
type: 'object',
properties: {
uri: {
title: 'URI',
description: 'URI of the LDAP server.',
type: 'string',
},
certificateAuthorities: {
title: 'Certificate Authorities',
description: `
Paths to CA certificates to use when connecting to SSL-secured LDAP servers.
@@ -47,12 +49,24 @@ If not specified, it will use a default set of well-known CAs.
},
},
checkCertificate: {
title: 'Check certificate',
description:
"Enforce the validity of the server's certificates. You can disable it when connecting to servers that use a self-signed certificate.",
type: 'boolean',
defaults: DEFAULTS.checkCertificate,
},
startTls: {
title: 'Use StartTLS',
type: 'boolean',
},
base: {
title: 'Base',
description:
'The base is the part of the description tree where the users and groups are looked for.',
type: 'string',
},
bind: {
title: 'Credentials',
description: 'Credentials to use before looking for the user record.',
type: 'object',
properties: {
@@ -74,12 +88,8 @@ For Microsoft Active Directory, it can also be \`<user>@<domain>\`.
},
required: ['dn', 'password'],
},
base: {
description:
'The base is the part of the description tree where the users are looked for.',
type: 'string',
},
filter: {
title: 'User filter',
description: `
Filter used to find the user.
@@ -102,9 +112,67 @@ Or something like this if you also want to filter by group:
type: 'string',
default: DEFAULTS.filter,
},
startTls: {
title: 'Use StartTLS',
type: 'boolean',
userIdAttribute: {
title: 'ID attribute',
description:
'Attribute used to map LDAP user to XO user. Must be unique. e.g.: `dn`',
type: 'string',
},
groups: {
title: 'Synchronize groups',
description: 'Import groups from LDAP directory',
type: 'object',
properties: {
base: {
title: 'Base',
description: 'Where to look for the groups.',
type: 'string',
},
filter: {
title: 'Filter',
description:
'Filter used to find the groups. e.g.: `(objectClass=groupOfNames)`',
type: 'string',
},
idAttribute: {
title: 'ID attribute',
description:
'Attribute used to map LDAP group to XO group. Must be unique. e.g.: `gid`',
type: 'string',
},
displayNameAttribute: {
title: 'Display name attribute',
description:
"Attribute used to determine the group's name in XO. e.g.: `cn`",
type: 'string',
},
membersMapping: {
title: 'Members mapping',
type: 'object',
properties: {
groupAttribute: {
title: 'Group attribute',
description:
'Attribute used to find the members of a group. e.g.: `memberUid`. The values must reference the user IDs (cf. user ID attribute)',
type: 'string',
},
userAttribute: {
title: 'User attribute',
description:
'User attribute used to match group members to the users. e.g.: `uidNumber`',
type: 'string',
},
},
required: ['groupAttribute', 'userAttribute'],
},
},
required: [
'base',
'filter',
'idAttribute',
'displayNameAttribute',
'membersMapping',
],
},
},
required: ['uri', 'base'],
@@ -166,12 +234,18 @@ class AuthLdap {
base: searchBase,
filter: searchFilter = DEFAULTS.filter,
startTls = false,
groups,
uri,
userIdAttribute,
} = conf
this._credentials = credentials
this._serverUri = uri
this._searchBase = searchBase
this._searchFilter = searchFilter
this._startTls = startTls
this._groupsConfig = groups
this._userIdAttribute = userIdAttribute
}
load() {
@@ -238,7 +312,31 @@ class AuthLdap {
`successfully bound as ${entry.dn} => ${username} authenticated`
)
logger(JSON.stringify(entry, null, 2))
return { username }
let user
if (this._userIdAttribute === undefined) {
// Support legacy config
user = await this._xo.registerUser(undefined, username)
} else {
const ldapId = entry[this._userIdAttribute]
user = await this._xo.registerUser2('ldap', {
user: { id: ldapId, name: username },
})
const groupsConfig = this._groupsConfig
if (groupsConfig !== undefined) {
try {
await this._synchronizeGroups(
user,
entry[groupsConfig.membersMapping.userAttribute]
)
} catch(error) {
logger(`failed to synchronize groups: ${error.message}`)
}
}
}
return { userId: user.id }
} catch (error) {
logger(`failed to bind as ${entry.dn}: ${error.message}`)
}
@@ -250,6 +348,146 @@ class AuthLdap {
await client.unbind()
}
}
// Synchronize user's groups OR all groups if no user is passed
async _synchronizeGroups(user, memberId) {
const logger = this._logger
const client = new Client(this._clientOpts)
try {
if (this._startTls) {
await client.startTLS(this._tlsOptions)
}
// Bind if necessary.
{
const { _credentials: credentials } = this
if (credentials) {
logger(`attempting to bind with as ${credentials.dn}...`)
await client.bind(credentials.dn, credentials.password)
logger(`successfully bound as ${credentials.dn}`)
}
}
logger('syncing groups...')
const {
base,
displayNameAttribute,
filter,
idAttribute,
membersMapping,
} = this._groupsConfig
const { searchEntries: ldapGroups } = await client.search(base, {
scope: 'sub',
filter: filter || '', // may be undefined
})
const xoUsers =
user !== undefined &&
(await this._xo.getAllUsers()).filter(
user =>
user.authProviders !== undefined && 'ldap' in user.authProviders
)
const xoGroups = await this._xo.getAllGroups()
// For each LDAP group:
// - create/update/delete the corresponding XO group
// - add/remove the LDAP-provided users
// One by one to avoid race conditions
for (const ldapGroup of ldapGroups) {
const groupLdapId = ldapGroup[idAttribute]
const groupLdapName = ldapGroup[displayNameAttribute]
// Empty or undefined names/IDs are invalid
if (!groupLdapId || !groupLdapName) {
logger(`Invalid group ID (${groupLdapId}) or name (${groupLdapName})`)
continue
}
let ldapGroupMembers = ldapGroup[membersMapping.groupAttribute]
ldapGroupMembers = Array.isArray(ldapGroupMembers)
? ldapGroupMembers
: [ldapGroupMembers]
// If a user was passed, only update the user's groups
if (user !== undefined && !ldapGroupMembers.includes(memberId)) {
continue
}
let xoGroup
const xoGroupIndex = xoGroups.findIndex(
group =>
group.provider === 'ldap' && group.providerGroupId === groupLdapId
)
if (xoGroupIndex === -1) {
if (
xoGroups.find(group => group.name === groupLdapName) !== undefined
) {
// TODO: check against LDAP groups that are being created as well
logger(`A group called ${groupLdapName} already exists`)
continue
}
xoGroup = await this._xo.createGroup({
name: groupLdapName,
provider: 'ldap',
providerGroupId: groupLdapId,
})
} else {
// Remove it from xoGroups as we will then delete all the remaining
// LDAP-provided groups
;[xoGroup] = xoGroups.splice(xoGroupIndex, 1)
await this._xo.updateGroup(xoGroup.id, { name: groupLdapName })
xoGroup = await this._xo.getGroup(xoGroup.id)
}
// If a user was passed, only add that user to the group and don't
// delete any groups (ie return immediately)
if (user !== undefined) {
await this._xo.addUserToGroup(user.id, xoGroup.id)
continue
}
const xoGroupMembers =
xoGroup.users === undefined ? [] : xoGroup.users.slice(0)
for (const ldapId of ldapGroupMembers) {
const xoUser = xoUsers.find(
user => user.authProviders.ldap.id === ldapId
)
if (xoUser === undefined) {
continue
}
// If the user exists in XO, should be a member of the LDAP-provided
// group but isn't: add it
const userIdIndex = xoGroupMembers.findIndex(id => id === xoUser.id)
if (userIdIndex !== -1) {
xoGroupMembers.splice(userIdIndex, 1)
continue
}
await this._xo.addUserToGroup(xoUser.id, xoGroup.id)
}
// All the remaining users of that group can be removed from it since
// they're not in the LDAP group
for (const userId of xoGroupMembers) {
await this._xo.removeUserFromGroup(userId, xoGroup.id)
}
}
if (user === undefined) {
// All the remaining groups provided by LDAP can be removed from XO since
// they don't exist in the LDAP directory any more
await Promise.all(
xoGroups
.filter(group => group.provider === 'ldap')
.map(group => this._xo.deleteGroup(group.id))
)
}
} finally {
await client.unbind()
}
}
}
// ===================================================================

View File

@@ -1,6 +1,6 @@
{
"name": "xo-server-backup-reports",
"version": "0.16.6",
"version": "0.16.7",
"license": "AGPL-3.0-or-later",
"description": "Backup reports plugin for XO-Server",
"keywords": [
@@ -38,7 +38,7 @@
"dependencies": {
"@xen-orchestra/defined": "^0.0.0",
"@xen-orchestra/log": "^0.2.0",
"human-format": "^0.10.0",
"human-format": "^0.11.0",
"lodash": "^4.13.1",
"moment-timezone": "^0.5.13"
},

View File

@@ -513,6 +513,8 @@ class BackupReportsXoPlugin {
} else {
globalTransferSize += size
}
} else if (operationLog.status === 'success') {
return
}
const operationText = [

View File

@@ -1,6 +1,6 @@
{
"name": "xo-server-load-balancer",
"version": "0.3.2",
"version": "0.3.3",
"license": "AGPL-3.0-or-later",
"description": "Load balancer for XO-Server",
"keywords": [

View File

@@ -1,6 +1,6 @@
{
"name": "xo-server-perf-alert",
"version": "0.2.2",
"version": "0.2.3",
"license": "AGPL-3.0-or-later",
"description": "Sends alerts based on performance criteria",
"keywords": [],
@@ -22,7 +22,7 @@
},
"dependencies": {
"@xen-orchestra/cron": "^1.0.6",
"d3-time-format": "^2.1.1",
"d3-time-format": "^3.0.0",
"json5": "^2.0.1",
"lodash": "^4.17.4"
},

View File

@@ -592,22 +592,11 @@ ${monitorBodies.join('\n')}`
const monitors = this._getMonitors()
for (const monitor of monitors) {
const snapshot = await monitor.snapshot()
for (const entry of snapshot) {
raiseOrLowerAlarm(
`${monitor.alarmId}|${entry.uuid}|RRD`,
entry.value === undefined,
() => {
this._sendAlertEmail(
'Secondary Issue',
`
## There was an issue when trying to check ${monitor.title}
${entry.listItem}`
)
},
() => {}
)
const entriesWithMissingStats = []
for (const entry of snapshot) {
if (entry.value === undefined) {
entriesWithMissingStats.push(entry)
continue
}
@@ -656,6 +645,23 @@ ${entry.listItem}
lowerAlarm
)
}
raiseOrLowerAlarm(
`${monitor.alarmId}|${entriesWithMissingStats
.map(({ uuid }) => uuid)
.sort()
.join('|')}|RRD`,
entriesWithMissingStats.length !== 0,
() => {
this._sendAlertEmail(
'Secondary Issue',
`
## There was an issue when trying to check ${monitor.title}
${entriesWithMissingStats.map(({ listItem }) => listItem).join('\n')}`
)
},
() => {}
)
}
}

View File

@@ -16,7 +16,7 @@
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build"
},
"version": "0.4.2",
"version": "1.0.4",
"engines": {
"node": ">=8.10"
},
@@ -30,6 +30,9 @@
},
"dependencies": {
"@xen-orchestra/log": "^0.2.0",
"@xen-orchestra/openflow": "^0.1.1",
"@vates/coalesce-calls": "^0.1.0",
"ipaddr.js": "^1.9.1",
"lodash": "^4.17.11",
"node-openssl-cert": "^0.0.117",
"promise-toolbox": "^0.15.0",

View File

@@ -1,15 +1,17 @@
import assert from 'assert'
import createLogger from '@xen-orchestra/log'
import NodeOpenssl from 'node-openssl-cert'
import uuidv4 from 'uuid/v4'
import { access, constants, readFile, writeFile } from 'fs'
import { EventEmitter } from 'events'
import { filter, find, forOwn, map, omitBy } from 'lodash'
import { fromCallback, promisify } from 'promise-toolbox'
import { join } from 'path'
import { v4 as uuidv4 } from 'uuid'
import { OpenFlowChannel } from './protocol/openflow-channel'
import { OvsdbClient } from './protocol/ovsdb-client'
import { PrivateNetwork } from './private-network/private-network'
import { TlsHelper } from './utils/tls-helper'
// =============================================================================
@@ -48,6 +50,10 @@ export const configurationSchema = {
// =============================================================================
const noop = Function.prototype
// -----------------------------------------------------------------------------
const fileWrite = promisify(writeFile)
const fileRead = promisify(readFile)
async function fileExists(path) {
@@ -235,12 +241,28 @@ async function createTunnel(host, network) {
return
}
const encapsulation = otherConfig['xo:sdn-controller:encapsulation'] ?? 'gre'
try {
const tunnelRef = await host.$xapi.call(
'tunnel.create',
hostPif.$ref,
network.$ref
)
let tunnelRef
try {
tunnelRef = await host.$xapi.call(
'tunnel.create',
hostPif.$ref,
network.$ref,
encapsulation
)
} catch (error) {
if (error.code === 'MESSAGE_PARAMETER_COUNT_MISMATCH') {
// Before 8.2, protocol field did not exist, let's try without it!
tunnelRef = await host.$xapi.call(
'tunnel.create',
hostPif.$ref,
network.$ref
)
} else {
throw error
}
}
const tunnel = await host.$xapi._getOrWaitObject(tunnelRef)
await tunnel.$xapi._waitObjectState(
tunnel.access_PIF,
@@ -312,6 +334,10 @@ class SDNController extends EventEmitter {
- `status`:
- `active`: `true` if the corresponding OpenVSwitch bridge is correctly configured and working
- `key` : Corresponding OpenVSwitch bridge name (missing if `active` is `false`)
Attributes on VIFs (OpenFlow entries):
- `other_config`:
- `xo:sdn-controller:of-rules`: A list of OpenFlow entries to apply to this VIF
*/
constructor({ xo, getDataDir }) {
@@ -335,6 +361,12 @@ class SDNController extends EventEmitter {
this._prevVni = 0
this.ovsdbClients = {}
this.ofChannels = {}
this._tlsHelper = new TlsHelper()
this._handledTasks = []
this._managed = []
}
// ---------------------------------------------------------------------------
@@ -369,10 +401,11 @@ class SDNController extends EventEmitter {
fileRead(join(certDirectory, CLIENT_CERT)),
fileRead(join(certDirectory, CA_CERT)),
])
forOwn(this.ovsdbClients, client => {
client.updateCertificates(this._clientKey, this._clientCert, this._caCert)
})
this._tlsHelper.updateCertificates(
this._clientKey,
this._clientCert,
this._caCert
)
const updatedPools = []
await Promise.all(
map(this.privateNetworks, async privateNetworks => {
@@ -390,7 +423,8 @@ class SDNController extends EventEmitter {
}
async load() {
// Expose method to create private network
// ---------------- Private Network method ---------------------------------
const createPrivateNetwork = params =>
this._createPrivateNetwork({
encrypted: false,
@@ -421,10 +455,41 @@ class SDNController extends EventEmitter {
mtu: { type: 'integer', optional: true },
preferredCenterId: { type: 'string', optional: true },
}
createPrivateNetwork.permission = 'admin'
// ---------------- OpenFlow rules method ----------------------------------
const addRule = params => this._addRule(params)
addRule.description = 'Add an ACL rule to a VIF'
addRule.params = {
allow: { type: 'boolean' },
direction: { type: 'string' },
ipRange: { type: 'string', optional: true },
port: { type: 'integer', optional: true },
protocol: { type: 'string', optional: true },
vifId: { type: 'string' },
}
addRule.permission = 'admin'
const deleteRule = params => this._deleteRule(params)
deleteRule.description = 'Delete an ACL rule from a VIF'
deleteRule.params = {
direction: { type: 'string' },
ipRange: { type: 'string', optional: true },
port: { type: 'integer', optional: true },
protocol: { type: 'string', optional: true },
vifId: { type: 'string' },
}
deleteRule.permission = 'admin'
// -------------------------------------------------------------------------
this._unsetApiMethods = this._xo.addApiMethods({
sdnController: {
createPrivateNetwork,
addRule,
deleteRule,
},
})
@@ -456,6 +521,10 @@ class SDNController extends EventEmitter {
this._cleaners = []
this.ovsdbClients = {}
this.ofChannels = {}
this._handledTasks = []
this._managed = []
this._unsetApiMethods()
}
@@ -483,7 +552,8 @@ class SDNController extends EventEmitter {
this._cleaners.push(await this._manageXapi(xapi))
const hosts = filter(xapi.objects.all, { $type: 'host' })
for (const host of hosts) {
this._createOvsdbClient(host)
this._getOrCreateOvsdbClient(host)
this._getOrCreateOfChannel(host)
}
// Add already existing private networks
@@ -596,6 +666,13 @@ class SDNController extends EventEmitter {
await this._electNewCenter(privateNetwork)
})
)
// -----------------------------------------------------------------------
const vifs = filter(xapi.objects.all, { $type: 'VIF' })
for (const vif of vifs) {
await this._applyVifOfRules(vif)
}
} catch (error) {
log.error('Error while handling xapi connection', {
id: xapi.pool.uuid,
@@ -632,6 +709,112 @@ class SDNController extends EventEmitter {
// ===========================================================================
/**
 * Add an OpenFlow ACL rule to a VIF and persist it in the VIF's
 * `other_config` so it can be re-applied later (e.g. after a migration or
 * reboot — see `_applyVifOfRules`).
 *
 * Errors are logged and swallowed on purpose: rule management is
 * best-effort and must not break the caller (API method / re-apply loop).
 *
 * @param {object} rule
 * @param {boolean} rule.allow - allow or deny matching traffic
 * @param {string} rule.direction
 * @param {string} [rule.ipRange=''] - empty string means "any"
 * @param {number} [rule.port]
 * @param {string} [rule.protocol]
 * @param {string} rule.vifId - XO id of the target VIF
 */
async _addRule({ allow, direction, ipRange = '', port, protocol, vifId }) {
  const vif = this._xo.getXapiObject(this._xo.getObject(vifId, 'VIF'))
  try {
    // An unplugged VIF has no OpenFlow port on the bridge.
    assert(vif.currently_attached, 'VIF needs to be plugged to add rule')

    await this._setPoolControllerIfNeeded(vif.$pool)
    const client = this._getOrCreateOvsdbClient(vif.$VM.$resident_on)
    const channel = this._getOrCreateOfChannel(vif.$VM.$resident_on)
    // OpenFlow port number identifying this VIF on the host's bridge.
    const ofport = await client.getOfPortForVif(vif)
    await channel.addRule(
      vif,
      allow,
      protocol,
      port,
      ipRange,
      direction,
      ofport
    )

    // Persist the rule as a JSON string in the VIF's other_config,
    // deduplicated by exact string comparison (key order matters).
    const vifRules = vif.other_config['xo:sdn-controller:of-rules']
    const newVifRules = vifRules !== undefined ? JSON.parse(vifRules) : []
    const stringRule = JSON.stringify({
      allow,
      protocol,
      port,
      ipRange,
      direction,
    })
    if (!newVifRules.includes(stringRule)) {
      newVifRules.push(stringRule)
      await vif.update_other_config(
        'xo:sdn-controller:of-rules',
        JSON.stringify(newVifRules)
      )
    }
  } catch (error) {
    // Best-effort: log with full context, do not rethrow.
    log.error('Error while adding OF rule', {
      error,
      vif: vif.uuid,
      host: vif.$VM.$resident_on.uuid,
      allow,
      protocol,
      port,
      ipRange,
      direction,
    })
  }
}
async _deleteRule(
{ direction, ipRange = '', port, protocol, vifId },
updateOtherConfig = true
) {
let vif = this._xo.getXapiObject(this._xo.getObject(vifId, 'VIF'))
try {
await this._setPoolControllerIfNeeded(vif.$pool)
const client = this._getOrCreateOvsdbClient(vif.$VM.$resident_on)
const channel = this._getOrCreateOfChannel(vif.$VM.$resident_on)
const ofport = await client.getOfPortForVif(vif)
await channel.deleteRule(vif, protocol, port, ipRange, direction, ofport)
if (!updateOtherConfig) {
return
}
const vifRules = vif.other_config['xo:sdn-controller:of-rules']
if (vifRules === undefined) {
// Nothing to do
return
}
const newVifRules = JSON.parse(vifRules).filter(vifRule => {
const rule = JSON.parse(vifRule)
return (
rule.protocol !== protocol ||
rule.port !== port ||
rule.ipRange !== ipRange ||
rule.direction !== direction
)
})
await vif.update_other_config(
'xo:sdn-controller:of-rules',
Object.keys(newVifRules).length === 0
? null
: JSON.stringify(newVifRules)
)
vif = await vif.$xapi.barrier(vif.$ref)
// Put back rules that could have been wrongfully deleted because delete rule too general
await this._applyVifOfRules(vif)
} catch (error) {
log.error('Error while adding OF rule', {
error,
vif: vif.uuid,
host: vif.$VM.$resident_on.uuid,
protocol,
port,
ipRange,
direction,
})
}
}
// ---------------------------------------------------------------------------
async _createPrivateNetwork({
poolIds,
pifIds,
@@ -661,8 +844,6 @@ class SDNController extends EventEmitter {
const privateNetwork = new PrivateNetwork(this, uuidv4(), preferredCenter)
for (const pool of pools) {
await this._setPoolControllerIfNeeded(pool)
const pifId = pifIds.find(id => {
const pif = this._xo.getXapiObject(this._xo.getObject(id, 'PIF'))
return pif.$pool.$ref === pool.$ref
@@ -703,9 +884,11 @@ class SDNController extends EventEmitter {
await Promise.all(
map(hosts, async host => {
await createTunnel(host, createdNetwork)
this._createOvsdbClient(host)
this._getOrCreateOvsdbClient(host)
this._getOrCreateOfChannel(host)
})
)
await this._setPoolControllerIfNeeded(pool)
await privateNetwork.addNetwork(createdNetwork)
this._networks.set(createdNetwork.$id, createdNetwork.$ref)
@@ -721,6 +904,10 @@ class SDNController extends EventEmitter {
// ---------------------------------------------------------------------------
async _manageXapi(xapi) {
if (this._managed.includes(xapi.pool.uuid)) {
return noop // pushed in _cleaners
}
const { objects } = xapi
const objectsRemovedXapi = this._objectsRemoved.bind(this, xapi)
@@ -729,6 +916,7 @@ class SDNController extends EventEmitter {
objects.on('remove', objectsRemovedXapi)
await this._installCaCertificateIfNeeded(xapi)
this._managed.push(xapi.pool.uuid)
return () => {
objects.removeListener('add', this._objectsAdded)
@@ -738,7 +926,7 @@ class SDNController extends EventEmitter {
}
_objectsAdded(objects) {
forOwn(objects, object => {
forOwn(objects, async object => {
const { $type } = object
if ($type === 'host') {
@@ -750,7 +938,18 @@ class SDNController extends EventEmitter {
if (!this._newHosts.some(_ => _.$ref === object.$ref)) {
this._newHosts.push(object)
}
this._createOvsdbClient(object)
this._getOrCreateOvsdbClient(object)
this._getOrCreateOfChannel(object)
} else if ($type === 'PIF') {
log.debug('New PIF', {
device: object.device,
host: object.$host.name_label,
network: object.$network.name_label,
pool: object.$pool.name_label,
})
const client = this.ovsdbClients[object.host]
client.setBridgeControllerForNetwork(object.$network)
}
})
}
@@ -765,6 +964,10 @@ class SDNController extends EventEmitter {
await this._hostUpdated(object)
} else if ($type === 'host_metrics') {
await this._hostMetricsUpdated(object)
} else if ($type === 'VM') {
await this._vmUpdated(object)
} else if ($type === 'VIF') {
await this._vifUpdated(object)
}
} catch (error) {
log.error('Error in _objectsUpdated', {
@@ -782,6 +985,10 @@ class SDNController extends EventEmitter {
this.ovsdbClients,
client => client.host.$id === id
)
this.ofChannels = omitBy(
this.ofChannels,
channel => channel.host.$id === id
)
// If a Star center host is removed: re-elect a new center where needed
const starCenterRef = this._starCenters.get(id)
@@ -898,6 +1105,8 @@ class SDNController extends EventEmitter {
})
}
this._setBridgeControllerForHost(host)
const privateNetworks = filter(
this.privateNetworks,
privateNetwork => privateNetwork[host.$pool.uuid] !== undefined
@@ -928,6 +1137,64 @@ class SDNController extends EventEmitter {
return this._hostUnreachable(ovsdbClient.host)
}
/**
 * React to VM lifecycle operations so that the VM's OpenFlow rules survive
 * migrations, reboots, shutdowns and starts.
 *
 * NOTE(review): the async callback's promise is not awaited by lodash's
 * `forOwn`, so each task is handled fire-and-forget; failures must be
 * handled inside the callback (here `watchTask` rejections are discarded
 * via `noop`).
 */
async _vmUpdated(vm) {
  forOwn(vm.current_operations, async (value, key) => {
    // `key` is the XAPI task ref: process each task only once, even if the
    // VM object is updated several times while the task is running.
    if (this._handledTasks.includes(key)) {
      return
    }
    this._handledTasks.push(key)

    // Clean before task ends
    if (
      value === 'migrate_send' ||
      value === 'pool_migrate' ||
      value === 'clean_reboot' ||
      value === 'hard_reboot' ||
      value === 'hard_shutdown' ||
      value === 'clean_shutdown'
    ) {
      await this._cleanOfRules(vm)
    }
    // Wait for the task to finish; errors are ignored on purpose.
    await vm.$xapi.watchTask(key).catch(noop)
    // Re-apply rules after task ended
    if (
      value === 'migrate_send' ||
      value === 'pool_migrate' ||
      value === 'clean_reboot' ||
      value === 'hard_reboot' ||
      value === 'start' ||
      value === 'start_on'
    ) {
      // Refresh the VM object: VIF/host state changed during the task.
      vm = await vm.$xapi.barrier(vm.$ref)
      await this._applyOfRules(vm)
    }
    // Forget the task so a future identical ref can be handled again.
    this._handledTasks = filter(this._handledTasks, ref => ref !== key)
  })
}
/**
 * React to VIF plug/unplug operations: re-apply the persisted OpenFlow
 * rules after a plug, remove them from the switch (but keep them persisted)
 * before an unplug completes.
 */
async _vifUpdated(vif) {
  await Promise.all(
    map(vif.current_operations, async (value, key) => {
      // `key` is the XAPI task ref: process each task only once.
      if (this._handledTasks.includes(key)) {
        return
      }
      this._handledTasks.push(key)
      if (value === 'plug') {
        // Wait for the plug to finish (errors ignored), refresh the VIF,
        // then re-apply its persisted rules.
        await vif.$xapi.watchTask(key).catch(noop)
        vif = await vif.$xapi.barrier(vif.$ref)
        await this._applyVifOfRules(vif)
      } else if (value === 'unplug' || value === 'unplug_force') {
        // Clean while the VIF still has an OpenFlow port, then wait for
        // the unplug to finish.
        await this._cleanVifOfRules(vif)
        await vif.$xapi.watchTask(key).catch(noop)
      }
      // Forget the task so a future identical ref can be handled again.
      this._handledTasks = filter(this._handledTasks, ref => ref !== key)
    })
  )
}
// ---------------------------------------------------------------------------
async _setPoolControllerIfNeeded(pool) {
@@ -948,9 +1215,20 @@ class SDNController extends EventEmitter {
})
}
const hosts = filter(pool.$xapi.objects.all, { $type: 'host' })
await Promise.all(
hosts.map(host => {
return this._setBridgeControllerForHost(host)
})
)
this._cleaners.push(await this._manageXapi(pool.$xapi))
}
_setBridgeControllerForHost(host) {
const client = this.ovsdbClients[host.$ref]
return client.setBridgeController()
}
// ---------------------------------------------------------------------------
async _installCaCertificateIfNeeded(xapi) {
@@ -1116,18 +1394,60 @@ class SDNController extends EventEmitter {
// ---------------------------------------------------------------------------
_createOvsdbClient(host) {
if (this.ovsdbClients[host.$ref] !== undefined) {
async _applyVifOfRules(vif) {
if (!vif.currently_attached) {
return
}
const client = new OvsdbClient(
host,
this._clientKey,
this._clientCert,
this._caCert
)
this.ovsdbClients[host.$ref] = client
const vifRules = vif.other_config['xo:sdn-controller:of-rules']
const parsedRules = vifRules !== undefined ? JSON.parse(vifRules) : []
for (const stringRule of parsedRules) {
const rule = JSON.parse(stringRule)
await this._addRule({ ...rule, vifId: vif.$id })
}
}
async _cleanVifOfRules(vif) {
const vifRules = vif.other_config['xo:sdn-controller:of-rules']
const parsedRules = vifRules !== undefined ? JSON.parse(vifRules) : []
for (const stringRule of parsedRules) {
const rule = JSON.parse(stringRule)
await this._deleteRule({ ...rule, vifId: vif.$id }, false)
}
}
async _cleanOfRules(vm) {
for (const vif of vm.$VIFs) {
await this._cleanVifOfRules(vif)
}
}
async _applyOfRules(vm) {
for (const vif of vm.$VIFs) {
await this._applyVifOfRules(vif)
}
}
// ---------------------------------------------------------------------------
_getOrCreateOvsdbClient(host) {
let client = this.ovsdbClients[host.$ref]
if (client === undefined) {
client = new OvsdbClient(host, this._tlsHelper)
this.ovsdbClients[host.$ref] = client
}
return client
}
_getOrCreateOfChannel(host) {
let channel = this.ofChannels[host.$ref]
if (channel === undefined) {
channel = new OpenFlowChannel(host, this._tlsHelper)
this.ofChannels[host.$ref] = channel
}
return channel
}
}

View File

@@ -1,3 +1,4 @@
import assert from 'assert'
import createLogger from '@xen-orchestra/log'
import { filter, forOwn, sample } from 'lodash'
@@ -61,13 +62,39 @@ export class PrivateNetwork {
otherConfig['xo:sdn-controller:encrypted'] === 'true'
? createPassword()
: undefined
const pifDevice = otherConfig['xo:sdn-controller:pif-device']
const pifVlan = +otherConfig['xo:sdn-controller:vlan']
const hostPif = hostClient.host.$PIFs.find(
pif =>
pif?.device === pifDevice &&
pif.VLAN === pifVlan &&
pif.ip_configuration_mode !== 'None'
)
const centerPif = centerClient.host.$PIFs.find(
pif =>
pif?.device === pifDevice &&
pif.VLAN === pifVlan &&
pif.ip_configuration_mode !== 'None'
)
assert(hostPif !== undefined, 'No PIF found', {
privateNetwork: this.uuid,
pifDevice,
pifVlan,
host: host.name_label,
})
assert(centerPif !== undefined, 'No PIF found in center', {
privateNetwork: this.uuid,
pifDevice,
pifVlan,
host: this.center.name_label,
})
let bridgeName
try {
;[bridgeName] = await Promise.all([
hostClient.addInterfaceAndPort(
network,
centerClient.host.address,
centerPif.IP,
encapsulation,
vni,
password,
@@ -75,7 +102,7 @@ export class PrivateNetwork {
),
centerClient.addInterfaceAndPort(
centerNetwork,
hostClient.host.address,
hostPif.IP,
encapsulation,
vni,
password,

Some files were not shown because too many files have changed in this diff Show More