Compare commits

...

149 Commits

Author SHA1 Message Date
BenjiReis
c240884ea4 WIP 2020-10-08 11:36:48 +02:00
badrAZ
a99086b6bd chore(xo-web/log-alert-body): remove unused subscription (#5292)
Introduced By 3370014ddf
2020-10-07 15:17:29 +02:00
badrAZ
a186672447 feat(xo-web/sorted-table): ability to collapse actions (#5311)
See #5148
2020-10-07 11:13:10 +02:00
badrAZ
0b8a7c0d09 feat(xo-web/backup/new): enable created schedules by default (#5280)
See xoa-support#2921
2020-10-07 10:36:10 +02:00
BenjiReis
1990bf3d7a fix(sdn-controller): use correct bridge address to create tunnels (#5281)
Fixes xoa-support#2919
2020-10-06 23:54:02 +02:00
Julien Fontanet
ea74a7e401 fix(backups-cli clean-vms): fix limit-concurrency-decorator import 2020-10-06 18:35:35 +02:00
Nicolas Raynaud
bf12c3ff74 fix(xo-server/backup-ng): use getRemoteWithCredentials (#5315)
Introduced in #4907

Fix #5253

Otherwise the handler will be incorrectly defined due to the obfuscation of credentials.
2020-10-06 18:13:27 +02:00
Rajaa.BARHTAOUI
9d261aae76 fix(xo-web/home): hide backup filter for non-admin users (#5287)
See #5285
2020-10-06 16:47:20 +02:00
badrAZ
3d8c8fd745 feat(xo-web/backup-reports): hide merge task when no merge (#5263) 2020-10-06 16:08:12 +02:00
Rajaa.BARHTAOUI
6ad7db522a fix(xo-web/vm/disks): fix "not enough permissions" error (#5299)
Introduced by 1116530a6b
2020-10-06 15:16:27 +02:00
Rajaa.BARHTAOUI
385984b1d8 fix(xo-web/vm/disks): VDI disappears after migration (#5296)
Related to 1116530a6b
2020-10-06 10:38:33 +02:00
Mathieu
4f3d4b06b5 fix(xo-web/user): fix custom filters in default filter select (#5298) 2020-10-02 14:40:17 +02:00
badrAZ
2291986e2c fix(xo-web/new/network): omit bond slave PIFs from selection (#5262)
See xcp-ng.org/forum/topic/3524/old-networks-not-removed-after-creating-bond/8
2020-10-02 14:12:53 +02:00
Nicolas Raynaud
fc81cf4d70 fix(xo-web/remotes): fix S3 secret key edit UI (#5305)
Fixes #5233
2020-10-02 11:38:18 +02:00
Pierre Donias
fdeab86a87 fix(xo-web/xoa): don't show expired notifications (#5304) 2020-10-01 16:40:27 +02:00
Julien Fontanet
3616b7a67b feat(xo-server/sensitive-values): make obfuscated value obvious 2020-10-01 15:01:21 +02:00
Julien Fontanet
83ea57d825 feat(xo-server/vm.migrate): ensure original error is logged
Fixes https://github.com/vatesfr/xen-orchestra/pull/4364/files#r488539823
2020-10-01 14:52:42 +02:00
Mathieu
24a69bcade feat(xo-server,xo-web/host/advanced): add IOMMU state (#5294)
Fixes #4936
2020-10-01 10:40:10 +02:00
badrAZ
58dc3244be feat: release 5.51.0 (#5295) 2020-09-30 14:19:35 +02:00
badrAZ
61e580b992 feat: technical release (#5293) 2020-09-29 16:40:26 +02:00
Rajaa.BARHTAOUI
1116530a6b feat(xo-web/vm/disks): ability to migrate VDIs to other SRs within resource set (#5201)
See #5020
2020-09-29 16:07:10 +02:00
Pierre Donias
8cfaabedeb feat(xo-server-auth-ldap): import LDAP groups (#5279)
See #1884

When a user logs into XO using LDAP:

- Create a XO user if it doesn't already exist
- Mark it as being provided by LDAP and bind it to the LDAP user with an ID
- If group synchronization is enabled:
  - Fetch all the LDAP groups based on the Base and Filter (plugin config)
  - Create and delete the corresponding XO groups based on the LDAP groups that
    were found
  - Add and remove the XO users from the XO groups based on the LDAP data
2020-09-29 15:35:30 +02:00
Rajaa.BARHTAOUI
66ba05dcd0 feat(xo-web/tasks): display linked objects (#5267)
Fixes #4275
2020-09-29 15:13:56 +02:00
Julien Fontanet
d1db616d1e feat(backups-cli/clean-vms): limit VHD merging concurrency to 1 2020-09-29 10:43:48 +02:00
Nicolas Raynaud
aed09b152a fix(import/ova): speedup the import of gziped vmdk disks nested in .ova (#5275)
This is the followup to #5085

Avoid unzipping the entire file from the beginning before each read.
The test case went from 10min down to 26 seconds.

When reading a block from the gzipped file, we keep the current state in memory; if the next read happens at an offset greater than the previous read, we just carry on decompressing the file until the desired position.

The previous code would decompress from the start of the file for every read operation.
2020-09-28 15:42:55 +02:00
Nicolas Raynaud
f755365e23 fix(xo-web/remotes): fix editing bucket and directory for S3 (#5276) 2020-09-28 15:33:06 +02:00
badrAZ
ccd34c1610 fix(CHANGELOG): update advanced filter entry (#5290)
Introduced by 45fe70f0fa
2020-09-28 12:12:15 +02:00
badrAZ
f9104e6cc9 fix(xo-web/messages): remove unused messages (#5289)
Introduced by 45fe70f0fa
2020-09-28 12:10:21 +02:00
Nicolas Raynaud
4bb702fe89 fix(fs/S3): support 50GB+ files (#5242) 2020-09-27 20:49:41 +02:00
Julien Fontanet
511a04dad5 feat(xo-server-auth-ldap/configuration): add titles and reorder settings 2020-09-27 20:37:00 +02:00
Nicolas Raynaud
f3527a44d7 fix(vm/import): make the UI respond instantly to .ova file drop (#5274)
When dropping a file on the import zone, the UI didn't acknowledge the file until the vmdk table were parsed.

Now the UI parses the XML instantly, displays it on the UI, and starts parsing the tables in the background. If the user clicks the "import" button, the system will finish parsing the tables and start the upload in one fell swoop, hiding the parsing time in the upload time.
2020-09-27 19:45:32 +02:00
Julien Fontanet
fdbe84cb1e chore(xo-server-test): format with Prettier 2020-09-27 19:42:35 +02:00
badrAZ
45fe70f0fa feat(xo-web/logs/backup-ng): advanced filter (#5208)
See #4406
2020-09-25 16:58:33 +02:00
badrAZ
2aed2fd534 feat(xo-web/logs/backup-ng): log tasks pagination (#5209)
See #4406
2020-09-24 16:36:18 +02:00
Julien Fontanet
a523fa9733 feat(@xen-orchestra/backups-cli): 0.2.0 2020-09-24 15:14:07 +02:00
Julien Fontanet
0f42f032e4 feat(backups-cli/info): compute the used space per job 2020-09-24 15:12:38 +02:00
Dom Del Nano
4575b98fd5 fix(xo-server#removeSubjectToResourceSet): rename to removeSubjectFromResourceSet (#5266) 2020-09-21 17:50:55 +02:00
Pierre Donias
3a0cc0d6f6 fix(xo-server/subjects/addToArraySet): dont erase previous values (#5269)
Prior to this change, adding a value to an existing set that already contains
that value would replace the whole set with a new one containing only that
value.
2020-09-21 12:14:50 +02:00
Pierre Donias
626e2fcb12 fix(xo-server/users): serialize properties on user create as well (#5273)
This didn't break anything because we usually don't assign `groups` and/or
`preferences` (which are the only 2 properties that need serialization) on user
creation.

This also prepares a minimal change to add a `authProviders` object property on
users.
2020-09-18 12:11:20 +02:00
Pierre Donias
592feb54b7 fix(xo-server/_authenticateUser): remove broken/unused provider API (#5270) 2020-09-18 10:54:20 +02:00
badrAZ
9c6b63e7e4 feat: release 5.50.3 (#5272) 2020-09-17 16:19:30 +02:00
badrAZ
4364a74b7a feat: technical release (patch) (#5271) 2020-09-17 16:07:58 +02:00
badrAZ
00f13102f8 feat(xo-server-audit): API method to clean DB (#5150) 2020-09-17 15:03:03 +02:00
badrAZ
3f17389871 fix(xo-server-audit): remove unused variable (#5268) 2020-09-17 10:51:09 +02:00
Julien Fontanet
726ba287b1 chore(xo-server-audit/_uploadLastHash): simplify conditions 2020-09-17 10:07:32 +02:00
Julien Fontanet
42ee29cb3c fix(xo-server-audit/_uploadLastHash): dont hide errors 2020-09-17 10:06:28 +02:00
badrAZ
8a98b6b012 feat(xo-server-audit/_uploadLastHash): check integrity sequentially (#5250)
- implementation is simpler
- stop on first error
2020-09-17 09:35:42 +02:00
Julien Fontanet
14ab694804 fix(xo-cli): mkdirp must not be promisified
Introduced by d622f7a65
2020-09-16 15:56:38 +02:00
Nicolas Raynaud
14b8cda543 fix(xo-vmdk-to-vhd/grabTables): read each entry independently (#5255)
Reading all entries at once causes problems on some VMDKs (those generated by VirtualBox) because they appear to be distributed throughout the VMDK, thus making the buffer not fit in memory.

See https://xcp-ng.org/forum/topic/3374/cannot-import-ova-from-virtualbox/14?_=1599689219209
2020-09-16 11:46:10 +02:00
Julien Fontanet
4264e34ffd feat(xo-web/createSubscription): support lazy subscribers (#5158)
These subscribers follow the value of the subscription but do not make the
subscription refresh itself.

A lazy subscriber triggers an initial fetch if no value is available.
2020-09-16 10:49:54 +02:00
Pierre Donias
bd9bf55e43 feat(xo-web/groups): bulk deletion (#5264) 2020-09-16 10:46:35 +02:00
Albin Hedman
7c802bbd33 feat(xo-web/dashboard/health): add 'too many snapshots' section (#5238) 2020-09-14 10:45:37 +02:00
Julien Fontanet
9e37f3f586 feat(xo-web/new VM): hide missing VDIs
See #5222

Related to 15bc30a2d
2020-09-11 11:11:25 +02:00
Nicolas Raynaud
1d4f5d068a fix(xo-web/VM import): make description optional (#5258) 2020-09-11 08:45:10 +02:00
badrAZ
5be5eb80e8 feat: release 5.50.2 (#5257) 2020-09-10 17:03:26 +02:00
Julien Fontanet
12c774a34a feat(travis-tests): dont stop on first failure on master 2020-09-10 16:42:56 +02:00
badrAZ
14c3fa4378 feat: technical release (patch) (#5256) 2020-09-10 16:36:25 +02:00
badrAZ
2f17420721 feat(xo-web/backup/overview): add link from log to its job (#5202)
See #4564
2020-09-10 16:05:36 +02:00
badrAZ
8d7f8d156f fix(xo-server,xo-web/orphan VDIs): ignore irrelevant VDI types (#5249)
Fixes #5248
2020-09-10 16:00:20 +02:00
Julien Fontanet
38248d8c35 fix(test): auto map @xen-orchestra/*, @vates/* and xo-* 2020-09-10 15:32:08 +02:00
badrAZ
edaae02892 fix(xo-web,xo-server#probeIscsiLuns): handle undefined lun size (#5212)
See xoa-support#2815
See https://xcp-ng.org/forum/topic/3409/getting-error-when-trying-to-mount-iscsi-lun
2020-09-09 12:11:50 +02:00
Rajaa.BARHTAOUI
846eff4984 feat(xo-web/vm/networks): improve tooltip messages (#5227)
See https://github.com/vatesfr/xen-orchestra/issues/4713#issuecomment-667655321
2020-09-09 11:59:54 +02:00
badrAZ
481adf3a1e feat(xo-server-audit): don't save last hash when it doesn't change (#5251) 2020-09-08 14:35:19 +02:00
Julien Fontanet
d622f7a65c chore: update dependencies 2020-09-07 10:26:51 +02:00
badrAZ
a479501aef feat: release 5.50.1 (#5246) 2020-09-04 12:04:55 +02:00
badrAZ
2456374e5a feat: technical release (patch) (#5247) 2020-09-04 11:55:01 +02:00
badrAZ
c77016ea44 feat(xo-server-usage-report): ignore replicated VMs (#5241)
Fixes #4778
2020-09-04 11:42:32 +02:00
badrAZ
6fd45a37e2 feat: technical release (patch) (#5245) 2020-09-04 11:16:45 +02:00
badrAZ
9be56d3ab8 fix(xo-server-audit): handle non-existent XOA plugin (#5239)
See 38de5048bc (commitcomment-41875481)
2020-09-04 10:31:46 +02:00
Julien Fontanet
24b264b6c9 fix(xo-server): TX checksumming is enabled by default
Introduced by fe2de9c1154c5a0b183c6d7897f1d9c376fa4031

Fixes #5234
2020-08-28 16:01:26 +02:00
tonyuh
7f9130470b feat(docs/backup_troubleshooting): SR_OPERATION_NOT_SUPPORTED (#5232)
Add SR_OPERATION_NOT_SUPPORTED error troubleshooting.
2020-08-28 11:49:47 +02:00
Julien Fontanet
b82aa1daa5 feat: release 5.50 2020-08-27 15:06:49 +02:00
Julien Fontanet
53cb325974 feat: technical release 2020-08-27 14:46:33 +02:00
Julien Fontanet
1256c320e3 feat(xo-web/host/network): button to scan PIFs
Fixes #5230
2020-08-27 11:42:28 +02:00
Julien Fontanet
15bc30a2d5 feat(xo-web/iso-device): hide missing VDIs
Fixes #5222
2020-08-27 11:13:59 +02:00
Julien Fontanet
fc3bc8468f feat(xo-server/backup): add proxyId to job log 2020-08-27 10:54:44 +02:00
Julien Fontanet
b4e068f630 chore(xo-server/backup executor): proxyId = job.proxy 2020-08-26 16:50:33 +02:00
Pierre Donias
08eef80673 feat(xo-web/orphan VDIs): show VDI(-snapshot)s that don't have VBDs (#5228)
In the Dashboard > Health > Orphan VDIs table, show non-ISO VDIs and
VDI-snapshot that don't have any VBDs.
2020-08-25 09:32:21 +02:00
Pierre Donias
152f73ebf0 feat: technical release (#5226) 2020-08-20 15:15:34 +02:00
badrAZ
38de5048bc feat(xo-server-audit): backup last hash (#5077) 2020-08-20 12:32:22 +02:00
Rajaa.BARHTAOUI
c4d96fbc49 feat(xo-web/vm/network): ability to change VIF locking mode (#5188)
See #4713
2020-08-20 09:48:50 +02:00
BenjiReis
ff25d402c1 fix(sdn-controller): host.$PIFs can have undefined element (#5217) 2020-08-20 09:25:41 +02:00
Julien Fontanet
f957024605 chore(xo-web): use js-cookie instead of cookies-js (#5224)
See #5223

`cookies-js` is no longer maintained.
2020-08-19 17:18:03 +02:00
badrAZ
006e54e2fd feat(xo-web, xo-server/proxy): improve proxy health check errors (#5191)
Fixes #5161
2020-08-19 16:20:05 +02:00
badrAZ
5f7bc58788 fix(xo-server/sensitive-values): obfuscate params containing "password" (#5220)
Fixes #5219
2020-08-19 10:56:40 +02:00
Fabian Untermoser
bdd93603aa fix(docs/backups): fix typo (#5225) 2020-08-18 21:46:30 +02:00
Rajaa.BARHTAOUI
8392a17cb2 fix(xo-server/authentication): add missing 'createPredicate' (#5221)
Fixes #5218
Introduced by 9ded2641a7
2020-08-18 16:15:35 +02:00
Julien Fontanet
5f7f0b777e fix(xo-server): use http.cookies config everywhere 2020-08-17 11:06:45 +02:00
Julien Fontanet
3f574606d9 feat(backups-cli/clean-vms): display merge progress 2020-08-17 11:06:45 +02:00
badrAZ
45f0f93895 feat(xo-server,xo-web/VM): ability to set VIF TX checksumming (#5182)
Fixes #5095
See xoa-support#2619
2020-08-14 17:14:48 +02:00
badrAZ
af2710135b fix(xo-web/proxies): remove upgrade button style (#5216)
See https://github.com/vatesfr/xen-orchestra/pull/5167/files?file-filters%5B%5D=.md#r469763760

...when the proxy is already up to date
2020-08-13 16:52:29 +02:00
Rajaa.BARHTAOUI
95ed6094fe fix(xo-web/vm/snapshot): fix redirection when copying a VM (#5213)
Introduced by d9211053ce
2020-08-13 11:12:56 +02:00
BenjiReis
6af8ce9eeb feat(sdn-controller): specify tunnel protocol at its creation (#5210) 2020-08-13 11:11:11 +02:00
badrAZ
3ff37f00fe fix(xo-web/deploy-proxy): throw error on trial start failure (#5196)
Introduced by 902953a1fa
2020-08-12 15:11:49 +02:00
Pierre Donias
ed5b066cbe fix(CHANGELOG.unreleased): remove sdn-controller package (#5214)
Introduced by cec5593c70
2020-08-12 12:21:18 +02:00
Mark Martin
cec5593c70 feat(xo-web/vm): protect from accidental shutdown (#5107)
Fixes #5090
2020-08-12 12:17:30 +02:00
Julien Fontanet
04924884ad feat(@xen-orchestra/backups-cli): 0.1.0 2020-08-11 09:57:01 +02:00
Julien Fontanet
3ccf64fcd3 feat(backups-cli/clean-vms): merge single children 2020-08-11 09:56:25 +02:00
Julien Fontanet
8eb7f9b91c fix(xo-server-sdn-controller): remove deprecated uuid import 2020-08-10 11:15:40 +02:00
Julien Fontanet
f25c50c629 chore: update dev deps 2020-08-10 11:15:40 +02:00
Rajaa.BARHTAOUI
e524a1b865 feat: release 5.49.1 (#5207) 2020-08-05 14:46:12 +02:00
Rajaa.BARHTAOUI
ac15e3355e feat: technical release (#5206)
* feat(xo-server-sdn-controller): 1.0.2

* feat(xo-web): 5.67.0

* chore(CHANGELOG): update next
2020-08-05 13:39:50 +02:00
Julien Fontanet
0930a37819 fix(CHANGELOG.unreleased): add xo-server patch
Due to 2789ead99
2020-08-05 12:33:30 +02:00
Rajaa.BARHTAOUI
d62f91a9e6 feat(xo-web/sr/advanced): show thin/thick provisioning for missing SR types (#5204) 2020-08-05 11:15:55 +02:00
Julien Fontanet
2789ead999 fix(xo-server/pool.listMissingPatches): really dont log errors
Previous fix (e1bf68ab3) was incorrect.
2020-08-05 11:10:35 +02:00
BenjiReis
f25fd267dd fix(sdn-controller): only admin can create private networks (#5200) 2020-08-04 14:09:33 +02:00
BenjiReis
47999f1f72 doc(sdn-controller): indicate correct version in OpenFlow requirements (#5199) 2020-08-03 16:47:13 +02:00
Pierre Donias
095bbcd15c feat: release 5.49.0 (#5195) 2020-07-31 14:14:32 +02:00
Pierre Donias
9177bb8451 feat: technical release (patch) (#5194) 2020-07-31 09:41:11 +02:00
Pierre Donias
119bf9b0ff feat(xo-web/VM/network): click on IP address to copy it (#5186)
Fixes #5185
2020-07-31 09:17:37 +02:00
Julien Fontanet
015c6037c4 fix: add prepublishOnly to replace removed prepare scripts
Introduced by 452a7e744
2020-07-30 19:07:36 +02:00
Adam Stankiewicz
452a7e7445 feat(test): remove need for prepare scripts (#5192) 2020-07-30 18:56:13 +02:00
Nicolas Raynaud
407586e2d5 feat(remotes): AWS S3 backup storage (#5037) 2020-07-30 16:47:04 +02:00
badrAZ
ffa431a3cd fix(xo-web/vm/tab-network): add default value to plugins (#5190)
Introduced by 2a74a49995

Plugins can be `undefined` on fetching which triggers the error `Cannot read property "some" of undefined`
2020-07-30 14:57:06 +02:00
Julien Fontanet
281a5ff991 chore: remove unused JSHint comments 2020-07-30 14:32:19 +02:00
BenjiReis
92db9bd284 fix(xo-server-sdn-controller): deactive DH for TLS connections (#5187)
Fixes #5074
2020-07-30 13:17:40 +02:00
Julien Fontanet
ea8f319f45 feat(self-signed): expose days option 2020-07-30 11:20:57 +02:00
Olivier Lambert
a11e9fe04e fix(changelog): caps typo (#5184) 2020-07-29 16:58:10 +02:00
Pierre Donias
27367bd1fc fix(CHANGELOG): wrong xo-server version (#5183) 2020-07-29 16:50:16 +02:00
Pierre Donias
c6f48ae054 feat: technical release (#5181) 2020-07-29 16:32:55 +02:00
Rajaa.BARHTAOUI
7d6efe3694 feat(xo-web/vm/network): improve the VIF locking mode feedback (#5170)
See #4713
2020-07-29 15:55:59 +02:00
Rajaa.BARHTAOUI
f4aad05edc feat(xo-web/backup): show warning if min(fullBackupInterval, retention)>50 (#5144)
See https://xcp-ng.org/forum/post/27539
2020-07-29 15:30:38 +02:00
Pierre Donias
d8f7637ca0 feat(xo-web/self): ability to cancel edition of resource set (#5174)
See xoa-support#2767
2020-07-29 13:59:45 +02:00
badrAZ
f9a7bd199e fix(xo-server#createVm): change network boot priority (#5119)
Fixes #4980
2020-07-29 11:34:24 +02:00
Rajaa.BARHTAOUI
68b7ed284a feat(xo-web/backup/health): show detached VM snapshots (#5125)
Fixes #5086
2020-07-29 10:56:31 +02:00
badrAZ
e782895cf5 feat(xo-server-audit): add extension to exported records (#5180) 2020-07-29 10:52:06 +02:00
Julien Fontanet
a5935b40d5 feat(xo-server/api): user must be signed in by default (#5175)
It's a lot more secure than previous default value.
2020-07-29 10:40:17 +02:00
badrAZ
035d2cb440 fix(xo-server-audit): fix incorrect records content type (#5179) 2020-07-29 10:38:59 +02:00
BenjiReis
2a74a49995 feat(sdn-controller, xo-web): add & remove network rules to a VM's VIFs (#5177) 2020-07-29 09:56:06 +02:00
badrAZ
902953a1fa feat(xo-server, xo-web): display proxy available upgrades (#5167) 2020-07-28 17:13:01 +02:00
Pierre Donias
1ffef91b7a fix(xo-web/copyVm): correctly pass the VM type to copyVms (#5173)
Fixes xoa-support#2773

The modal uses the type to find the objects (either VMs or snapshots) and use
them to generate the names. The missing type caused the VM clones to be named
`undefined_clone`.
2020-07-28 16:29:37 +02:00
Julien Fontanet
3d13d9b0dc feat(xo-server/addApiMethod): check method props 2020-07-28 16:02:32 +02:00
Nicolas Raynaud
adcc5d5692 feat(import/ova): allow import of gzipped vmdk disks (#5085) 2020-07-28 11:52:44 +02:00
badrAZ
c49d70170e feat(xo-server,xo-web/proxy/deploy): ability to set HTTP proxy (#5145) 2020-07-28 11:51:57 +02:00
badrAZ
349a78a5bd fix(xo-web/file-restore): ignore proxy remotes (#5171)
See xoa-support#2741
2020-07-28 11:43:33 +02:00
badrAZ
48734c6896 fix(xo-web/proxies): don't open proxy VM in new tab (#5172) 2020-07-28 10:23:16 +02:00
Rajaa.BARHTAOUI
0f60a3b24d feat(xo-web/home): ability to filter by power state (#5118) 2020-07-27 16:52:07 +02:00
Pierre Donias
d3a88011a6 feat(xo-server/self): can ignore VM snapshots resources usage (#5164)
See xoa-support#2643

With the config option `selfService.ignoreVmSnapshotResources`
2020-07-27 16:08:20 +02:00
Rajaa.BARHTAOUI
9b6e4c605b feat(xo-web/schedules): ability to enable schedule when editing (#5111)
Fixes #5026

This change affects ordinary jobs only, not backup jobs
2020-07-27 15:46:21 +02:00
Nicolas Raynaud
7c91524111 fix(OVA import): allow import of .ova files generated by Red Hat (#5159)
See xoa-support#2713.

 - use <rasd:Address> when <rasd:AddressOnParent> is not available on disks
 - avoid dotfiles and pax headers in tar parser.
2020-07-27 08:24:01 +02:00
Olivier Lambert
e1573069e4 fix(docs/license): truly fix the warning tag (#5169) 2020-07-24 10:08:58 +02:00
marcpezin
f2459c964b Fixing warning closing tag (#5168)
Fixing warning closing tag
2020-07-24 09:20:36 +02:00
Pierre Donias
43aa0b815d feat(xo-web/VM/disks): sort disks by device by default (#5165)
Fixes #5163
2020-07-23 09:53:07 +02:00
marcpezin
0740630e05 fix(docs): rebind license documentation update (#5166) 2020-07-21 17:16:18 +02:00
Rajaa.BARHTAOUI
c9244b2b13 feat(xo-web): log Invalid XML-RPC message error as an unexpected response (#5138)
See xoa-support#2588
2020-07-21 15:11:57 +02:00
Pierre Donias
0d398f867f fix(xo-web/home): link to global filter section (#5157) 2020-07-16 15:32:44 +02:00
Julien Fontanet
b74ec2d7d3 fix(xo-web/backup/restore): dont fail if no backups for a VM (#5156)
Fixes xoa-support#2707

The API does not guarantee that each VM UUID will have at least a backup, and when it happens, the code failed because `first` and `last` were not properly defined.
2020-07-16 14:29:10 +02:00
Julien Fontanet
26a295c8ed chore(xo-web/createSubscription/run): add small comment 2020-07-13 16:13:33 +02:00
Julien Fontanet
2a71d3d20c chore(xo-web/createSubscription): rename loop to run 2020-07-13 16:10:48 +02:00
187 changed files with 10354 additions and 3050 deletions

View File

@@ -46,7 +46,6 @@
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepare": "yarn run build",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
}

View File

@@ -25,7 +25,7 @@ export class Storage {
//
// http://man7.org/linux/man-pages/man3/crypt.3.html#NOTES
const ID_TO_ALGORITHM = {
'5': 'sha256',
5: 'sha256',
}
export class AlteredRecordError extends Error {

View File

@@ -3,6 +3,17 @@ const { dirname } = require('path')
const fs = require('promise-toolbox/promisifyAll')(require('fs'))
module.exports = fs
fs.getSize = path =>
fs.stat(path).then(
_ => _.size,
error => {
if (error.code === 'ENOENT') {
return 0
}
throw error
}
)
fs.mktree = async function mkdirp(path) {
try {
await fs.mkdir(path)

View File

@@ -8,9 +8,10 @@ let force
const assert = require('assert')
const flatten = require('lodash/flatten')
const getopts = require('getopts')
const limitConcurrency = require('limit-concurrency-decorator').default
const lockfile = require('proper-lockfile')
const pipe = require('promise-toolbox/pipe')
const { default: Vhd } = require('vhd-lib')
const { default: Vhd, mergeVhd } = require('vhd-lib')
const { dirname, resolve } = require('path')
const { DISK_TYPE_DIFFERENCING } = require('vhd-lib/dist/_constants')
const { isValidXva } = require('@xen-orchestra/backups/isValidXva')
@@ -26,10 +27,10 @@ const handler = require('@xen-orchestra/fs').getHandler({ url: 'file://' })
//
// the whole chain will be merged into parent, parent will be renamed to child
// and all the others will deleted
async function mergeVhdChain(chain) {
const mergeVhdChain = limitConcurrency(1)(async function mergeVhdChain(chain) {
assert(chain.length >= 2)
const child = chain[0]
let child = chain[0]
const parent = chain[chain.length - 1]
const children = chain.slice(0, -1).reverse()
@@ -46,15 +47,36 @@ async function mergeVhdChain(chain) {
// `mergeVhd` does not work with a stream, either
// - make it accept a stream
// - or create synthetic VHD which is not a stream
return console.warn('TODO: implement merge')
// await mergeVhd(
// handler,
// parent,
// handler,
// children.length === 1
// ? child
// : await createSyntheticStream(handler, children)
// )
if (children.length !== 1) {
console.warn('TODO: implement merging multiple children')
children.length = 1
child = children[0]
}
let done, total
const handle = setInterval(() => {
if (done !== undefined) {
console.log('merging %s: %s/%s', child, done, total)
}
}, 10e3)
await mergeVhd(
handler,
parent,
handler,
child,
// children.length === 1
// ? child
// : await createSyntheticStream(handler, children),
{
onProgress({ done: d, total: t }) {
done = d
total = t
},
}
)
clearInterval(handle)
}
await Promise.all([
@@ -66,7 +88,7 @@ async function mergeVhdChain(chain) {
return force && handler.unlink(child)
}),
])
}
})
const listVhds = pipe([
vmDir => vmDir + '/vdis',

View File

@@ -0,0 +1,58 @@
const groupBy = require('lodash/groupBy')
const { createHash } = require('crypto')
const { dirname, resolve } = require('path')
const asyncMap = require('../_asyncMap')
const { readdir2, readFile, getSize } = require('../_fs')
const sha512 = str => createHash('sha512').update(str).digest('hex')
const sum = values => values.reduce((a, b) => a + b)
module.exports = async function info(vmDirs) {
const jsonFiles = (
await asyncMap(vmDirs, async vmDir =>
(await readdir2(vmDir)).filter(_ => _.endsWith('.json'))
)
).flat()
const hashes = { __proto__: null }
const info = (
await asyncMap(jsonFiles, async jsonFile => {
try {
const jsonDir = dirname(jsonFile)
const json = await readFile(jsonFile)
const hash = sha512(json)
if (hash in hashes) {
console.log(jsonFile, 'duplicate of', hashes[hash])
return
}
hashes[hash] = jsonFile
const metadata = JSON.parse(json)
return {
jsonDir,
jsonFile,
metadata,
size:
json.length +
(await (metadata.mode === 'delta'
? asyncMap(Object.values(metadata.vhds), _ =>
getSize(resolve(jsonDir, _))
).then(sum)
: getSize(resolve(jsonDir, metadata.xva)))),
}
} catch (error) {
console.error(jsonFile, error)
}
})
).filter(_ => _ !== undefined)
const byJobs = groupBy(info, 'metadata.jobId')
Object.keys(byJobs)
.sort()
.forEach(jobId => {
console.log(jobId, sum(byJobs[jobId].map(_ => _.size)))
})
}

View File

@@ -13,6 +13,12 @@ require('./_composeCommands')({
},
usage: 'xo-vm-backups <field path>',
},
info: {
get main() {
return require('./commands/info')
},
usage: 'xo-vm-backups/*',
},
})(process.argv.slice(2), 'xo-backups').catch(error => {
console.error('main', error)
process.exitCode = 1

View File

@@ -7,9 +7,10 @@
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"dependencies": {
"@xen-orchestra/backups": "^0.1.1",
"@xen-orchestra/fs": "^0.10.4",
"@xen-orchestra/fs": "^0.11.1",
"filenamify": "^4.1.0",
"getopts": "^2.2.5",
"limit-concurrency-decorator": "^0.4.0",
"lodash": "^4.17.15",
"promise-toolbox": "^0.15.0",
"proper-lockfile": "^4.1.1",
@@ -32,7 +33,7 @@
"scripts": {
"postversion": "npm publish --access public"
},
"version": "0.0.0",
"version": "0.2.0",
"license": "AGPL-3.0-or-later",
"author": {
"name": "Vates SAS",

View File

@@ -16,7 +16,7 @@
"postversion": "npm publish --access public"
},
"dependencies": {
"d3-time-format": "^2.2.3",
"d3-time-format": "^3.0.0",
"fs-extra": "^9.0.0"
},
"license": "AGPL-3.0-or-later",

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "@xen-orchestra/fs",
"version": "0.10.4",
"version": "0.11.1",
"license": "AGPL-3.0-or-later",
"description": "The File System for Xen Orchestra backups.",
"keywords": [],
@@ -25,17 +25,18 @@
"@marsaud/smb2": "^0.15.0",
"@sindresorhus/df": "^3.1.1",
"@xen-orchestra/async-map": "^0.0.0",
"aws-sdk": "^2.686.0",
"decorator-synchronized": "^0.5.0",
"execa": "^4.0.2",
"fs-extra": "^9.0.0",
"get-stream": "^5.1.0",
"get-stream": "^6.0.0",
"limit-concurrency-decorator": "^0.4.0",
"lodash": "^4.17.4",
"promise-toolbox": "^0.15.0",
"readable-stream": "^3.0.6",
"through2": "^3.0.0",
"tmp": "^0.1.0",
"xo-remote-parser": "^0.5.0"
"through2": "^4.0.2",
"tmp": "^0.2.1",
"xo-remote-parser": "^0.6.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
@@ -58,7 +59,7 @@
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run clean",
"prepare": "yarn run build",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
},
"author": {

View File

@@ -5,7 +5,7 @@ import getStream from 'get-stream'
import asyncMap from '@xen-orchestra/async-map'
import limit from 'limit-concurrency-decorator'
import path from 'path'
import path, { basename } from 'path'
import synchronized from 'decorator-synchronized'
import { fromCallback, fromEvent, ignoreErrors, timeout } from 'promise-toolbox'
import { parse } from 'xo-remote-parser'
@@ -121,6 +121,7 @@ export default class RemoteHandlerAbstract {
await this.__closeFile(fd)
}
// TODO: remove method
async createOutputStream(
file: File,
{ checksum = false, ...options }: Object = {}
@@ -221,19 +222,15 @@ export default class RemoteHandlerAbstract {
)
}
createWriteStream(
file: File,
options: { end?: number, flags?: string, start?: number } = {}
): Promise<LaxWritable> {
return timeout.call(
this._createWriteStream(
typeof file === 'string' ? normalizePath(file) : file,
{
flags: 'wx',
...options,
}
)
)
// write a stream to a file using a temporary file
async outputStream(
input: Readable | Promise<Readable>,
path: string,
{ checksum = true }: { checksum?: boolean } = {}
): Promise<void> {
path = normalizePath(path)
input = await input
return this._outputStream(await input, normalizePath(path), { checksum })
}
// Free the resources possibly dedicated to put the remote at work, when it
@@ -321,18 +318,6 @@ export default class RemoteHandlerAbstract {
return this._readFile(normalizePath(file), { flags })
}
async refreshChecksum(path: string): Promise<void> {
path = normalizePath(path)
const stream = (await this._createReadStream(path, { flags: 'r' })).pipe(
createChecksumStream()
)
stream.resume() // start reading the whole file
await this._outputFile(checksumFile(path), await stream.checksum, {
flags: 'wx',
})
}
async rename(
oldPath: string,
newPath: string,
@@ -548,6 +533,22 @@ export default class RemoteHandlerAbstract {
return this._outputFile(file, data, options)
}
async _outputStream(input, path, { checksum }) {
const tmpPath = `${dirname(path)}/.${basename(path)}`
const output = await this.createOutputStream(tmpPath, { checksum })
try {
input.pipe(output)
await fromEvent(output, 'finish')
await output.checksumWritten
// $FlowFixMe
await input.task
await this.rename(tmpPath, path, { checksum })
} catch (error) {
await this.unlink(tmpPath, { checksum })
throw error
}
}
_read(
file: File,
buffer: Buffer,

View File

@@ -42,18 +42,6 @@ describe('createOutputStream()', () => {
})
})
describe('createReadStream()', () => {
it(`throws in case of timeout`, async () => {
const testHandler = new TestHandler({
createReadStream: () => new Promise(() => {}),
})
const promise = testHandler.createReadStream('file')
jest.advanceTimersByTime(TIMEOUT)
await expect(promise).rejects.toThrowError(TimeoutError)
})
})
describe('getInfo()', () => {
it('throws in case of timeout', async () => {
const testHandler = new TestHandler({

View File

@@ -2,7 +2,6 @@
import 'dotenv/config'
import asyncIteratorToStream from 'async-iterator-to-stream'
import getStream from 'get-stream'
import { forOwn, random } from 'lodash'
import { fromCallback } from 'promise-toolbox'
import { pipeline } from 'readable-stream'
@@ -91,31 +90,6 @@ handlers.forEach(url => {
})
})
describe('#createReadStream()', () => {
beforeEach(() => handler.outputFile('file', TEST_DATA))
testWithFileDescriptor('file', 'r', async ({ file, flags }) => {
await expect(
await getStream.buffer(
await handler.createReadStream(file, { flags })
)
).toEqual(TEST_DATA)
})
})
describe('#createWriteStream()', () => {
testWithFileDescriptor('file', 'wx', async ({ file, flags }) => {
const stream = await handler.createWriteStream(file, { flags })
await fromCallback(pipeline, createTestDataStream(), stream)
await expect(await handler.readFile('file')).toEqual(TEST_DATA)
})
it('fails if parent dir is missing', async () => {
const error = await rejectionOf(handler.createWriteStream('dir/file'))
expect(error.code).toBe('ENOENT')
})
})
describe('#getInfo()', () => {
let info
beforeAll(async () => {

View File

@@ -4,6 +4,7 @@ import execa from 'execa'
import type RemoteHandler from './abstract'
import RemoteHandlerLocal from './local'
import RemoteHandlerNfs from './nfs'
import RemoteHandlerS3 from './s3'
import RemoteHandlerSmb from './smb'
import RemoteHandlerSmbMount from './smb-mount'
@@ -13,6 +14,7 @@ export type Remote = { url: string }
const HANDLERS = {
file: RemoteHandlerLocal,
nfs: RemoteHandlerNfs,
s3: RemoteHandlerS3,
}
try {

284
@xen-orchestra/fs/src/s3.js Normal file
View File

@@ -0,0 +1,284 @@
import AWS from 'aws-sdk'
import { parse } from 'xo-remote-parser'
import RemoteHandlerAbstract from './abstract'
import { createChecksumStream } from './checksum'
// endpoints https://docs.aws.amazon.com/general/latest/gr/s3.html
// limits: https://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html
const MIN_PART_SIZE = 1024 * 1024 * 5 // 5MB
const MAX_PART_SIZE = 1024 * 1024 * 1024 * 5 // 5GB
const MAX_PARTS_COUNT = 10000
const MAX_OBJECT_SIZE = 1024 * 1024 * 1024 * 1024 * 5 // 5TB
const IDEAL_FRAGMENT_SIZE = Math.ceil(MAX_OBJECT_SIZE / MAX_PARTS_COUNT) // the smallest fragment size that still allows a 5TB upload in 10000 fragments, about 524MB

// Remote handler backed by an S3-compatible object store.
//
// The remote URL's path is split into `<bucket>/<dir…>`: the first component
// is the bucket name, the remainder becomes a key prefix. Handler paths
// (which start with `/`) are appended to that prefix to form object keys.
export default class S3Handler extends RemoteHandlerAbstract {
  constructor(remote, _opts) {
    super(remote)
    const { host, path, username, password } = parse(remote.url)

    // https://www.zenko.io/blog/first-things-first-getting-started-scality-s3-server/
    this._s3 = new AWS.S3({
      accessKeyId: username,
      apiVersion: '2006-03-01',
      endpoint: host,
      s3ForcePathStyle: true,
      secretAccessKey: password,
      signatureVersion: 'v4',
    })

    const splitPath = path.split('/').filter(s => s.length)
    this._bucket = splitPath.shift()
    this._dir = splitPath.join('/')
  }

  get type() {
    return 's3'
  }

  // Base request parameters for `file` (a handler path starting with `/`).
  _createParams(file) {
    return { Bucket: this._bucket, Key: this._dir + file }
  }

  // Uploads the `input` stream to `path`; when `checksum` is true, also
  // writes a `<path>.checksum` companion object.
  async _outputStream(input, path, { checksum }) {
    let inputStream = input
    if (checksum) {
      const checksumStream = createChecksumStream()
      const forwardError = error => {
        checksumStream.emit('error', error)
      }
      input.pipe(checksumStream)
      input.on('error', forwardError)
      inputStream = checksumStream
    }
    const upload = this._s3.upload(
      {
        ...this._createParams(path),
        Body: inputStream,
      },
      // fixed part size: allows objects up to MAX_OBJECT_SIZE within the
      // MAX_PARTS_COUNT limit
      { partSize: IDEAL_FRAGMENT_SIZE }
    )
    await upload.promise()

    if (checksum) {
      // FIXED: renamed from `checksum`, which shadowed the destructured
      // option above
      const checksumValue = await inputStream.checksum
      const params = {
        ...this._createParams(path + '.checksum'),
        Body: checksumValue,
      }
      await this._s3.upload(params).promise()
    }
    // wait for the input stream's completion task, if it exposes one
    await input.task
  }

  async _writeFile(file, data, options) {
    return this._s3
      .putObject({ ...this._createParams(file), Body: data })
      .promise()
  }

  async _createReadStream(file, options) {
    return this._s3.getObject(this._createParams(file)).createReadStream()
  }

  async _unlink(file) {
    return this._s3.deleteObject(this._createParams(file)).promise()
  }

  // Lists the direct children of `dir` (no recursion), emulating a directory
  // listing on top of the flat S3 key namespace.
  async _list(dir) {
    function splitPath(path) {
      return path.split('/').filter(d => d.length)
    }

    const prefix = [this._dir, dir].join('/')
    const splitPrefix = splitPath(prefix)
    const request = this._s3.listObjectsV2({
      Bucket: this._bucket,
      Prefix: splitPrefix.join('/'),
    })
    const result = await request.promise()
    const uniq = new Set()
    for (const entry of result.Contents) {
      const line = splitPath(entry.Key)
      if (line.length > splitPrefix.length) {
        // keep only the path component directly below the prefix
        uniq.add(line[splitPrefix.length])
      }
    }
    return [...uniq]
  }

  // S3 has no rename: copy to the new key, then delete the old one.
  async _rename(oldPath, newPath) {
    const params = {
      ...this._createParams(newPath),
      CopySource: `/${this._bucket}/${this._dir}${oldPath}`,
    }
    await this._s3.copyObject(params).promise()
    await this._s3.deleteObject(this._createParams(oldPath)).promise()
  }

  async _getSize(file) {
    if (typeof file !== 'string') {
      // accept a file-descriptor-like object (see _openFile: the fd is the path)
      file = file.fd
    }
    const result = await this._s3.headObject(this._createParams(file)).promise()
    return +result.ContentLength
  }

  // Reads `buffer.length` bytes starting at `position` using a ranged GET.
  async _read(file, buffer, position = 0) {
    if (typeof file !== 'string') {
      file = file.fd
    }
    const params = this._createParams(file)
    params.Range = `bytes=${position}-${position + buffer.length - 1}`
    const result = await this._s3.getObject(params).promise()
    result.Body.copy(buffer)
    return { bytesRead: result.Body.length, buffer }
  }

  // Overwrites `buffer.length` bytes of `file` at `position`.
  //
  // S3 objects are immutable, so the whole object must be rewritten: either
  // downloaded/patched/re-uploaded in one putObject (small objects), or
  // rebuilt through a multipart upload that sources the untouched ranges
  // server-side with uploadPartCopy().
  async _write(file, buffer, position) {
    if (typeof file !== 'string') {
      file = file.fd
    }
    const uploadParams = this._createParams(file)
    const fileSize = +(await this._s3.headObject(uploadParams).promise())
      .ContentLength
    if (fileSize < MIN_PART_SIZE) {
      const resultBuffer = Buffer.alloc(
        Math.max(fileSize, position + buffer.length)
      )
      const fileContent = (await this._s3.getObject(uploadParams).promise())
        .Body
      fileContent.copy(resultBuffer)
      buffer.copy(resultBuffer, position)
      await this._s3
        .putObject({ ...uploadParams, Body: resultBuffer })
        .promise()
      return { buffer, bytesWritten: buffer.length }
    } else {
      // using this trick: https://stackoverflow.com/a/38089437/72637
      // multipart fragments have a minimum size of 5Mo and a max of 5Go unless they are last
      // splitting the file in 3 parts: [prefix, edit, suffix]
      // if `prefix` is bigger than 5Mo, it will be sourced from uploadPartCopy()
      // otherwise it will be downloaded, concatenated to `edit`
      // `edit` will always be an upload part
      // `suffix` will always be sourced from uploadPartCopy()
      const multipartParams = await this._s3
        .createMultipartUpload(uploadParams)
        .promise()
      try {
        const parts = []
        const prefixSize = position
        let suffixOffset = prefixSize + buffer.length
        let suffixSize = Math.max(0, fileSize - suffixOffset)
        let hasSuffix = suffixSize > 0
        let editBuffer = buffer
        let editBufferOffset = position
        let partNumber = 1
        if (prefixSize < MIN_PART_SIZE) {
          // prefix too small to be its own part: download it and merge it
          // into the edit fragment
          const downloadParams = {
            ...uploadParams,
            Range: `bytes=0-${prefixSize - 1}`,
          }
          const prefixBuffer =
            prefixSize > 0
              ? (await this._s3.getObject(downloadParams).promise()).Body
              : Buffer.alloc(0)
          editBuffer = Buffer.concat([prefixBuffer, buffer])
          editBufferOffset = 0
        } else {
          const fragmentsCount = Math.ceil(prefixSize / MAX_PART_SIZE)
          const prefixFragmentSize = Math.ceil(prefixSize / fragmentsCount)
          let prefixPosition = 0
          for (let i = 0; i < fragmentsCount; i++) {
            // FIXED: clamp the last fragment to the prefix boundary;
            // previously the unclamped range could copy bytes belonging to
            // the edited region when prefixSize is not a multiple of the
            // fragment size (the leftover empty `if (lastFragmentSize)`
            // block hinted at this missing handling)
            const fragmentEnd = Math.min(
              prefixPosition + prefixFragmentSize,
              prefixSize
            )
            const copyPrefixParams = {
              ...multipartParams,
              PartNumber: partNumber++,
              CopySource: `/${this._bucket}/${this._dir + file}`,
              CopySourceRange: `bytes=${prefixPosition}-${fragmentEnd - 1}`,
            }
            const prefixPart = (
              await this._s3.uploadPartCopy(copyPrefixParams).promise()
            ).CopyPartResult
            parts.push({
              ETag: prefixPart.ETag,
              PartNumber: copyPrefixParams.PartNumber,
            })
            prefixPosition += prefixFragmentSize
          }
        }
        if (hasSuffix && editBuffer.length < MIN_PART_SIZE) {
          // the edit fragment is too short and is not the last fragment
          // let's steal from the suffix fragment to reach the minimum size
          // the suffix might be too short and itself entirely absorbed in the edit fragment, making it the last one.
          const complementSize = Math.min(
            MIN_PART_SIZE - editBuffer.length,
            suffixSize
          )
          const complementOffset = editBufferOffset + editBuffer.length
          suffixOffset += complementSize
          suffixSize -= complementSize
          hasSuffix = suffixSize > 0
          const prefixRange = `bytes=${complementOffset}-${
            complementOffset + complementSize - 1
          }`
          const downloadParams = { ...uploadParams, Range: prefixRange }
          const complementBuffer = (
            await this._s3.getObject(downloadParams).promise()
          ).Body
          editBuffer = Buffer.concat([editBuffer, complementBuffer])
        }
        const editParams = {
          ...multipartParams,
          Body: editBuffer,
          PartNumber: partNumber++,
        }
        const editPart = await this._s3.uploadPart(editParams).promise()
        parts.push({ ETag: editPart.ETag, PartNumber: editParams.PartNumber })
        if (hasSuffix) {
          // source the untouched tail of the object with ranged part copies
          const suffixFragments = Math.ceil(suffixSize / MAX_PART_SIZE)
          const suffixFragmentsSize = Math.ceil(suffixSize / suffixFragments)
          let suffixFragmentOffset = suffixOffset
          for (let i = 0; i < suffixFragments; i++) {
            const fragmentEnd = suffixFragmentOffset + suffixFragmentsSize
            const suffixRange = `bytes=${suffixFragmentOffset}-${
              Math.min(fileSize, fragmentEnd) - 1
            }`
            const copySuffixParams = {
              ...multipartParams,
              PartNumber: partNumber++,
              CopySource: `/${this._bucket}/${this._dir + file}`,
              CopySourceRange: suffixRange,
            }
            const suffixPart = (
              await this._s3.uploadPartCopy(copySuffixParams).promise()
            ).CopyPartResult
            parts.push({
              ETag: suffixPart.ETag,
              PartNumber: copySuffixParams.PartNumber,
            })
            suffixFragmentOffset = fragmentEnd
          }
        }
        await this._s3
          .completeMultipartUpload({
            ...multipartParams,
            MultipartUpload: { Parts: parts },
          })
          .promise()
        // FIXED: consistent with the small-file branch, which returns a
        // write result
        return { buffer, bytesWritten: buffer.length }
      } catch (e) {
        // abort to avoid leaving billable orphan parts behind
        await this._s3.abortMultipartUpload(multipartParams).promise()
        throw e
      }
    }
  }

  // File descriptors are emulated: the "fd" is simply the path itself.
  async _openFile(path, flags) {
    return path
  }

  async _closeFile(fd) {}
}

View File

@@ -49,7 +49,7 @@
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepare": "yarn run build",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
}
}

View File

@@ -0,0 +1,3 @@
module.exports = require('../../@xen-orchestra/babel-config')(
require('./package.json')
)

View File

@@ -0,0 +1,24 @@
/benchmark/
/benchmarks/
*.bench.js
*.bench.js.map
/examples/
example.js
example.js.map
*.example.js
*.example.js.map
/fixture/
/fixtures/
*.fixture.js
*.fixture.js.map
*.fixtures.js
*.fixtures.js.map
/test/
/tests/
*.spec.js
*.spec.js.map
__snapshots__/

View File

@@ -0,0 +1,139 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->
# @xen-orchestra/openflow [![Build Status](https://travis-ci.org/vatesfr/xen-orchestra.png?branch=master)](https://travis-ci.org/vatesfr/xen-orchestra)
> Pack and unpack OpenFlow messages
## Install
Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/openflow):
```
> npm install --save @xen-orchestra/openflow
```
## Usage
Unpacking a received OpenFlow message from a socket:
```js
import openflow from '@xen-orchestra/openflow'
import parse from '@xen-orchestra/openflow/parse-socket'
const version = openflow.versions.openFlow11
const ofProtocol = openflow.protocols[version]
function parseOpenFlowMessages(socket) {
for await (const msg of parse(socket)) {
if (msg.header !== undefined) {
const ofType = msg.header.type
switch (ofType) {
case ofProtocol.type.hello:
// Handle OFPT_HELLO
break
case ofProtocol.type.error:
// Handle OFPT_ERROR
break
case ofProtocol.type.echoRequest:
// Handle OFPT_ECHO_REQUEST
break
case ofProtocol.type.packetIn:
// Handle OFPT_PACKET_IN
break
case ofProtocol.type.featuresReply:
// Handle OFPT_FEATURES_REPLY
break
case ofProtocol.type.getConfigReply:
// Handle OFPT_GET_CONFIG_REPLY
break
case ofProtocol.type.portStatus:
// Handle OFPT_PORT_STATUS
break
case ofProtocol.type.flowRemoved:
// Handle OFPT_FLOW_REMOVED
break
default:
// Error: Invalid type
break
}
} else {
// Error: Message is unparseable
}
}
}
```
Unpacking a OpenFlow message from a buffer:
```js
import openflow from '@xen-orchestra/openflow'
const version = openflow.versions.openFlow11
const ofProtocol = openflow.protocols[version]
function processOpenFlowMessage(buf) {
const unpacked = openflow.unpack(buf)
const ofType = unpacked.header.type
switch (ofType) {
case ofProtocol.type.hello:
// Handle OFPT_HELLO
break
case ofProtocol.type.error:
// Handle OFPT_ERROR
break
case ofProtocol.type.echoRequest:
// Handle OFPT_ECHO_REQUEST
break
case ofProtocol.type.packetIn:
// Handle OFPT_PACKET_IN
break
case ofProtocol.type.featuresReply:
// Handle OFPT_FEATURES_REPLY
break
case ofProtocol.type.getConfigReply:
// Handle OFPT_GET_CONFIG_REPLY
break
case ofProtocol.type.portStatus:
// Handle OFPT_PORT_STATUS
break
case ofProtocol.type.flowRemoved:
// Handle OFPT_FLOW_REMOVED
break
default:
// Error: Invalid type
break
}
}
```
Packing an OpenFlow OFPT_HELLO message:
```js
import openflow from '@xen-orchestra/openflow'
const version = openflow.versions.openFlow11
const ofProtocol = openflow.protocols[version]
const buf = openflow.pack({
header: {
version,
type: ofProtocol.type.hello,
xid: 1,
},
})
```
## Contributions
Contributions are _very_ welcomed, either on the documentation or on
the code.
You may:
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
you've encountered;
- fork and create a pull request.
## License
© [Vates SAS](https://vates.fr)

View File

@@ -0,0 +1,108 @@
Unpacking a received OpenFlow message from a socket:
```js
import openflow from '@xen-orchestra/openflow'
import parse from '@xen-orchestra/openflow/parse-socket'
const version = openflow.versions.openFlow11
const ofProtocol = openflow.protocols[version]
function parseOpenFlowMessages(socket) {
for await (const msg of parse(socket)) {
if (msg.header !== undefined) {
const ofType = msg.header.type
switch (ofType) {
case ofProtocol.type.hello:
// Handle OFPT_HELLO
break
case ofProtocol.type.error:
// Handle OFPT_ERROR
break
case ofProtocol.type.echoRequest:
// Handle OFPT_ECHO_REQUEST
break
case ofProtocol.type.packetIn:
// Handle OFPT_PACKET_IN
break
case ofProtocol.type.featuresReply:
// Handle OFPT_FEATURES_REPLY
break
case ofProtocol.type.getConfigReply:
// Handle OFPT_GET_CONFIG_REPLY
break
case ofProtocol.type.portStatus:
// Handle OFPT_PORT_STATUS
break
case ofProtocol.type.flowRemoved:
// Handle OFPT_FLOW_REMOVED
break
default:
// Error: Invalid type
break
}
} else {
// Error: Message is unparseable
}
}
}
```
Unpacking a OpenFlow message from a buffer:
```js
import openflow from '@xen-orchestra/openflow'
const version = openflow.versions.openFlow11
const ofProtocol = openflow.protocols[version]
function processOpenFlowMessage(buf) {
const unpacked = openflow.unpack(buf)
const ofType = unpacked.header.type
switch (ofType) {
case ofProtocol.type.hello:
// Handle OFPT_HELLO
break
case ofProtocol.type.error:
// Handle OFPT_ERROR
break
case ofProtocol.type.echoRequest:
// Handle OFPT_ECHO_REQUEST
break
case ofProtocol.type.packetIn:
// Handle OFPT_PACKET_IN
break
case ofProtocol.type.featuresReply:
// Handle OFPT_FEATURES_REPLY
break
case ofProtocol.type.getConfigReply:
// Handle OFPT_GET_CONFIG_REPLY
break
case ofProtocol.type.portStatus:
// Handle OFPT_PORT_STATUS
break
case ofProtocol.type.flowRemoved:
// Handle OFPT_FLOW_REMOVED
break
default:
// Error: Invalid type
break
}
}
```
Packing an OpenFlow OFPT_HELLO message:
```js
import openflow from '@xen-orchestra/openflow'
const version = openflow.versions.openFlow11
const ofProtocol = openflow.protocols[version]
const buf = openflow.pack({
header: {
version,
type: ofProtocol.type.hello,
xid: 1,
},
})
```

View File

@@ -0,0 +1,35 @@
{
"description": "Pack and unpack OpenFlow messages",
"private": false,
"name": "@xen-orchestra/openflow",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/openflow",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "@xen-orchestra/openflow",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"version": "0.1.1",
"engines": {
"node": ">=8.10"
},
"main": "dist/",
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"postversion": "npm publish --access public",
"prebuild": "rimraf dist/",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build"
},
"devDependencies": {
"@babel/cli": "^7.7.4",
"@babel/core": "^7.7.4",
"@babel/preset-env": "^7.7.4",
"cross-env": "^7.0.2",
"rimraf": "^3.0.0"
},
"dependencies": {
"@vates/read-chunk": "^0.1.0"
}
}

View File

@@ -0,0 +1 @@
module.exports = require('./dist/parse-socket')

View File

@@ -0,0 +1,9 @@
// Layout of the fixed 8-byte OpenFlow message header, common to all protocol
// versions: total size and byte offset of each field within the header.
const HEADER_SCHEME = {
  size: 8,
  offsets: {
    version: 0, // 1 byte
    type: 1, // 1 byte
    length: 2, // 2 bytes
    xid: 4, // 4 bytes
  },
}

export default HEADER_SCHEME

View File

@@ -0,0 +1,38 @@
import get from './util/get-from-map'
import ofVersion from './version'
// TODO: More openflow versions
import of11 from './openflow-11/index'
import scheme from './default-header-scheme'
// =============================================================================
// Map of supported OpenFlow versions to their protocol implementation.
const OPENFLOW = {
  [ofVersion.openFlow11]: of11,
}

// Returns the implementation for `version`; `get` throws when the version is
// unsupported.
const getImplementation = version =>
  get(OPENFLOW, version, `Unsupported OpenFlow version: ${version}`)

// =============================================================================

export default {
  versions: ofVersion,
  protocols: { [ofVersion.openFlow11]: of11.protocol },

  // ---------------------------------------------------------------------------

  // Serializes `object` using the implementation matching its header version.
  pack: object => getImplementation(object.header.version).pack(object),

  // Deserializes the message starting at `offset` in `buffer`; the version is
  // read directly from the raw header bytes.
  unpack: (buffer, offset = 0) =>
    getImplementation(buffer.readUInt8(offset + scheme.offsets.version)).unpack(
      buffer,
      offset
    ),
}

View File

@@ -0,0 +1,58 @@
import get from '../../util/get-from-map'
import ofOutput from './output'
import of from '../openflow-11'
// =============================================================================
// Dispatch table mapping OpenFlow 1.1 action types to their codec.
// Only OFPAT_OUTPUT is implemented so far.
const ACTION = {
  [of.actionType.output]: ofOutput,
  /* TODO:
  [of.actionType.group]: ,
  [of.actionType.setVlanId]: ,
  [of.actionType.setVlanPcp]: ,
  [of.actionType.setDlSrc]: ,
  [of.actionType.setDlDst]: ,
  [of.actionType.setNwSrc]: ,
  [of.actionType.setNwDst]: ,
  [of.actionType.setNwTos]: ,
  [of.actionType.setNwEcn]: ,
  [of.actionType.setTpSrc]: ,
  [of.actionType.setTpDst]: ,
  [of.actionType.copyTtlOut]: ,
  [of.actionType.copyTtlIn]: ,
  [of.actionType.setMplsLabel]: ,
  [of.actionType.setMplsTc]: ,
  [of.actionType.setMplsTtl]: ,
  [of.actionType.decMplsTtl]: ,
  [of.actionType.pushVlan]: ,
  [of.actionType.popVlan]: ,
  [of.actionType.pushMpls]: ,
  [of.actionType.popMpls]: ,
  [of.actionType.setQueue]: ,
  [of.actionType.setNwTtl]: ,
  [of.actionType.decNwTtl]: ,
  [of.actionType.experimenter]:
  */
}
// =============================================================================
export default {
  // Serializes `object` with the codec matching `object.type`; `get` throws
  // on unsupported action types. `buffer` and `offset` are forwarded as-is.
  pack: (object, buffer = undefined, offset = 0) => {
    const { type } = object
    return get(ACTION, type, `Invalid action type: ${type}`).pack(
      object,
      buffer,
      offset
    )
  },
  // Deserializes the action found at `offset`: the type is read from the raw
  // action header, then dispatched to the matching codec.
  unpack: (buffer, offset = 0) => {
    const type = buffer.readUInt16BE(offset + of.offsets.actionHeader.type)
    return get(ACTION, type, `Invalid action type: ${type}`).unpack(
      buffer,
      offset
    )
  },
}

View File

@@ -0,0 +1,45 @@
import assert from 'assert'
import of from '../openflow-11'
// =============================================================================
// Field offsets of the OFPAT_OUTPUT action and size of its trailing padding.
const OFFSETS = of.offsets.actionOutput
const PAD_LENGTH = 6
// =============================================================================
// Codec for the fixed-size OFPAT_OUTPUT action.
export default {
  // Serializes `object` into `buffer` at `offset`, allocating a fresh buffer
  // when none is supplied. Note: sets `object.len` as a side effect.
  pack(object, buffer = undefined, offset = 0) {
    assert(object.type === of.actionType.output)
    object.len = of.sizes.actionOutput
    if (buffer === undefined) {
      buffer = Buffer.alloc(object.len)
    }
    buffer.writeUInt16BE(object.type, offset + OFFSETS.type)
    buffer.writeUInt16BE(object.len, offset + OFFSETS.len)
    buffer.writeUInt32BE(object.port, offset + OFFSETS.port)
    buffer.writeUInt16BE(object.max_len, offset + OFFSETS.maxLen)
    buffer.fill(0, offset + OFFSETS.pad, offset + OFFSETS.pad + PAD_LENGTH)
    return buffer
  },
  // Deserializes an OFPAT_OUTPUT action starting at `offset`; asserts the
  // type and the declared length.
  unpack(buffer, offset = 0) {
    const action = {}
    action.type = buffer.readUInt16BE(offset + OFFSETS.type)
    assert(action.type === of.actionType.output)
    action.len = buffer.readUInt16BE(offset + OFFSETS.len)
    assert(action.len === of.sizes.actionOutput)
    action.port = buffer.readUInt32BE(offset + OFFSETS.port)
    action.max_len = buffer.readUInt16BE(offset + OFFSETS.maxLen)
    return action
  },
}

View File

@@ -0,0 +1,49 @@
import get from '../util/get-from-map'
import echo from './message/echo'
import error from './message/error'
import hello from './message/hello'
import featuresRequest from './message/features-request'
import featuresReply from './message/features-reply'
import getConfigRequest from './message/get-config-request'
import switchConfig from './message/switch-config'
import flowMod from './message/flow-mod'
import of from './openflow-11'
// =============================================================================
// Dispatch table mapping OpenFlow 1.1 message types to their codec.
// `echo` handles both request and reply; `switchConfig` handles both
// get-config-reply and set-config.
const MESSAGE = {
  [of.type.hello]: hello,
  [of.type.error]: error,
  [of.type.featuresRequest]: featuresRequest,
  [of.type.featuresReply]: featuresReply,
  [of.type.echoRequest]: echo,
  [of.type.echoReply]: echo,
  [of.type.getConfigRequest]: getConfigRequest,
  [of.type.getConfigReply]: switchConfig,
  [of.type.setConfig]: switchConfig,
  [of.type.flowMod]: flowMod,
}
// =============================================================================
export default {
  // Protocol constants for this OpenFlow version.
  protocol: of,
  // ---------------------------------------------------------------------------
  // Serializes `object` with the codec matching its header type; `get` throws
  // on unsupported message types.
  pack: object => {
    const type = object.header.type
    return get(MESSAGE, type, `Invalid OpenFlow message type: ${type}`).pack(
      object
    )
  },
  // Deserializes the message starting at `offset`: the type is read from the
  // raw header bytes, then dispatched to the matching codec.
  unpack: (buffer, offset = 0) => {
    const type = buffer.readUInt8(offset + of.offsets.header.type)
    return get(MESSAGE, type, `Invalid OpenFlow message type: ${type}`).unpack(
      buffer,
      offset
    )
  },
}

View File

@@ -0,0 +1,102 @@
import assert from 'assert'
import get from '../../util/get-from-map'
import ofAction from '../action/action'
import of from '../openflow-11'
// =============================================================================
// Serialized size of each supported action type, used to compute the total
// instruction length when packing.
const SIZES = {
  [of.actionType.output]: of.sizes.actionOutput,
  [of.actionType.group]: of.sizes.actionGroup,
  [of.actionType.setVlanId]: of.sizes.actionVlanId,
  [of.actionType.setVlanPcp]: of.sizes.actionVlanPcp,
  [of.actionType.setDlSrc]: of.sizes.actionDlAddr,
  [of.actionType.setDlDst]: of.sizes.actionDlAddr,
  [of.actionType.setNwSrc]: of.sizes.actionNwAddr,
  [of.actionType.setNwDst]: of.sizes.actionNwAddr,
  [of.actionType.setNwTos]: of.sizes.actionNwTos,
  [of.actionType.setNwEcn]: of.sizes.actionNwEcn,
  [of.actionType.setTpSrc]: of.sizes.actionTpPort,
  [of.actionType.setTpDst]: of.sizes.actionTpPort,
  [of.actionType.copyTtlOut]: of.sizes.actionHeader,
  [of.actionType.copyTtlIn]: of.sizes.actionHeader,
  [of.actionType.setMplsLabel]: of.sizes.actionMplsLabel,
  [of.actionType.setMplsTc]: of.sizes.actionMplsTc,
  [of.actionType.setMplsTtl]: of.sizes.actionMplsTtl,
  [of.actionType.decMplsTtl]: of.sizes.actionMplsTtl,
  [of.actionType.pushVlan]: of.sizes.actionPush,
  [of.actionType.popVlan]: of.sizes.actionHeader,
  [of.actionType.pushMpls]: of.sizes.actionPush,
  [of.actionType.popMpls]: of.sizes.actionPopMpls,
  [of.actionType.setQueue]: of.sizes.actionSetQueue,
  [of.actionType.setNwTtl]: of.sizes.actionNwTtl,
  [of.actionType.decNwTtl]: of.sizes.actionNwTtl,
}
// -----------------------------------------------------------------------------
// Instruction types handled by this codec (the action-list instructions).
const TYPES = [
  of.instructionType.clearActions,
  of.instructionType.writeActions,
  of.instructionType.applyActions,
]
const OFFSETS = of.offsets.instructionActions
const PAD_LENGTH = 4
// =============================================================================
export default {
  // Serializes an action-list instruction (clear/write/apply-actions) and its
  // embedded actions. Note: sets `object.len` as a side effect.
  pack: (object, buffer = undefined, offset = 0) => {
    const { type } = object
    assert(TYPES.includes(type))
    object.len = of.sizes.instructionActions
    const { actions = [] } = object
    actions.forEach(action => {
      assert(Object.values(of.actionType).includes(action.type))
      // TODO: manage experimenter
      object.len += get(
        SIZES,
        action.type,
        `Invalid action type: ${action.type}`
      )
    })
    buffer = buffer !== undefined ? buffer : Buffer.alloc(object.len)
    buffer.writeUInt16BE(type, offset + OFFSETS.type)
    buffer.writeUInt16BE(object.len, offset + OFFSETS.len)
    buffer.fill(0, offset + OFFSETS.pad, offset + OFFSETS.pad + PAD_LENGTH)
    let actionOffset = offset + OFFSETS.actions
    actions.forEach(action => {
      ofAction.pack(action, buffer, actionOffset)
      actionOffset += SIZES[action.type]
    })
    // FIXED: was missing — the sibling codecs' pack() return the buffer and
    // the instruction dispatcher returns pack()'s result to its caller
    return buffer
  },
  // Deserializes an action-list instruction starting at `offset`; for
  // clear-actions there is no action list.
  unpack: (buffer = undefined, offset = 0) => {
    const type = buffer.readUInt16BE(offset + OFFSETS.type)
    assert(TYPES.includes(type))
    const object = { type }
    object.len = buffer.readUInt16BE(offset + OFFSETS.len)
    if (type === of.instructionType.clearActions) {
      // No actions for this type
      return object
    }
    object.actions = []
    let actionOffset = offset + OFFSETS.actions
    // FIXED: the bound is relative to `offset`; the previous comparison
    // against bare `object.len` only worked when unpacking at offset 0
    const end = offset + object.len
    while (actionOffset < end) {
      const action = ofAction.unpack(buffer, actionOffset)
      actionOffset += action.len
      object.actions.push(action)
    }
    return object
  },
}

View File

@@ -0,0 +1,43 @@
import get from '../../util/get-from-map'
import actions from './actions'
// import goToTable from './goToTable'
import of from '../openflow-11'
// import writeMetadata from './writeMetadata'
// =============================================================================
// Dispatch table mapping instruction types to their codec; the three
// action-list instruction types share the `actions` codec.
const INSTRUCTION = {
  /* TODO:
  [of.instructionType.goToTable]: goToTable,
  [of.instructionType.writeMetadata]: writeMetadata,
  */
  [of.instructionType.writeActions]: actions,
  [of.instructionType.applyActions]: actions,
  [of.instructionType.clearActions]: actions,
}
// -----------------------------------------------------------------------------
const OFFSETS = of.offsets.instruction
// =============================================================================
export default {
  // Serializes `object` with the codec matching `object.type`; `get` throws
  // on unsupported instruction types.
  pack: (object, buffer = undefined, offset = 0) => {
    const { type } = object
    return get(INSTRUCTION, type, `Invalid instruction type: ${type}`).pack(
      object,
      buffer,
      offset
    )
  },
  // Deserializes the instruction found at `offset`: the type is read from the
  // raw instruction header, then dispatched to the matching codec.
  unpack: (buffer = undefined, offset = 0) => {
    const type = buffer.readUInt16BE(offset + OFFSETS.type)
    return get(INSTRUCTION, type, `Invalid instruction type: ${type}`).unpack(
      buffer,
      offset
    )
  },
}

View File

@@ -0,0 +1,46 @@
import assert from 'assert'
import ofHeader from './header'
import of from '../openflow-11'
// =============================================================================
const OFFSETS = of.offsets.echo
// Echo request and reply share the same wire format.
const TYPES = [of.type.echoRequest, of.type.echoReply]
// =============================================================================
export default {
  // Serializes an OFPT_ECHO_REQUEST/REPLY: header plus optional opaque data.
  // Note: sets `header.length` as a side effect.
  pack: object => {
    const { header, data } = object
    assert(TYPES.includes(header.type))
    const dataSize = data !== undefined ? data.length : 0
    header.length = of.sizes.header + dataSize
    const buffer = Buffer.alloc(header.length)
    ofHeader.pack(header, buffer, OFFSETS.header)
    if (dataSize > 0) {
      data.copy(buffer, OFFSETS.data, 0, dataSize)
    }
    return buffer
  },
  // Deserializes an echo message starting at `offset`; `data` is only present
  // when the header length exceeds the bare header size.
  unpack: (buffer, offset = 0) => {
    const header = ofHeader.unpack(buffer, offset + OFFSETS.header)
    assert(TYPES.includes(header.type))
    const object = { header }
    const dataSize = header.length - of.sizes.header
    if (dataSize > 0) {
      object.data = Buffer.alloc(dataSize)
      buffer.copy(
        object.data,
        0,
        offset + OFFSETS.data,
        offset + OFFSETS.data + dataSize
      )
    }
    return object
  },
}

View File

@@ -0,0 +1,79 @@
import assert from 'assert'
import get from '../../util/get-from-map'
import ofHeader from './header'
import of from '../openflow-11'
// =============================================================================
// Valid error-code sets for each OFPT_ERROR type.
const ERROR_CODE = {
  [of.errorType.helloFailed]: of.helloFailedCode,
  [of.errorType.badRequest]: of.badRequestCode,
  [of.errorType.badAction]: of.badActionCode,
  [of.errorType.badInstruction]: of.badInstructionCode,
  [of.errorType.badMatch]: of.badMatchCode,
  [of.errorType.flowModFailed]: of.flowModFailedCode,
  [of.errorType.groupModFailed]: of.groupModFailedCode,
  [of.errorType.portModFailed]: of.portModFailedCode,
  [of.errorType.tableModFailed]: of.tableModFailedCode,
  [of.errorType.queueOpFailed]: of.queueOpFailedCode,
  [of.errorType.switchConfigFailed]: of.switchConfigFailedCode,
}
// -----------------------------------------------------------------------------
const OFFSETS = of.offsets.errorMsg
// =============================================================================
export default {
  // Serializes an OFPT_ERROR message: header, type, code and optional opaque
  // trailing data.
  // NOTE(review): sets `object.length` (not `header.length`) as a side
  // effect — confirm the header length is filled in by the caller.
  pack: (object, buffer = undefined, offset = 0) => {
    const { header, type, code, data } = object
    assert(header.type === of.type.error)
    // validate that `code` belongs to the code set of this error `type`
    const errorCodes = get(ERROR_CODE, type, `Invalid error type: ${type}`)
    assert(Object.values(errorCodes).includes(code))
    object.length = of.sizes.errorMsg
    if (data !== undefined) {
      object.length += data.length
    }
    buffer = buffer !== undefined ? buffer : Buffer.alloc(object.length)
    ofHeader.pack(header, buffer, offset + OFFSETS.header)
    buffer.writeUInt16BE(type, offset + OFFSETS.type)
    buffer.writeUInt16BE(code, offset + OFFSETS.code)
    if (data !== undefined) {
      data.copy(buffer, offset + OFFSETS.data, 0, data.length)
    }
    return buffer
  },
  // Deserializes an OFPT_ERROR message starting at `offset`; the trailing
  // `data` Buffer is only present when the header length exceeds the fixed
  // part of the message.
  unpack: (buffer, offset = 0) => {
    const header = ofHeader.unpack(buffer, offset + OFFSETS.header)
    assert(header.type === of.type.error)
    const type = buffer.readUInt16BE(offset + OFFSETS.type)
    const errorCodes = get(ERROR_CODE, type, `Invalid error type: ${type}`)
    const code = buffer.readUInt16BE(offset + OFFSETS.code)
    assert(Object.values(errorCodes).includes(code))
    const object = { header, type, code }
    const dataSize = header.length - of.sizes.errorMsg
    if (dataSize > 0) {
      object.data = Buffer.alloc(dataSize)
      buffer.copy(
        object.data,
        0,
        offset + OFFSETS.data,
        offset + OFFSETS.data + dataSize
      )
    }
    return object
  },
}

View File

@@ -0,0 +1,73 @@
import assert from 'assert'
import ofHeader from './header'
import ofPort from '../struct/port'
import of from '../openflow-11'
// =============================================================================
const OFFSETS = of.offsets.switchFeatures
const PAD_LENGTH = 3
// =============================================================================
export default {
  // Serializes an OFPT_FEATURES_REPLY (switch features) message, including
  // the trailing port structures. Sets `header.length` as a side effect.
  // NOTE(review): `datapath_id` must be a BigInt here (writeBigUInt64BE)
  // whereas unpack() returns it as a hex string — confirm this asymmetry is
  // intended by callers.
  pack: object => {
    const {
      header,
      datapath_id: did,
      n_buffers: nBufs,
      n_tables: nTables,
      capabilities,
      reserved,
      ports,
    } = object
    assert(header.type === of.type.featuresReply)
    header.length = of.sizes.switchFeatures + ports.length * of.sizes.port
    const buffer = Buffer.alloc(header.length)
    ofHeader.pack(header, buffer, OFFSETS.header)
    buffer.writeBigUInt64BE(did, OFFSETS.datapathId)
    buffer.writeUInt32BE(nBufs, OFFSETS.nBuffers)
    buffer.writeUInt8(nTables, OFFSETS.nTables)
    buffer.fill(0, OFFSETS.pad, OFFSETS.pad + PAD_LENGTH)
    buffer.writeUInt32BE(capabilities, OFFSETS.capabilities)
    buffer.writeUInt32BE(reserved, OFFSETS.reserved)
    let portsOffset = 0
    ports.forEach(port => {
      ofPort.pack(port, buffer, OFFSETS.ports + portsOffset++ * of.sizes.port)
    })
    return buffer
  },
  // Deserializes an OFPT_FEATURES_REPLY starting at `offset`; the number of
  // trailing port structures is derived from the remaining header length.
  unpack: (buffer, offset = 0) => {
    const header = ofHeader.unpack(buffer, offset + OFFSETS.header)
    assert(header.type === of.type.featuresReply)
    const object = { header }
    // datapath_id is exposed as a 16-char hex string
    object.datapath_id = buffer.toString(
      'hex',
      offset + OFFSETS.datapathId,
      offset + OFFSETS.datapathId + 8
    )
    object.n_buffers = buffer.readUInt32BE(offset + OFFSETS.nBuffers)
    object.n_tables = buffer.readUInt8(offset + OFFSETS.nTables)
    object.capabilities = buffer.readUInt32BE(offset + OFFSETS.capabilities)
    object.reserved = buffer.readUInt32BE(offset + OFFSETS.reserved)
    object.ports = []
    const nPorts = (header.length - of.sizes.switchFeatures) / of.sizes.port
    for (let i = 0; i < nPorts; ++i) {
      object.ports.push(
        ofPort.unpack(buffer, offset + OFFSETS.ports + i * of.sizes.port)
      )
    }
    return object
  },
}

View File

@@ -0,0 +1,24 @@
import assert from 'assert'
import ofHeader from './header'
import of from '../openflow-11'
// =============================================================================
// Codec for OFPT_FEATURES_REQUEST: a bare header with no message body.
export default {
  // Serializes the request; only the header is emitted. Note: sets
  // `header.length` as a side effect before delegating to the header codec.
  pack(object) {
    const header = object.header
    assert(header.type === of.type.featuresRequest)
    header.length = of.sizes.featuresRequest
    return ofHeader.pack(header)
  },
  // Deserializes a request starting at `offset`, validating both the message
  // type and the declared length.
  unpack(buffer, offset = 0) {
    const header = ofHeader.unpack(buffer, offset)
    assert(header.type === of.type.featuresRequest)
    assert(header.length === of.sizes.featuresRequest)
    return { header }
  },
}

View File

@@ -0,0 +1,197 @@
import assert from 'assert'
import get from '../../util/get-from-map'
import ofInstruction from '../instruction/instruction'
import uIntHelper from '../../util/uint-helper'
import ofHeader from './header'
import of from '../openflow-11'
import ofMatch from '../struct/match/match'
// =============================================================================
const INSTRUCTION_SIZE = {
[of.instructionType.goToTable]: of.sizes.instructionWriteMetadata,
[of.instructionType.writeMetadata]: of.sizes.instructionGotoTable,
[of.instructionType.clearActions]: of.sizes.instructionActions,
[of.instructionType.writeActions]: of.sizes.instructionActions,
[of.instructionType.applyActions]: of.sizes.instructionActions,
}
const ACTION_SIZE = {
[of.actionType.output]: of.sizes.actionOutput,
[of.actionType.group]: of.sizes.actionGroup,
[of.actionType.setVlanId]: of.sizes.actionVlanId,
[of.actionType.setVlanPcp]: of.sizes.actionVlanPcp,
[of.actionType.setDlSrc]: of.sizes.actionDlAddr,
[of.actionType.setDlDst]: of.sizes.actionDlAddr,
[of.actionType.setNwSrc]: of.sizes.actionNwAddr,
[of.actionType.setNwDst]: of.sizes.actionNwAddr,
[of.actionType.setNwTos]: of.sizes.actionNwTos,
[of.actionType.setNwEcn]: of.sizes.actionNwEcn,
[of.actionType.setTpSrc]: of.sizes.actionTpPort,
[of.actionType.setTpDst]: of.sizes.actionTpPort,
[of.actionType.copyTtlOut]: of.sizes.actionHeader,
[of.actionType.copyTtlIn]: of.sizes.actionHeader,
[of.actionType.setMplsLabel]: of.sizes.actionMplsLabel,
[of.actionType.setMplsTc]: of.sizes.actionMplsTc,
[of.actionType.setMplsTtl]: of.sizes.actionMplsTtl,
[of.actionType.decMplsTtl]: of.sizes.actionMplsTtl,
[of.actionType.pushVlan]: of.sizes.actionPush,
[of.actionType.popVlan]: of.sizes.actionHeader,
[of.actionType.pushMpls]: of.sizes.actionPush,
[of.actionType.popMpls]: of.sizes.actionPopMpls,
[of.actionType.setQueue]: of.sizes.actionSetQueue,
[of.actionType.setNwTtl]: of.sizes.actionNwTtl,
[of.actionType.decNwTtl]: of.sizes.actionNwTtl,
}
// -----------------------------------------------------------------------------
const OFFSETS = of.offsets.flowMod
// The cookie and its mask are 64-bit fields handled as raw 8-byte buffers.
const COOKIE_LENGTH = 8
const PAD_LENGTH = 2
// =============================================================================
export default {
  /**
   * Pack an OFPT_FLOW_MOD message.
   *
   * `header.length` is computed from the fixed part plus the wire size of
   * every instruction and of each instruction's actions, then the header,
   * cookie/cookie_mask, flow-mod fields, match and instructions are written.
   *
   * @param {object} object - Flow-mod description (mutated: header.length is set)
   * @param {Buffer} [buffer] - Destination buffer (allocated when omitted)
   * @param {number} [offset=0] - Byte offset at which to start writing
   * @returns {Buffer} the buffer containing the packed message
   */
  pack: (object, buffer = undefined, offset = 0) => {
    const {
      header,
      cookie,
      cookie_mask,
      table_id = 0,
      command,
      idle_timeout = 0,
      hard_timeout = 0,
      priority = of.defaultPriority,
      buffer_id = 0xffffffff,
      out_port = of.port.any,
      out_group = of.group.any,
      flags = 0,
      match,
      instructions = [],
    } = object
    // fill header length
    header.length = of.sizes.flowMod
    instructions.forEach(instruction => {
      header.length += get(
        INSTRUCTION_SIZE,
        instruction.type,
        `Invalid instruction type: ${instruction.type}`
      )
      const { actions = [] } = instruction
      actions.forEach(action => {
        // FIX: the error message said "instruction type" for an invalid action.
        header.length += get(
          ACTION_SIZE,
          action.type,
          `Invalid action type: ${action.type}`
        )
      })
    })
    buffer = buffer !== undefined ? buffer : Buffer.alloc(header.length)
    ofHeader.pack(header, buffer, offset + OFFSETS.header)
    if (cookie !== undefined) {
      if (cookie_mask !== undefined) {
        cookie_mask.copy(buffer, offset + OFFSETS.cookieMask)
      } else {
        // Cookie provided without an explicit mask: zero the mask field.
        // FIX: used `OFFSETS.cookie_mask`, which does not exist (the offsets
        // table uses camelCase `cookieMask`), corrupting the fill range.
        buffer.fill(
          0x00,
          offset + OFFSETS.cookieMask,
          offset + OFFSETS.cookieMask + COOKIE_LENGTH
        )
      }
      cookie.copy(buffer, offset + OFFSETS.cookie)
    } else {
      // No cookie at all: zero the cookie and set the mask to all ones.
      buffer.fill(
        0x00,
        offset + OFFSETS.cookie,
        offset + OFFSETS.cookie + COOKIE_LENGTH
      )
      buffer.fill(
        0xff,
        offset + OFFSETS.cookieMask,
        offset + OFFSETS.cookieMask + COOKIE_LENGTH
      )
    }
    buffer.writeUInt8(table_id, offset + OFFSETS.tableId)
    assert(Object.values(of.flowModCommand).includes(command))
    buffer.writeUInt8(command, offset + OFFSETS.command)
    buffer.writeUInt16BE(idle_timeout, offset + OFFSETS.idleTimeout)
    buffer.writeUInt16BE(hard_timeout, offset + OFFSETS.hardTimeout)
    buffer.writeUInt16BE(priority, offset + OFFSETS.priority)
    buffer.writeUInt32BE(buffer_id, offset + OFFSETS.bufferId)
    buffer.writeUInt32BE(out_port, offset + OFFSETS.outPort)
    buffer.writeUInt32BE(out_group, offset + OFFSETS.outGroup)
    buffer.writeUInt16BE(flags, offset + OFFSETS.flags)
    buffer.fill(0, offset + OFFSETS.pad, offset + OFFSETS.pad + PAD_LENGTH)
    ofMatch.pack(match, buffer, offset + OFFSETS.match)
    let instructionOffset = offset + OFFSETS.instructions
    instructions.forEach(instruction => {
      ofInstruction.pack(instruction, buffer, instructionOffset)
      // Advance by the instruction's wire length (`len` is expected on the
      // instruction object — TODO confirm ofInstruction.pack sets it).
      instructionOffset += instruction.len
    })
    return buffer
  },
  /**
   * Unpack an OFPT_FLOW_MOD message from `buffer` at `offset`.
   *
   * @param {Buffer} buffer - Source buffer
   * @param {number} [offset=0] - Byte offset at which the message starts
   * @returns {object} the decoded flow-mod (cookie/cookie_mask as Buffers)
   */
  unpack: (buffer, offset = 0) => {
    const header = ofHeader.unpack(buffer, offset + OFFSETS.header)
    assert(header.type === of.type.flowMod)
    const object = { header }
    object.cookie = Buffer.alloc(COOKIE_LENGTH)
    buffer.copy(
      object.cookie,
      0,
      offset + OFFSETS.cookie,
      offset + OFFSETS.cookie + COOKIE_LENGTH
    )
    // A non-zero mask is significant; an all-zero mask means "no mask".
    if (
      !uIntHelper.isUInt64None([
        buffer.readUInt32BE(offset + OFFSETS.cookieMask),
        buffer.readUInt32BE(offset + OFFSETS.cookieMask + COOKIE_LENGTH / 2),
      ])
    ) {
      object.cookie_mask = Buffer.alloc(COOKIE_LENGTH)
      buffer.copy(
        object.cookie_mask,
        0,
        offset + OFFSETS.cookieMask,
        offset + OFFSETS.cookieMask + COOKIE_LENGTH
      )
    }
    object.table_id = buffer.readUInt8(offset + OFFSETS.tableId)
    object.command = buffer.readUInt8(offset + OFFSETS.command)
    assert(Object.values(of.flowModCommand).includes(object.command))
    object.idle_timeout = buffer.readUInt16BE(offset + OFFSETS.idleTimeout)
    object.hard_timeout = buffer.readUInt16BE(offset + OFFSETS.hardTimeout)
    object.priority = buffer.readUInt16BE(offset + OFFSETS.priority)
    object.buffer_id = buffer.readUInt32BE(offset + OFFSETS.bufferId)
    object.out_port = buffer.readUInt32BE(offset + OFFSETS.outPort)
    object.out_group = buffer.readUInt32BE(offset + OFFSETS.outGroup)
    object.flags = buffer.readUInt16BE(offset + OFFSETS.flags)
    object.match = ofMatch.unpack(buffer, offset + OFFSETS.match)
    object.instructions = []
    let instructionOffset = offset + OFFSETS.instructions
    // Instructions fill the remainder of the message.
    // FIX: the loop bound must be relative to `offset`; the original compared
    // the absolute cursor to `header.length` and skipped instructions when
    // unpacking at a non-zero offset.
    while (instructionOffset < offset + header.length) {
      const instruction = ofInstruction.unpack(buffer, instructionOffset)
      object.instructions.push(instruction)
      instructionOffset += instruction.len
    }
    return object
  },
}

View File

@@ -0,0 +1,24 @@
import assert from 'assert'
import ofHeader from './header'
import of from '../openflow-11'
// =============================================================================
export default {
  // Serialize a GET_CONFIG_REQUEST: the message is a bare OpenFlow header.
  pack: ({ header }) => {
    assert(header.type === of.type.getConfigRequest)
    // No body, so the message length is exactly the header size.
    header.length = of.sizes.header
    return ofHeader.pack(header)
  },
  // Deserialize a GET_CONFIG_REQUEST, validating its type and length.
  unpack: (buffer, offset = 0) => {
    const header = ofHeader.unpack(buffer, offset)
    assert(header.type === of.type.getConfigRequest)
    assert(header.length === of.sizes.header)
    return { header }
  },
}

View File

@@ -0,0 +1,39 @@
import assert from 'assert'
import of from '../openflow-11'
// =============================================================================
const OFFSETS = of.offsets.header
// =============================================================================
export default {
  // Serialize an OpenFlow header (version, type, length, xid) into `buffer`
  // at `offset`, allocating a fresh header-sized buffer when none is given.
  pack: (object, buffer = undefined, offset = 0) => {
    const { version, type, length, xid } = object
    assert(version === of.version)
    assert(Object.values(of.type).includes(type))
    if (buffer === undefined) {
      buffer = Buffer.alloc(of.sizes.header)
    }
    buffer.writeUInt8(version, offset + OFFSETS.version)
    buffer.writeUInt8(type, offset + OFFSETS.type)
    buffer.writeUInt16BE(length, offset + OFFSETS.length)
    buffer.writeUInt32BE(xid, offset + OFFSETS.xid)
    return buffer
  },
  // Deserialize an OpenFlow header, validating the protocol version and the
  // message type.
  unpack: (buffer, offset = 0) => {
    const version = buffer.readUInt8(offset + OFFSETS.version)
    assert(version === of.version)
    const type = buffer.readUInt8(offset + OFFSETS.type)
    assert(Object.values(of.type).includes(type))
    return {
      version,
      type,
      length: buffer.readUInt16BE(offset + OFFSETS.length),
      xid: buffer.readUInt32BE(offset + OFFSETS.xid),
    }
  },
}

View File

@@ -0,0 +1,27 @@
import assert from 'assert'
import ofHeader from './header'
import of from '../openflow-11'
// =============================================================================
const OFFSETS = of.offsets.hello
// =============================================================================
export default {
  // Serialize an OFPT_HELLO: the message is a bare OpenFlow header.
  pack: ({ header }) => {
    assert(header.type === of.type.hello)
    header.length = of.sizes.hello
    return ofHeader.pack(header)
  },
  // Deserialize an OFPT_HELLO, validating the message type.
  unpack: (buffer, offset = 0) => {
    const header = ofHeader.unpack(buffer, offset + OFFSETS.header)
    assert(header.type === of.type.hello)
    return { header }
  },
}

View File

@@ -0,0 +1,38 @@
import assert from 'assert'
import ofHeader from './header'
import of from '../openflow-11'
// =============================================================================
const OFFSETS = of.offsets.switchConfig
// GET_CONFIG_REPLY and SET_CONFIG share this body layout.
const TYPES = [of.type.getConfigReply, of.type.setConfig]
// =============================================================================
export default {
  // Serialize a switch-config message: a header followed by `flags` and
  // `miss_send_len`.
  pack: ({ header, flags, miss_send_len }) => {
    assert(TYPES.includes(header.type))
    header.length = of.sizes.switchConfig
    const buffer = Buffer.alloc(header.length)
    ofHeader.pack(header, buffer, OFFSETS.header)
    buffer.writeUInt16BE(flags, OFFSETS.flags)
    buffer.writeUInt16BE(miss_send_len, OFFSETS.missSendLen)
    return buffer
  },
  // Deserialize a switch-config message, validating its type and length.
  unpack: (buffer, offset = 0) => {
    const header = ofHeader.unpack(buffer, offset + OFFSETS.header)
    assert(TYPES.includes(header.type))
    assert(header.length === of.sizes.switchConfig)
    return {
      header,
      flags: buffer.readUInt16BE(offset + OFFSETS.flags),
      miss_send_len: buffer.readUInt16BE(offset + OFFSETS.missSendLen),
    }
  },
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,374 @@
import assert from 'assert'
import addressParser from '../../../util/addrress-parser'
import uIntHelper from '../../../util/uint-helper'
import of from '../../openflow-11'
// =============================================================================
const OFFSETS = of.offsets.match
const WILDCARDS = of.flowWildcards
const IP4_ADDR_LEN = 4
// The metadata field and its mask are 64-bit values handled as 8-byte buffers.
const METADATA_LENGTH = 8
const PAD_LENGTH = 1
const PAD2_LENGTH = 3
// =============================================================================
export default {
  /**
   * Pack an OpenFlow 1.1 standard match structure.
   *
   * Fields absent from `object` are wildcarded: scalar fields set the
   * corresponding bit in the `wildcards` bitmap, while address fields
   * (dl_src/dl_dst/nw_src/nw_dst) and metadata are wildcarded through an
   * all-ones mask.
   *
   * @param {object} object - Match description (mutated: `length` is set)
   * @param {Buffer} [buffer] - Destination buffer (allocated when omitted)
   * @param {number} [offset=0] - Byte offset at which to start writing
   * @returns {Buffer} the buffer containing the packed match
   */
  pack: (object, buffer = undefined, offset = 0) => {
    assert(object.type === of.matchType.standard)
    object.length = of.sizes.match
    buffer = buffer !== undefined ? buffer : Buffer.alloc(object.length)
    buffer.writeUInt16BE(object.type, offset + OFFSETS.type)
    buffer.writeUInt16BE(object.length, offset + OFFSETS.length)
    let wildcards = 0
    let inPort = 0
    if (object.in_port !== undefined) {
      inPort = object.in_port
    } else {
      wildcards |= WILDCARDS.inPort
    }
    buffer.writeUInt32BE(inPort, offset + OFFSETS.inPort)
    if (object.dl_src !== undefined) {
      if (object.dl_src_mask !== undefined) {
        addressParser.stringToEth(
          object.dl_src_mask,
          buffer,
          offset + OFFSETS.dlSrcMask
        )
      } else {
        // Address without an explicit mask: all-zero mask (exact match).
        buffer.fill(
          0x00,
          offset + OFFSETS.dlSrcMask,
          offset + OFFSETS.dlSrcMask + of.ethAddrLen
        )
      }
      addressParser.stringToEth(object.dl_src, buffer, offset + OFFSETS.dlSrc)
    } else {
      // No address: all-ones mask marks the field as wildcarded.
      buffer.fill(
        0x00,
        offset + OFFSETS.dlSrc,
        offset + OFFSETS.dlSrc + of.ethAddrLen
      )
      buffer.fill(
        0xff,
        offset + OFFSETS.dlSrcMask,
        offset + OFFSETS.dlSrcMask + of.ethAddrLen
      )
    }
    if (object.dl_dst !== undefined) {
      if (object.dl_dst_mask !== undefined) {
        addressParser.stringToEth(
          object.dl_dst_mask,
          buffer,
          offset + OFFSETS.dlDstMask
        )
      } else {
        buffer.fill(
          0x00,
          offset + OFFSETS.dlDstMask,
          offset + OFFSETS.dlDstMask + of.ethAddrLen
        )
      }
      addressParser.stringToEth(object.dl_dst, buffer, offset + OFFSETS.dlDst)
    } else {
      buffer.fill(
        0x00,
        offset + OFFSETS.dlDst,
        offset + OFFSETS.dlDst + of.ethAddrLen
      )
      buffer.fill(
        0xff,
        offset + OFFSETS.dlDstMask,
        offset + OFFSETS.dlDstMask + of.ethAddrLen
      )
    }
    let dlVlan = 0
    if (object.dl_vlan !== undefined) {
      dlVlan = object.dl_vlan
    } else {
      wildcards |= WILDCARDS.dlVlan
    }
    buffer.writeUInt16BE(dlVlan, offset + OFFSETS.dlVlan)
    let dlVlanPcp = 0
    if (object.dl_vlan_pcp !== undefined) {
      dlVlanPcp = object.dl_vlan_pcp
    } else {
      wildcards |= WILDCARDS.dlVlanPcp
    }
    buffer.writeUInt8(dlVlanPcp, offset + OFFSETS.dlVlanPcp)
    buffer.fill(0, offset + OFFSETS.pad1, offset + OFFSETS.pad1 + PAD_LENGTH)
    let dlType = 0
    if (object.dl_type !== undefined) {
      dlType = object.dl_type
    } else {
      wildcards |= WILDCARDS.dlType
    }
    buffer.writeUInt16BE(dlType, offset + OFFSETS.dlType)
    let nwTos = 0
    if (object.nw_tos !== undefined) {
      nwTos = object.nw_tos
    } else {
      wildcards |= WILDCARDS.nwTos
    }
    buffer.writeUInt8(nwTos, offset + OFFSETS.nwTos)
    let nwProto = 0
    if (object.nw_proto !== undefined) {
      nwProto = object.nw_proto
    } else {
      wildcards |= WILDCARDS.nwProto
    }
    buffer.writeUInt8(nwProto, offset + OFFSETS.nwProto)
    if (object.nw_src !== undefined) {
      if (object.nw_src_mask !== undefined) {
        addressParser.stringToip4(
          object.nw_src_mask,
          buffer,
          offset + OFFSETS.nwSrcMask
        )
      } else {
        buffer.fill(
          0x00,
          offset + OFFSETS.nwSrcMask,
          offset + OFFSETS.nwSrcMask + IP4_ADDR_LEN
        )
      }
      addressParser.stringToip4(object.nw_src, buffer, offset + OFFSETS.nwSrc)
    } else {
      buffer.fill(
        0x00,
        offset + OFFSETS.nwSrc,
        offset + OFFSETS.nwSrc + IP4_ADDR_LEN
      )
      buffer.fill(
        0xff,
        offset + OFFSETS.nwSrcMask,
        offset + OFFSETS.nwSrcMask + IP4_ADDR_LEN
      )
    }
    if (object.nw_dst !== undefined) {
      if (object.nw_dst_mask !== undefined) {
        addressParser.stringToip4(
          object.nw_dst_mask,
          buffer,
          offset + OFFSETS.nwDstMask
        )
      } else {
        buffer.fill(
          0x00,
          offset + OFFSETS.nwDstMask,
          offset + OFFSETS.nwDstMask + IP4_ADDR_LEN
        )
      }
      addressParser.stringToip4(object.nw_dst, buffer, offset + OFFSETS.nwDst)
    } else {
      buffer.fill(
        0x00,
        offset + OFFSETS.nwDst,
        offset + OFFSETS.nwDst + IP4_ADDR_LEN
      )
      buffer.fill(
        0xff,
        offset + OFFSETS.nwDstMask,
        offset + OFFSETS.nwDstMask + IP4_ADDR_LEN
      )
    }
    let tpSrc = 0
    if (object.tp_src !== undefined) {
      tpSrc = object.tp_src
    } else {
      wildcards |= WILDCARDS.tpSrc
    }
    buffer.writeUInt16BE(tpSrc, offset + OFFSETS.tpSrc)
    let tpDst = 0
    if (object.tp_dst !== undefined) {
      tpDst = object.tp_dst
    } else {
      wildcards |= WILDCARDS.tpDst
    }
    buffer.writeUInt16BE(tpDst, offset + OFFSETS.tpDst)
    let mplsLabel = 0
    if (object.mpls_label !== undefined) {
      mplsLabel = object.mpls_label
    } else {
      wildcards |= WILDCARDS.mplsLabel
    }
    buffer.writeUInt32BE(mplsLabel, offset + OFFSETS.mplsLabel)
    let mplsTc = 0
    if (object.mpls_tc !== undefined) {
      mplsTc = object.mpls_tc
    } else {
      wildcards |= WILDCARDS.mplsTc
    }
    buffer.writeUInt8(mplsTc, offset + OFFSETS.mplsTc)
    buffer.fill(0, offset + OFFSETS.pad2, offset + OFFSETS.pad2 + PAD2_LENGTH)
    if (object.metadata !== undefined) {
      if (object.metadata_mask !== undefined) {
        // FIX: copy the mask from the object INTO the packet buffer — the
        // original called `buffer.copy(object.metadata_mask, …)`, which reads
        // from the (still empty) packet buffer into the object instead.
        object.metadata_mask.copy(buffer, offset + OFFSETS.metadataMask)
      } else {
        buffer.fill(
          0x00,
          offset + OFFSETS.metadataMask,
          offset + OFFSETS.metadataMask + METADATA_LENGTH
        )
      }
      // FIX: same direction error for the metadata value itself.
      object.metadata.copy(buffer, offset + OFFSETS.metadata)
    } else {
      buffer.fill(
        0x00,
        offset + OFFSETS.metadata,
        offset + OFFSETS.metadata + METADATA_LENGTH
      )
      buffer.fill(
        0xff,
        offset + OFFSETS.metadataMask,
        offset + OFFSETS.metadataMask + METADATA_LENGTH
      )
    }
    buffer.writeUInt32BE(wildcards, offset + OFFSETS.wildcards)
    return buffer
  },
  /**
   * Unpack an OpenFlow 1.1 standard match structure.
   *
   * @param {Buffer} buffer - Source buffer
   * @param {number} [offset=0] - Byte offset at which the match starts
   * @returns {object} the decoded match; wildcarded fields are omitted
   */
  unpack: (buffer, offset = 0) => {
    const object = {}
    object.type = buffer.readUInt16BE(offset + OFFSETS.type)
    assert(object.type === of.matchType.standard)
    object.length = buffer.readUInt16BE(offset + OFFSETS.length)
    assert(object.length === of.sizes.match)
    // Wildcards indicate which value to use for the match.
    // if `wildcards & of.wildcards.<value>` === 0 then `value` is not wildcarded and must be used.
    const wildcards = (object.wildcards = buffer.readUInt32BE(
      offset + OFFSETS.wildcards
    ))
    if ((wildcards & WILDCARDS.inPort) === 0) {
      object.in_port = buffer.readUInt32BE(offset + OFFSETS.inPort)
    }
    if (!addressParser.isEthMaskAll(buffer, offset + OFFSETS.dlSrcMask)) {
      if (!addressParser.isEthMaskNone(buffer, offset + OFFSETS.dlSrcMask)) {
        object.dl_src_mask = addressParser.ethToString(
          buffer,
          offset + OFFSETS.dlSrcMask
        )
      }
      object.dl_src = addressParser.ethToString(buffer, offset + OFFSETS.dlSrc)
    }
    if (!addressParser.isEthMaskAll(buffer, offset + OFFSETS.dlDstMask)) {
      if (!addressParser.isEthMaskNone(buffer, offset + OFFSETS.dlDstMask)) {
        object.dl_dst_mask = addressParser.ethToString(
          buffer,
          offset + OFFSETS.dlDstMask
        )
      }
      object.dl_dst = addressParser.ethToString(buffer, offset + OFFSETS.dlDst)
    }
    if ((wildcards & WILDCARDS.dlVlan) === 0) {
      object.dl_vlan = buffer.readUInt16BE(offset + OFFSETS.dlVlan)
    }
    if ((wildcards & WILDCARDS.dlVlanPcp) === 0) {
      // FIX: pack writes this field as a single byte; reading 16 bits pulled
      // in the padding byte that follows it.
      object.dl_vlan_pcp = buffer.readUInt8(offset + OFFSETS.dlVlanPcp)
    }
    if ((wildcards & WILDCARDS.dlType) === 0) {
      object.dl_type = buffer.readUInt16BE(offset + OFFSETS.dlType)
    }
    if ((wildcards & WILDCARDS.nwTos) === 0) {
      object.nw_tos = buffer.readUInt8(offset + OFFSETS.nwTos)
    }
    if ((wildcards & WILDCARDS.nwProto) === 0) {
      object.nw_proto = buffer.readUInt8(offset + OFFSETS.nwProto)
    }
    if (!addressParser.isIp4MaskAll(buffer, offset + OFFSETS.nwSrcMask)) {
      if (!addressParser.isIp4MaskNone(buffer, offset + OFFSETS.nwSrcMask)) {
        object.nw_src_mask = addressParser.ip4ToString(
          buffer,
          offset + OFFSETS.nwSrcMask
        )
      }
      object.nw_src = addressParser.ip4ToString(buffer, offset + OFFSETS.nwSrc)
    }
    if (!addressParser.isIp4MaskAll(buffer, offset + OFFSETS.nwDstMask)) {
      if (!addressParser.isIp4MaskNone(buffer, offset + OFFSETS.nwDstMask)) {
        object.nw_dst_mask = addressParser.ip4ToString(
          buffer,
          offset + OFFSETS.nwDstMask
        )
      }
      object.nw_dst = addressParser.ip4ToString(buffer, offset + OFFSETS.nwDst)
    }
    if ((wildcards & WILDCARDS.tpSrc) === 0) {
      object.tp_src = buffer.readUInt16BE(offset + OFFSETS.tpSrc)
    }
    if ((wildcards & WILDCARDS.tpDst) === 0) {
      object.tp_dst = buffer.readUInt16BE(offset + OFFSETS.tpDst)
    }
    if ((wildcards & WILDCARDS.mplsLabel) === 0) {
      object.mpls_label = buffer.readUInt32BE(offset + OFFSETS.mplsLabel)
    }
    if ((wildcards & WILDCARDS.mplsTc) === 0) {
      // FIX: pack writes mpls_tc as a single byte; reading 32 bits pulled in
      // the 3 padding bytes that follow it.
      object.mpls_tc = buffer.readUInt8(offset + OFFSETS.mplsTc)
    }
    const metadataMask = [
      buffer.readUInt32BE(offset + OFFSETS.metadataMask),
      buffer.readUInt32BE(offset + OFFSETS.metadataMask + METADATA_LENGTH / 2),
    ]
    if (!uIntHelper.isUInt64All(metadataMask)) {
      if (!uIntHelper.isUInt64None(metadataMask)) {
        object.metadata_mask = Buffer.alloc(METADATA_LENGTH)
        buffer.copy(
          object.metadata_mask,
          0,
          offset + OFFSETS.metadataMask,
          offset + OFFSETS.metadataMask + METADATA_LENGTH
        )
      }
      object.metadata = Buffer.alloc(METADATA_LENGTH)
      buffer.copy(
        object.metadata,
        0,
        offset + OFFSETS.metadata,
        offset + OFFSETS.metadata + METADATA_LENGTH
      )
    }
    return object
  },
}

View File

@@ -0,0 +1,79 @@
import of from '../openflow-11'
import addressParser from '../../util/addrress-parser'
// =============================================================================
const OFFSETS = of.offsets.port
const PAD_LENGTH = 4
const PAD2_LENGTH = 2
// =============================================================================
export default {
  /**
   * Pack an OpenFlow 1.1 port description into a buffer.
   *
   * @param {object} object - Port description (snake_case fields)
   * @param {Buffer} [buffer] - Destination buffer (allocated when omitted)
   * @param {number} [offset=0] - Byte offset at which to start writing
   * @returns {Buffer} the buffer containing the packed port
   */
  pack: (object, buffer = undefined, offset = 0) => {
    buffer = buffer !== undefined ? buffer : Buffer.alloc(of.sizes.port)
    const {
      port_no: portNo,
      hw_addr: hwAddr,
      name,
      config,
      state,
      curr,
      advertised,
      supported,
      peer,
      curr_speed: currSpeed,
      max_speed: maxSpeed,
    } = object
    buffer.writeUInt32BE(portNo, offset + OFFSETS.portNo)
    buffer.fill(0, offset + OFFSETS.pad, offset + OFFSETS.pad + PAD_LENGTH)
    addressParser.stringToEth(hwAddr, buffer, offset + OFFSETS.hwAddr)
    // FIX: the second padding block follows hw_addr; the original cleared
    // OFFSETS.pad again and left these 2 bytes uninitialized.
    const pad2Offset = offset + OFFSETS.hwAddr + of.ethAddrLen
    buffer.fill(0, pad2Offset, pad2Offset + PAD2_LENGTH)
    buffer.write(name, offset + OFFSETS.name, of.maxPortNameLen)
    if (name.length < of.maxPortNameLen) {
      // Zero-fill the unused tail of the fixed-size name field.
      // NOTE(review): name.length counts UTF-16 units while buffer.write
      // counts bytes — assumes ASCII port names; confirm against callers.
      buffer.fill(
        0,
        offset + OFFSETS.name + name.length,
        offset + OFFSETS.name + of.maxPortNameLen
      )
    }
    buffer.writeUInt32BE(config, offset + OFFSETS.config)
    buffer.writeUInt32BE(state, offset + OFFSETS.state)
    buffer.writeUInt32BE(curr, offset + OFFSETS.curr)
    buffer.writeUInt32BE(advertised, offset + OFFSETS.advertised)
    buffer.writeUInt32BE(supported, offset + OFFSETS.supported)
    buffer.writeUInt32BE(peer, offset + OFFSETS.peer)
    buffer.writeUInt32BE(currSpeed, offset + OFFSETS.currSpeed)
    buffer.writeUInt32BE(maxSpeed, offset + OFFSETS.maxSpeed)
    return buffer
  },
  /**
   * Unpack an OpenFlow 1.1 port description from `buffer` at `offset`.
   *
   * @returns {object} the decoded port (snake_case fields)
   */
  unpack: (buffer, offset = 0) => {
    const body = {}
    body.port_no = buffer.readUInt32BE(offset + OFFSETS.portNo)
    body.hw_addr = addressParser.ethToString(buffer, offset + OFFSETS.hwAddr)
    const name = buffer.toString(
      'utf8',
      offset + OFFSETS.name,
      offset + OFFSETS.name + of.maxPortNameLen
    )
    // Strip NUL padding added for names shorter than the fixed field width.
    // FIX: a name using the full width has no NUL terminator; the original
    // `substr(0, indexOf('\0'))` returned '' in that case (indexOf === -1).
    const nul = name.indexOf('\0')
    body.name = nul === -1 ? name : name.slice(0, nul)
    body.config = buffer.readUInt32BE(offset + OFFSETS.config)
    body.state = buffer.readUInt32BE(offset + OFFSETS.state)
    body.curr = buffer.readUInt32BE(offset + OFFSETS.curr)
    body.advertised = buffer.readUInt32BE(offset + OFFSETS.advertised)
    body.supported = buffer.readUInt32BE(offset + OFFSETS.supported)
    body.peer = buffer.readUInt32BE(offset + OFFSETS.peer)
    body.curr_speed = buffer.readUInt32BE(offset + OFFSETS.currSpeed)
    body.max_speed = buffer.readUInt32BE(offset + OFFSETS.maxSpeed)
    return body
  },
}

View File

@@ -0,0 +1,45 @@
import assert from 'assert'
import of from './index'
import scheme from './default-header-scheme'
import { readChunk } from '@vates/read-chunk'
// =============================================================================
/**
 * Async generator: reads OpenFlow messages from `socket` and yields them
 * unpacked, until the stream is exhausted (readChunk returns null).
 *
 * A single scratch buffer is reused (and grown as needed) across iterations;
 * each message is fully unpacked before the next read overwrites the buffer.
 *
 * @param socket - readable stream carrying OpenFlow messages
 * @yields one unpacked OpenFlow message per iteration
 */
export default async function* parse(socket) {
  let buffer = Buffer.alloc(1024)
  let data
  // Read the header
  while ((data = await readChunk(socket, scheme.size)) !== null) {
    // Read OpenFlow message size from its header
    const msgSize = data.readUInt16BE(scheme.offsets.length)
    data.copy(buffer, 0, 0, scheme.size)
    // Grow the scratch buffer when the whole message does not fit.
    if (buffer.length < msgSize) {
      buffer = resize(buffer, msgSize)
    }
    // Read the rest of the openflow message
    if (msgSize > scheme.size) {
      data = await readChunk(socket, msgSize - scheme.size)
      // EOF in the middle of a message body is a protocol error.
      assert.notStrictEqual(data, null)
      data.copy(buffer, scheme.size, 0, msgSize - scheme.size)
    }
    yield of.unpack(buffer)
  }
}
// -----------------------------------------------------------------------------
// Grow `buffer` so it can hold at least `size` bytes by repeatedly doubling
// its length (always at least one doubling). Returns a new zero-filled
// buffer containing the old contents; the original buffer is not modified.
function resize(buffer, size) {
  let grownLength = buffer.length * 2
  while (grownLength < size) {
    grownLength *= 2
  }
  const grown = Buffer.alloc(grownLength)
  buffer.copy(grown)
  return grown
}

View File

@@ -0,0 +1,64 @@
import assert from 'assert'
import util from 'util'
// =============================================================================
// Number of bytes in an Ethernet (MAC) address and an IPv4 address.
const ETH_ADDR_LENGTH = 6
const IP4_ADDR_LENGTH = 4
// Hoisted so the patterns are not re-built on every call (no /g flag, so
// exec/test are stateless).
const ETH_REGEX = /^([0-9A-Fa-f]{2}):([0-9A-Fa-f]{2}):([0-9A-Fa-f]{2}):([0-9A-Fa-f]{2}):([0-9A-Fa-f]{2}):([0-9A-Fa-f]{2})$/
const IP4_REGEX = /^([1-9]?\d|1\d\d|2[0-4]\d|25[0-5])\.([1-9]?\d|1\d\d|2[0-4]\d|25[0-5])\.([1-9]?\d|1\d\d|2[0-4]\d|25[0-5])\.([1-9]?\d|1\d\d|2[0-4]\d|25[0-5])$/

const addressParser = {
  // True when the 6 mask bytes at `offset` are all zero.
  isEthMaskNone: (buffer, offset) =>
    buffer.readUInt32BE(offset) === 0x00000000 &&
    buffer.readUInt16BE(offset + 4) === 0x0000,
  // True when the 6 mask bytes at `offset` are all ones.
  isEthMaskAll: (buffer, offset) =>
    buffer.readUInt32BE(offset) === 0xffffffff &&
    buffer.readUInt16BE(offset + 4) === 0xffff,
  isIp4MaskNone: (buffer, offset) => buffer.readUInt32BE(offset) === 0x00000000,
  isIp4MaskAll: (buffer, offset) => buffer.readUInt32BE(offset) === 0xffffffff,
  // Format the 6 bytes at `offset` as a lowercase `aa:bb:cc:dd:ee:ff` string.
  ethToString: (buffer, offset) =>
    Array.from({ length: ETH_ADDR_LENGTH }, (_, i) =>
      buffer.toString('hex', offset + i, offset + i + 1)
    ).join(':'),
  // Parse `aa:bb:cc:dd:ee:ff` (case-insensitive) and write the 6 bytes into
  // `buffer` at `offset`. Throws an AssertionError on invalid input.
  stringToEth: (string, buffer, offset) => {
    const eth = ETH_REGEX.exec(string)
    assert(eth !== null)
    for (let i = 0; i < ETH_ADDR_LENGTH; ++i) {
      buffer.writeUInt8(parseInt(eth[i + 1], 16), offset + i)
    }
  },
  // Format the 4 bytes at `offset` as a dotted-decimal IPv4 string.
  ip4ToString: (buffer, offset) =>
    util.format(
      '%d.%d.%d.%d',
      buffer.readUInt8(offset),
      buffer.readUInt8(offset + 1),
      buffer.readUInt8(offset + 2),
      buffer.readUInt8(offset + 3)
    ),
  // Parse a dotted-decimal IPv4 string (0-255 per octet, no leading zeros)
  // and write the 4 bytes into `buffer` at `offset`. Throws on invalid input.
  stringToip4: (string, buffer, offset) => {
    const ip = IP4_REGEX.exec(string)
    assert(ip !== null)
    for (let i = 0; i < IP4_ADDR_LENGTH; ++i) {
      buffer.writeUInt8(parseInt(ip[i + 1], 10), offset + i)
    }
  },
}
export default addressParser

View File

@@ -0,0 +1,11 @@
import assert from 'assert'
// Look up `key` (stringified) in `map` and return the stored value.
// Throws an AssertionError — carrying `errorMsg` when provided, otherwise
// "<key> is invalid" — if the stored value is `undefined`.
export default function get(map, key, errorMsg = undefined) {
  const value = map[String(key)]
  const message = errorMsg !== undefined ? errorMsg : `${key} is invalid`
  assert.notStrictEqual(value, undefined, message)
  return value
}

View File

@@ -0,0 +1,10 @@
// A 64-bit unsigned integer is represented here as a pair of 32-bit words:
// [high, low].
const ZERO = 0x00000000
const ALL = 0xffffffff
// =============================================================================
const uIntHelper = {
  // True when both 32-bit halves are zero.
  isUInt64None: n => n[0] === ZERO && n[1] === ZERO,
  // True when both 32-bit halves are all ones.
  isUInt64All: n => n[0] === ALL && n[1] === ALL,
}
export default uIntHelper

View File

@@ -0,0 +1,9 @@
// Supported OpenFlow protocol versions, keyed by name, as wire-format
// version numbers (the first byte of every OpenFlow message header).
export default {
  // TODO: more versions
  // openFlow10: 0x01,
  openFlow11: 0x02,
  // openFlow12: 0x03,
  // openFlow13: 0x04,
  // openFlow14: 0x05,
  // openFlow15: 0x06,
}

View File

@@ -19,7 +19,14 @@ Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/self-
```js
import { genSelfSigned } from '@xen-orchestra/self-signed'
console.log(await genSelfSigned())
console.log(
await genSelfSigned({
// Number of days this certificate will be valid.
//
// Default: 360
days: 600,
})
)
// {
// cert: '-----BEGIN CERTIFICATE-----\n' +
// // content…

View File

@@ -1,7 +1,14 @@
```js
import { genSelfSigned } from '@xen-orchestra/self-signed'
console.log(await genSelfSigned())
console.log(
await genSelfSigned({
// Number of days this certificate will be valid.
//
// Default: 360
days: 600,
})
)
// {
// cert: '-----BEGIN CERTIFICATE-----\n' +
// // content…

View File

@@ -10,12 +10,12 @@ const openssl = (cmd, args, { input, ...opts } = {}) =>
}
})
exports.genSelfSignedCert = async () => {
exports.genSelfSignedCert = async ({ days = 360 } = {}) => {
const key = await openssl('genrsa', ['2048'])
return {
cert: await openssl(
'req',
['-batch', '-new', '-key', '-', '-x509', '-days', '360', '-nodes'],
['-batch', '-new', '-key', '-', '-x509', '-days', String(days), '-nodes'],
{
input: key,
}

View File

@@ -33,23 +33,23 @@
"node": ">=8.10"
},
"dependencies": {
"chalk": "^2.2.0",
"chalk": "^4.1.0",
"exec-promise": "^0.7.0",
"fs-extra": "^9.0.0",
"fs-promise": "^2.0.3",
"get-stream": "^4.1.0",
"get-stream": "^6.0.0",
"http-request-plus": "^0.8.0",
"human-format": "^0.10.0",
"human-format": "^0.11.0",
"l33teral": "^3.0.3",
"lodash": "^4.17.4",
"nice-pipe": "0.0.0",
"pretty-ms": "^4.0.0",
"pretty-ms": "^7.0.0",
"progress-stream": "^2.0.0",
"pw": "^0.0.4",
"strip-indent": "^2.0.0",
"xdg-basedir": "^3.0.0",
"strip-indent": "^3.0.0",
"xdg-basedir": "^4.0.0",
"xo-lib": "^0.9.0",
"xo-vmdk-to-vhd": "^1.2.1"
"xo-vmdk-to-vhd": "^1.3.1"
},
"devDependencies": {
"@babel/cli": "^7.0.0",

View File

@@ -1,9 +1,176 @@
# ChangeLog
## **5.48.3** (2020-07-10)
## **5.51.0** (2020-09-30)
![Channel: latest](https://badgen.net/badge/channel/latest/yellow)
### Highlights
- [Self/VDI migration] Ability to migrate VDIs to other SRs within a resource set [#5020](https://github.com/vatesfr/xen-orchestra/issues/5020) (PR [#5201](https://github.com/vatesfr/xen-orchestra/pull/5201))
- [LDAP] Ability to import LDAP groups to XO [#1884](https://github.com/vatesfr/xen-orchestra/issues/1884) (PR [#5279](https://github.com/vatesfr/xen-orchestra/pull/5279))
- [Tasks] Show XO objects linked to pending/finished tasks [#4275](https://github.com/vatesfr/xen-orchestra/issues/4275) (PR [#5267](https://github.com/vatesfr/xen-orchestra/pull/5267))
- [Backup logs] Ability to filter by VM/pool name [#4406](https://github.com/vatesfr/xen-orchestra/issues/4406) (PR [#5208](https://github.com/vatesfr/xen-orchestra/pull/5208))
- [Backup/logs] Log's tasks pagination [#4406](https://github.com/vatesfr/xen-orchestra/issues/4406) (PR [#5209](https://github.com/vatesfr/xen-orchestra/pull/5209))
### Enhancements
- [VM Import] Make the `Description` field optional (PR [#5258](https://github.com/vatesfr/xen-orchestra/pull/5258))
- [New VM] Hide missing ISOs in selector [#5222](https://github.com/vatesfr/xen-orchestra/issues/5222)
- [Dashboard/Health] Show VMs that have too many snapshots [#5238](https://github.com/vatesfr/xen-orchestra/pull/5238)
- [Groups] Ability to delete multiple groups at once (PR [#5264](https://github.com/vatesfr/xen-orchestra/pull/5264))
### Bug fixes
- [Import VMDK] Fix `No position specified for vmdisk1` error (PR [#5255](https://github.com/vatesfr/xen-orchestra/pull/5255))
- [API] Fix `this.removeSubjectFromResourceSet is not a function` error on calling `resourceSet.removeSubject` via `xo-cli` [#5265](https://github.com/vatesfr/xen-orchestra/issues/5265) (PR [#5266](https://github.com/vatesfr/xen-orchestra/pull/5266))
- [Import OVA] Fix frozen UI when dropping a big OVA on the page (PR [#5274](https://github.com/vatesfr/xen-orchestra/pull/5274))
- [Remotes/S3] Fix S3 backup of 50GB+ files [#5197](https://github.com/vatesfr/xen-orchestra/issues/5197) (PR [#5242](https://github.com/vatesfr/xen-orchestra/pull/5242))
- [Import OVA] Improve import speed of embedded gzipped VMDK disks (PR [#5275](https://github.com/vatesfr/xen-orchestra/pull/5275))
- [Remotes] Fix editing bucket and directory for S3 remotes [#5233](https://github.com/vatesfr/xen-orchestra/issues/5233) (PR [5276](https://github.com/vatesfr/xen-orchestra/pull/5276))
### Packages to release
- xo-server-auth-ldap 0.9.0
- @xen-orchestra/fs 0.11.1
- xo-vmdk-to-vhd 1.3.1
- xo-server 5.67.0
- xo-web 5.71.0
## **5.50.3** (2020-09-17)
![Channel: stable](https://badgen.net/badge/channel/stable/green)
### Packages to release
- xo-server-audit 0.8.0
## **5.50.2** (2020-09-10)
### Enhancements
- [VM/network] VIF's locking mode: improve tooltip messages [#4713](https://github.com/vatesfr/xen-orchestra/issues/4713) (PR [#5227](https://github.com/vatesfr/xen-orchestra/pull/5227))
- [Backup/overview] Link log entry to its job [#4564](https://github.com/vatesfr/xen-orchestra/issues/4564) (PR [#5202](https://github.com/vatesfr/xen-orchestra/pull/5202))
### Bug fixes
- [New SR] Fix `Cannot read property 'trim' of undefined` error (PR [#5212](https://github.com/vatesfr/xen-orchestra/pull/5212))
- [Dashboard/Health] Fix suspended VDIs considered as orphans [#5248](https://github.com/vatesfr/xen-orchestra/issues/5248) (PR [#5249](https://github.com/vatesfr/xen-orchestra/pull/5249))
### Packages to release
- xo-server-audit 0.7.2
- xo-web 5.70.0
- xo-server 5.66.2
## **5.50.1** (2020-09-04)
### Enhancements
- [Usage report] Exclude replicated VMs from the VMs evolution [#4778](https://github.com/vatesfr/xen-orchestra/issues/4778) (PR [#5241](https://github.com/vatesfr/xen-orchestra/pull/5241))
### Bug fixes
- [VM/Network] Fix TX checksumming [#5234](https://github.com/vatesfr/xen-orchestra/issues/5234)
### Packages to release
- xo-server-usage-report 0.9.0
- xo-server-audit 0.7.1
- xo-server 5.66.1
## **5.50.0** (2020-08-27)
### Highlights
- [Health/Orphan VDIs] Improve heuristic and list both VDI snapshots and normal VDIs (PR [#5228](https://github.com/vatesfr/xen-orchestra/pull/5228))
- [[Audit] Regularly save fingerprints on remote server for better tampering detection](https://xen-orchestra.com/blog/xo-audit/) [#4844](https://github.com/vatesfr/xen-orchestra/issues/4844) (PR [#5077](https://github.com/vatesfr/xen-orchestra/pull/5077))
- [VM/Network] Ability to change a VIF's locking mode [#4713](https://github.com/vatesfr/xen-orchestra/issues/4713) (PR [#5188](https://github.com/vatesfr/xen-orchestra/pull/5188))
- [VM/Network] Ability to set VIF TX checksumming [#5095](https://github.com/vatesfr/xen-orchestra/issues/5095) (PR [#5182](https://github.com/vatesfr/xen-orchestra/pull/5182))
- [Host/Network] Button to refresh the list of physical interfaces [#5230](https://github.com/vatesfr/xen-orchestra/issues/5230)
- [VM] Ability to protect VM from accidental shutdown [#4773](https://github.com/vatesfr/xen-orchestra/issues/4773)
### Enhancements
- [Proxy] Improve health check error messages [#5161](https://github.com/vatesfr/xen-orchestra/issues/5161) (PR [#5191](https://github.com/vatesfr/xen-orchestra/pull/5191))
- [VM/Console] Hide missing ISOs in selector [#5222](https://github.com/vatesfr/xen-orchestra/issues/5222)
### Bug fixes
- [Proxy/deploy] Fix `no such proxy ok` error on a failure trial start (PR [#5196](https://github.com/vatesfr/xen-orchestra/pull/5196))
- [VM/snapshots] Fix redirection when creating a VM from a snapshot (PR [#5213](https://github.com/vatesfr/xen-orchestra/pull/5213))
- [User] Fix `Incorrect password` error when changing password [#5218](https://github.com/vatesfr/xen-orchestra/issues/5218) (PR [#5221](https://github.com/vatesfr/xen-orchestra/pull/5221))
- [Audit] Obfuscate sensitive data in `user.changePassword` action's records [#5219](https://github.com/vatesfr/xen-orchestra/issues/5219) (PR [#5220](https://github.com/vatesfr/xen-orchestra/pull/5220))
- [SDN Controller] Fix `Cannot read property '$network' of undefined` error at the network creation (PR [#5217](https://github.com/vatesfr/xen-orchestra/pull/5217))
### Packages to release
- xo-server-audit 0.7.0
- xo-server-sdn-controller 1.0.3
- xo-server 5.66.0
- xo-web 5.69.0
## **5.49.1** (2020-08-05)
### Enhancements
- [SR/advanced] Show thin/thick provisioning for missing SR types (PR [#5204](https://github.com/vatesfr/xen-orchestra/pull/5204))
### Bug fixes
- [Patches] Don't log errors related to missing patches listing (Previous fix in 5.48.3 was not working)
### Packages to release
- xo-server 5.64.1
- xo-server-sdn-controller 1.0.2
- xo-web 5.67.0
## **5.49.0** (2020-07-31)
### Highlights
- [Home/VM, host] Ability to filter by power state (PR [#5118](https://github.com/vatesfr/xen-orchestra/pull/5118))
- [Proxy/deploy] Ability to set HTTP proxy configuration (PR [#5145](https://github.com/vatesfr/xen-orchestra/pull/5145))
- [Import/OVA] Allow for VMDK disks inside .ova files to be gzipped (PR [#5085](https://github.com/vatesfr/xen-orchestra/pull/5085))
- [Proxy] Show pending upgrades (PR [#5167](https://github.com/vatesfr/xen-orchestra/pull/5167))
- [SDN Controller] Add/Remove network traffic rules for a VM's VIFs (PR [#5135](https://github.com/vatesfr/xen-orchestra/pull/5135))
- [Backup/health] Show VM snapshots with missing jobs, schedules or VMs [#5086](https://github.com/vatesfr/xen-orchestra/issues/5086) (PR [#5125](https://github.com/vatesfr/xen-orchestra/pull/5125))
- [New delta backup] Show a warning icon when the advanced full backup interval setting and the backup retention are higher than 50 (PR (https://github.com/vatesfr/xen-orchestra/pull/5144))
- [VM/network] Improve the network locking mode feedback [#4713](https://github.com/vatesfr/xen-orchestra/issues/4713) (PR [#5170](https://github.com/vatesfr/xen-orchestra/pull/5170))
- [Remotes] Add AWS S3 as a backup storage
- [New VM] Only make network boot option first when the VM has no disks or when the network installation is chosen [#4980](https://github.com/vatesfr/xen-orchestra/issues/4980) (PR [#5119](https://github.com/vatesfr/xen-orchestra/pull/5119))
### Enhancements
- Log the `Invalid XML-RPC message` error as an unexpected response (PR [#5138](https://github.com/vatesfr/xen-orchestra/pull/5138))
- [VM/disks] By default, sort disks by their device position instead of their name [#5163](https://github.com/vatesfr/xen-orchestra/issues/5163) (PR [#5165](https://github.com/vatesfr/xen-orchestra/pull/5165))
- [Schedule/edit] Ability to enable/disable an ordinary job's schedule [#5026](https://github.com/vatesfr/xen-orchestra/issues/5026) (PR [#5111](https://github.com/vatesfr/xen-orchestra/pull/5111))
- [New schedule] Enable 'Enable immediately after creation' by default (PR [#5111](https://github.com/vatesfr/xen-orchestra/pull/5111))
- [Self Service] Ability to globally ignore snapshots in resource set quotas (PR [#5164](https://github.com/vatesfr/xen-orchestra/pull/5164))
- [Self] Ability to cancel a resource set edition without saving it (PR [#5174](https://github.com/vatesfr/xen-orchestra/pull/5174))
- [VIF] Ability to click an IP address to copy it to the clipboard [#5185](https://github.com/vatesfr/xen-orchestra/issues/5185) (PR [#5186](https://github.com/vatesfr/xen-orchestra/pull/5186))
### Bug fixes
- [Backup/Restore] Fixes `an error has occurred` when all backups for a specific VM have been deleted (PR [#5156](https://github.com/vatesfr/xen-orchestra/pull/5156))
- [OVA Import] Fix import of Red Hat generated .ova files (PR [#5159](https://github.com/vatesfr/xen-orchestra/pull/5159))
- [Fast clone] Fix bug where the name of the created VM would be "undefined_clone" (PR [#5173](https://github.com/vatesfr/xen-orchestra/pull/5173))
- [Audit] Fix unreadable exported records format (PR [#5179](https://github.com/vatesfr/xen-orchestra/pull/5179))
- [SDN Controller] Fixes TLS error `dh key too small` [#5074](https://github.com/vatesfr/xen-orchestra/issues/5074) (PR [#5187](https://github.com/vatesfr/xen-orchestra/pull/5187))
### Released packages
- xo-server-audit 0.6.1
- @xen-orchestra/openflow 0.1.1
- xo-server-sdn-controller 1.0.1
- xo-vmdk-to-vhd 1.3.0
- xo-remote-parser 0.6.0
- @xen-orchestra/fs 0.11.0
- xo-server 5.64.0
- xo-web 5.66.0
## **5.48.3** (2020-07-10)
### Enhancements
- [Audit] Logging user actions is now opt-in (PR [#5151](https://github.com/vatesfr/xen-orchestra/pull/5151))
@@ -102,8 +269,6 @@
## **5.47.1** (2020-06-02)
![Channel: stable](https://badgen.net/badge/channel/stable/green)
### Bug fixes
- [auth-ldap] Sign in was broken in XO 5.47.0 (PR [#5039](https://github.com/vatesfr/xen-orchestra/pull/5039))

View File

@@ -7,10 +7,24 @@
> Users must be able to say: “Nice enhancement, I'm eager to test it”
- [Host/Advanced] Add the field `IOMMU` if it is defined (PR [#5294](https://github.com/vatesfr/xen-orchestra/pull/5294))
- [Backup logs/report] Hide merge task when no merge is done (PR [#5263](https://github.com/vatesfr/xen-orchestra/pull/5263))
- [New backup] Enable created schedules by default (PR [#5280](https://github.com/vatesfr/xen-orchestra/pull/5280))
### Bug fixes
> Users must be able to say: “I had this issue, happy to know it's fixed”
- [XOA/Notifications] Don't show expired notifications (PR [#5304](https://github.com/vatesfr/xen-orchestra/pull/5304))
- [Backup/S3] Fix secret key edit form [#5233](https://github.com/vatesfr/xen-orchestra/issues/5233) (PR [#5305](https://github.com/vatesfr/xen-orchestra/pull/5305))
- [New network] Remove the possibility of creating a network on a bond member interface (PR [#5262](https://github.com/vatesfr/xen-orchestra/pull/5262))
- [User] Fix custom filters not showing up when selecting a default filter for templates (PR [#5298](https://github.com/vatesfr/xen-orchestra/pull/5298))
- [Self/VDI migration] Fix hidden VDI after migration (PR [#5296](https://github.com/vatesfr/xen-orchestra/pull/5296))
- [Self/VDI migration] Fix `not enough permissions` error (PR [#5299](https://github.com/vatesfr/xen-orchestra/pull/5299))
- [Home] Hide backup filter for non-admin users [#5285](https://github.com/vatesfr/xen-orchestra/issues/5285) (PR [#5287](https://github.com/vatesfr/xen-orchestra/pull/5287))
- [Backup/S3] Fix request signature error [#5253](https://github.com/vatesfr/xen-orchestra/issues/5253) (PR [#5315](https://github.com/vatesfr/xen-orchestra/pull/5315))
- [SDN Controller] Fix tunnel traffic going on the wrong NIC: see https://xcp-ng.org/forum/topic/3544/mtu-problems-with-vxlan. (PR [#5281](https://github.com/vatesfr/xen-orchestra/pull/5281))
### Packages to release
> Packages will be released in the order they are here, therefore, they should
@@ -27,3 +41,8 @@
> - major: if the change breaks compatibility
>
> In case of conflict, the highest (lowest in previous list) `$version` wins.
- xo-server-sdn-controller patch
- xo-server-backup-reports patch
- xo-server minor
- xo-web minor

BIN
docs/assets/add-rule.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 35 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 27 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 25 KiB

BIN
docs/assets/show-rules.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 87 KiB

View File

@@ -83,3 +83,7 @@ To check your free space, enter your XOA and run `xoa check` to check free syste
This happens when you have a _smart backup job_ that doesn't match any VMs. For example: you created a job to back up all running VMs. If no VMs are running when the backup schedule fires, you'll get this message. This could also happen if you lost the connection to your pool master (the VMs aren't visible anymore from Xen Orchestra).
Edit your job and try to see matching VMs or check if your pool is connected to XOA.
## Error: SR_OPERATION_NOT_SUPPORTED
This error can be caused by leaving any removable device (such as USB storage) attached to the VM that you are backing up or snapshotting, detach the device and retry. This can also be caused if you created a VM disk using the [RAW format](https://xcp-ng.org/docs/storage.html#using-raw-format).

View File

@@ -72,7 +72,7 @@ Now if you do this:
It means any VMs on "Lab Pool" with the "prod" tag will be backed up.
## RAM Enabled bakcup
## RAM Enabled backup
:::tip
This feature is **only compatible** with XCP-ng 8.0 or more recent. Citrix Hypervisor hasn't merged our changes yet, even though we contributed them to their code directly.

View File

@@ -14,6 +14,18 @@ This section will cover the license management system for commercial editions of
![](./assets/activate-confirm.png)
## Rebind XO license
:::warning
Once a license is bound, the only way to unbind it is to contact us with a [support ticket](https://xen-orchestra.com/#!/member/support)!
A license can only be bound to a single appliance at a time; rebinding your license will unbind it from any other appliance.
:::
To rebind your Xen Orchestra appliance, you simply need to connect to the **appliance on which you want to bind the license** and click on the rebind option (the "Move license to this XOA" button) in the license section.
![](./assets/rebind-screen.png)
You will then have a confirmation screen
![](./assets/confirm-rebind.png)
Once it's done, you simply need to proceed with an upgrade on your freshly bound appliance to download the correct edition packages.

View File

@@ -36,7 +36,7 @@ In the network creation view:
:::tip
- All hosts in a private network must be able to reach the other hosts' management interface.
- All hosts in a private network must be able to reach the other hosts' management interface and all hosts must be able to reach one another on the interface selected for private networks creation.
> The term management interface is used to indicate the IP-enabled NIC that carries the management traffic.
- Only 1 encrypted GRE network and 1 encrypted VxLAN network per pool can exist at a time due to Open vSwitch limitation.
:::
@@ -75,6 +75,32 @@ Encryption is not available prior to XCP-ng 8.0.
## OpenFlow rules
Please see the [devblog about OpenFlow rules](https://xen-orchestra.com/blog/vms-vif-network-traffic-control/).
This feature requires the OpenFlow port to be opened
In the VM network tab a new column has been added: _Network rules_.
- The _Add rule_ button displays a form to add a new rule, choosing to:
  - enable/disable the matching traffic
  - for a specific protocol (optional)
  - on a specific port (optional)
  - matching a specific IP or IP range (optional)
  - coming from the VIF / going to the VIF / both
- The _Show rules_ button allows displaying all rules for a VIF.
- When the rules are displayed, a button to delete a rule is available.
![](./assets/add-rule.png)
![](./assets/show-rules.png)
:::tip
This feature is about to be released soon. Stay tuned!
:::
- This feature requires the OpenFlow port (TCP 6653) to be opened. (See [the requirements](#openflow))
:::
### Requirements
### Openflow
- On XCP-ng prior to 8.0:
- To be able to use `OpenFlow`, the following line needs to be added, if not already present, in `/etc/sysconfig/iptables` of all the hosts where `OpenFlow` is wanted: `-A xapi-INPUT -p udp -m conntrack --ctstate NEW -m tcp --dport 6653 -j ACCEPT`

View File

@@ -237,7 +237,11 @@ Then, you can define quotas on this set:
- max disk usage
:::tip
Snapshotting a VM within a self-service will _not_ use the quota from the resource set. The same rule applies for backups and replication.
Replicated VMs and snapshots created by a backup job don't use quotas.
:::
:::tip
A snapshot of a Self Service VM will use as much resources as a VM would. You can disable this by setting `ignoreVmSnapshotResources` to `true` in the `selfService` section of `xo-server`'s config.
:::
When you click on create, you can see the resource set and remove or edit it:

View File

@@ -6,7 +6,7 @@
"babel-eslint": "^10.0.1",
"babel-jest": "^26.0.1",
"benchmark": "^2.1.4",
"eslint": "^6.0.1",
"eslint": "^7.6.0",
"eslint-config-prettier": "^6.0.0",
"eslint-config-standard": "^14.1.0",
"eslint-config-standard-jsx": "^8.1.0",
@@ -17,7 +17,7 @@
"eslint-plugin-react": "^7.6.1",
"eslint-plugin-standard": "^4.0.0",
"exec-promise": "^0.7.0",
"flow-bin": "^0.126.0",
"flow-bin": "^0.131.0",
"globby": "^11.0.1",
"handlebars": "^4.7.6",
"husky": "^4.2.5",
@@ -39,6 +39,15 @@
},
"jest": {
"collectCoverage": true,
"moduleNameMapper": {
"^.": "./src",
"^(@vates/[^/]+)": "$1/src",
"^(@xen-orchestra/[^/]+)": "$1/src",
"^(value-matcher)": "$1/src",
"^(vhd-cli)": "$1/src",
"^(vhd-lib)": "$1/src",
"^(xo-[^/]+)": "$1/src"
},
"projects": [
"<rootDir>"
],
@@ -65,12 +74,11 @@
"dev-test": "jest --bail --watch \"^(?!.*\\.integ\\.spec\\.js$)\"",
"docs:dev": "vuepress dev docs",
"docs:build": "vuepress build docs",
"posttest": "scripts/run-script test",
"prepare": "scripts/run-script prepare",
"pretest": "eslint --ignore-path .gitignore .",
"prettify": "prettier --ignore-path .gitignore --write '**/*.{js,jsx,md,mjs,ts,tsx}'",
"test": "jest \"^(?!.*\\.integ\\.spec\\.js$)\"",
"test": "npm run test-lint && npm run test-unit",
"test-integration": "jest \".integ\\.spec\\.js$\"",
"test-lint": "eslint --ignore-path .gitignore .",
"test-unit": "jest \"^(?!.*\\.integ\\.spec\\.js$)\" && scripts/run-script test",
"travis-tests": "scripts/travis-tests"
},
"workspaces": [

View File

@@ -16,7 +16,6 @@ Installation of the [npm package](https://npmjs.org/package/value-matcher):
```js
import { createPredicate } from 'value-matcher'
;[
{ user: 'sam', age: 65, active: false },
{ user: 'barney', age: 36, active: true },

View File

@@ -1,6 +1,5 @@
```js
import { createPredicate } from 'value-matcher'
;[
{ user: 'sam', age: 65, active: false },
{ user: 'barney', age: 36, active: true },

View File

@@ -43,7 +43,6 @@
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepare": "yarn run build",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
}

View File

@@ -28,7 +28,7 @@
"node": ">=8.10"
},
"dependencies": {
"@xen-orchestra/fs": "^0.10.4",
"@xen-orchestra/fs": "^0.11.1",
"cli-progress": "^3.1.0",
"exec-promise": "^0.7.0",
"getopts": "^2.2.3",
@@ -52,7 +52,7 @@
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "rimraf dist/ && index-modules --cjs-lazy src/commands",
"predev": "yarn run prebuild",
"prepare": "yarn run build",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
}
}

View File

@@ -36,12 +36,12 @@
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"@babel/preset-flow": "^7.0.0",
"@xen-orchestra/fs": "^0.10.4",
"@xen-orchestra/fs": "^0.11.1",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^7.0.2",
"execa": "^4.0.2",
"fs-promise": "^2.0.0",
"get-stream": "^5.1.0",
"get-stream": "^6.0.0",
"index-modules": "^0.3.0",
"readable-stream": "^3.0.6",
"rimraf": "^3.0.0",
@@ -53,7 +53,7 @@
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run clean",
"prepare": "yarn run build",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
},
"author": {

View File

@@ -37,9 +37,9 @@
},
"dependencies": {
"archy": "^1.0.0",
"chalk": "^3.0.0",
"chalk": "^4.1.0",
"exec-promise": "^0.7.0",
"human-format": "^0.10.0",
"human-format": "^0.11.0",
"lodash": "^4.17.4",
"pw": "^0.0.4",
"xen-api": "^0.29.0"

View File

@@ -2,7 +2,7 @@
"dependencies": {
"getopts": "^2.2.3",
"golike-defer": "^0.4.1",
"human-format": "^0.10.1",
"human-format": "^0.11.0",
"process-top": "^1.0.0",
"progress-stream": "^2.0.0",
"promise-toolbox": "^0.13.0",

View File

@@ -40,7 +40,7 @@
"event-to-promise": "^0.8.0",
"exec-promise": "^0.7.0",
"http-request-plus": "^0.8.0",
"jest-diff": "^24.0.0",
"jest-diff": "^26.4.2",
"json-rpc-protocol": "^0.13.1",
"kindof": "^2.0.0",
"lodash": "^4.17.4",

View File

@@ -34,17 +34,17 @@
},
"dependencies": {
"bluebird": "^3.5.1",
"chalk": "^3.0.0",
"chalk": "^4.1.0",
"exec-promise": "^0.7.0",
"fs-promise": "^2.0.3",
"http-request-plus": "^0.9.1",
"human-format": "^0.10.0",
"human-format": "^0.11.0",
"l33teral": "^3.0.3",
"lodash": "^4.17.4",
"micromatch": "^4.0.2",
"mkdirp": "^0.5.1",
"mkdirp": "^1.0.4",
"nice-pipe": "0.0.0",
"pretty-ms": "^5.0.0",
"pretty-ms": "^7.0.0",
"progress-stream": "^2.0.0",
"promise-toolbox": "^0.15.0",
"pump": "^3.0.0",

View File

@@ -8,7 +8,7 @@ const readFile = promisify(require('fs').readFile)
const writeFile = promisify(require('fs').writeFile)
const l33t = require('l33teral')
const mkdirp = promisify(require('mkdirp'))
const mkdirp = require('mkdirp')
const xdgBasedir = require('xdg-basedir')
// ===================================================================

View File

@@ -41,7 +41,7 @@
"end-of-stream": "^1.1.0",
"exec-promise": "^0.7.0",
"highland": "^2.10.1",
"through2": "^3.0.0",
"through2": "^4.0.2",
"xo-lib": "^0.9.0"
},
"devDependencies": {

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "xo-remote-parser",
"version": "0.5.0",
"version": "0.6.0",
"license": "AGPL-3.0-or-later",
"description": "",
"keywords": [],
@@ -26,7 +26,8 @@
"node": ">=6"
},
"dependencies": {
"lodash": "^4.13.1"
"lodash": "^4.13.1",
"url-parse": "^1.4.7"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
@@ -42,7 +43,7 @@
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "rimraf dist/",
"predev": "yarn run prebuild",
"prepare": "yarn run build",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
}
}

View File

@@ -2,6 +2,7 @@ import filter from 'lodash/filter'
import map from 'lodash/map'
import trim from 'lodash/trim'
import trimStart from 'lodash/trimStart'
import Url from 'url-parse'
const NFS_RE = /^([^:]+):(?:(\d+):)?([^:]+)$/
const SMB_RE = /^([^:]+):(.+)@([^@]+)\\\\([^\0]+)(?:\0(.*))?$/
@@ -39,6 +40,13 @@ export const parse = string => {
object.domain = domain
object.username = username
object.password = password
} else if (type === 's3') {
const parsed = new Url(string)
object.type = 's3'
object.host = parsed.host
object.path = parsed.pathname
object.username = parsed.username
object.password = decodeURIComponent(parsed.password)
}
return object
}
@@ -60,6 +68,9 @@ export const format = ({
if (type === 'smb') {
string += `${username}:${password}@${domain}\\\\${host}`
}
if (type === 's3') {
string += `${username}:${encodeURIComponent(password)}@${host}`
}
path = sanitizePath(path)
if (type === 'smb') {
path = path.split('/')

View File

@@ -44,6 +44,17 @@ const data = deepFreeze({
path: '/media/nfs',
},
},
S3: {
string:
's3://AKIAS:XSuBupZ0mJlu%2B@s3-us-west-2.amazonaws.com/test-bucket/dir',
object: {
type: 's3',
host: 's3-us-west-2.amazonaws.com',
path: '/test-bucket/dir',
username: 'AKIAS',
password: 'XSuBupZ0mJlu+',
},
},
})
const parseData = deepFreeze({

View File

@@ -1,6 +1,6 @@
{
"name": "xo-server-audit",
"version": "0.6.0",
"version": "0.8.0",
"license": "AGPL-3.0-or-later",
"description": "Audit plugin for XO-Server",
"keywords": [
@@ -36,6 +36,7 @@
"devDependencies": {
"@babel/cli": "^7.7.0",
"@babel/core": "^7.7.2",
"@babel/plugin-proposal-nullish-coalescing-operator": "^7.0.0",
"@babel/preset-env": "^7.7.1",
"cross-env": "^7.0.2",
"rimraf": "^3.0.0"
@@ -49,8 +50,10 @@
},
"dependencies": {
"@xen-orchestra/audit-core": "^0.1.1",
"@xen-orchestra/cron": "^1.0.6",
"@xen-orchestra/log": "^0.2.0",
"async-iterator-to-stream": "^1.1.0",
"lodash": "^4.17.19",
"promise-toolbox": "^0.15.0",
"readable-stream": "^3.5.0",
"xo-common": "^0.5.0"

View File

@@ -2,6 +2,7 @@ import asyncIteratorToStream from 'async-iterator-to-stream'
import createLogger from '@xen-orchestra/log'
import { alteredAuditRecord, missingAuditRecord } from 'xo-common/api-errors'
import { createGzip } from 'zlib'
import { createSchedule } from '@xen-orchestra/cron'
import { fromCallback } from 'promise-toolbox'
import { pipeline } from 'readable-stream'
import {
@@ -18,6 +19,7 @@ const DEFAULT_BLOCKED_LIST = {
'acl.get': true,
'acl.getCurrentPermissions': true,
'audit.checkIntegrity': true,
'audit.clean': true,
'audit.generateFingerprint': true,
'audit.getRecords': true,
'backup.list': true,
@@ -113,6 +115,30 @@ class Db extends Storage {
getLastId() {
return this.get(LAST_ID)
}
// Wipe the audit database: remove the last-record pointer, then delete
// every stored record so a fresh hash chain can be started afterwards.
// Resolves once all deletions have completed; rejects on stream error.
async clean() {
  const db = this._db

  // delete first so that a new chain can be constructed even if anything else fails
  await db.del(LAST_ID)

  return new Promise((resolve, reject) => {
    // `count` tracks pending async deletions plus the key stream's own
    // completion: it starts at 1 for the `end` event, is incremented for
    // each queued `db.del`, and `resolve` fires only when it drops to 0.
    let count = 1
    const cb = () => {
      if (--count === 0) {
        resolve()
      }
    }
    const deleteEntry = key => {
      ++count
      db.del(key, cb)
    }
    db.createKeyStream()
      .on('data', deleteEntry)
      .on('end', cb)
      .on('error', reject)
  })
}
}
export const configurationSchema = {
@@ -135,6 +161,15 @@ class AuditXoPlugin {
this._cleaners = []
this._xo = xo
const { enabled = true, schedule: { cron = '0 6 * * *', timezone } = {} } =
staticConfig.lastHashUpload ?? {}
if (enabled) {
this._uploadLastHashJob = createSchedule(cron, timezone).createJob(() =>
this._uploadLastHash().catch(log.error)
)
}
this._auditCore = undefined
this._storage = undefined
@@ -193,10 +228,21 @@ class AuditXoPlugin {
oldest: { type: 'string', optional: true },
}
const uploadLastHashJob = this._uploadLastHashJob
if (uploadLastHashJob !== undefined) {
uploadLastHashJob.start()
cleaners.push(() => uploadLastHashJob.stop())
}
const clean = this._storage.clean.bind(this._storage)
clean.permission = 'admin'
clean.description = 'Clean audit database'
cleaners.push(
this._xo.addApiMethods({
audit: {
checkIntegrity,
clean,
exportRecords,
generateFingerprint,
getRecords,
@@ -272,7 +318,7 @@ class AuditXoPlugin {
(req, res) => {
res.set({
'content-disposition': 'attachment',
'content-type': 'application/json',
'content-type': 'application/x-gzip',
})
return fromCallback(
pipeline,
@@ -285,7 +331,7 @@ class AuditXoPlugin {
{
suffix: `/audit-records-${new Date()
.toISOString()
.replace(/:/g, '_')}.gz`,
.replace(/:/g, '_')}.ndjson.gz`,
}
)
.then($getFrom => ({
@@ -293,6 +339,60 @@ class AuditXoPlugin {
}))
}
// See www-xo#344
// Upload the fingerprint of the locally stored audit records to the XOA
// `audit` service, either extending the remote hash chain or starting a
// new one when the stored chain fails its integrity check.
// Scheduled as a cron job (see `_uploadLastHashJob`); errors are logged
// by the caller.
async _uploadLastHash() {
  const xo = this._xo

  // In case of non-existent XOA plugin
  if (xo.audit === undefined) {
    return
  }

  // Nothing to upload when no record has been stored yet.
  const lastRecordId = await this._storage.getLastId()
  if (lastRecordId === undefined) {
    return
  }

  const chain = await xo.audit.getLastChain()
  let lastValidHash
  if (chain !== null) {
    const hashes = chain.hashes
    lastValidHash = hashes[hashes.length - 1]
    // The remote chain already ends at the latest local record: up to date.
    if (lastValidHash === lastRecordId) {
      return
    }

    // check the integrity of all stored hashes
    try {
      for (let i = 0; i < hashes.length - 1; ++i) {
        await this._checkIntegrity({
          oldest: hashes[i],
          newest: hashes[i + 1],
        })
      }
    } catch (error) {
      // Only integrity failures (missing/altered record) invalidate the
      // chain; any other error is unexpected and must propagate.
      if (!missingAuditRecord.is(error) && !alteredAuditRecord.is(error)) {
        throw error
      }
      lastValidHash = undefined
    }
  }

  // generate a valid fingerprint of all stored records in case of a failure integrity check
  const { oldest, newest, error } = await this._generateFingerprint({
    oldest: lastValidHash,
  })

  // No valid previous hash (or fingerprinting reported an error): the old
  // chain cannot be extended, so start a new one.
  if (lastValidHash === undefined || error !== undefined) {
    await xo.audit.startNewChain({ oldest, newest })
  } else {
    await xo.audit.extendLastChain({ oldest, newest })
  }
}
async _checkIntegrity(props) {
const { oldest = NULL_ID, newest = await this._storage.getLastId() } = props
return this._auditCore.checkIntegrity(oldest, newest).catch(error => {
@@ -311,14 +411,18 @@ class AuditXoPlugin {
try {
return {
fingerprint: `${oldest}|${newest}`,
newest,
nValid: await this._checkIntegrity({ oldest, newest }),
oldest,
}
} catch (error) {
if (missingAuditRecord.is(error) || alteredAuditRecord.is(error)) {
return {
fingerprint: `${error.data.id}|${newest}`,
nValid: error.data.nValid,
error,
fingerprint: `${error.data.id}|${newest}`,
newest,
nValid: error.data.nValid,
oldest: error.data.id,
}
}
throw error

View File

@@ -1,6 +1,6 @@
{
"name": "xo-server-auth-ldap",
"version": "0.8.1",
"version": "0.9.0",
"license": "AGPL-3.0-or-later",
"description": "LDAP authentication plugin for XO-Server",
"keywords": [
@@ -34,6 +34,7 @@
"node": ">=10"
},
"dependencies": {
"@babel/plugin-proposal-optional-chaining": "^7.11.0",
"exec-promise": "^0.7.0",
"inquirer": "^7.0.0",
"ldapts": "^2.2.1",

View File

@@ -32,10 +32,12 @@ export const configurationSchema = {
type: 'object',
properties: {
uri: {
title: 'URI',
description: 'URI of the LDAP server.',
type: 'string',
},
certificateAuthorities: {
title: 'Certificate Authorities',
description: `
Paths to CA certificates to use when connecting to SSL-secured LDAP servers.
@@ -47,12 +49,24 @@ If not specified, it will use a default set of well-known CAs.
},
},
checkCertificate: {
title: 'Check certificate',
description:
"Enforce the validity of the server's certificates. You can disable it when connecting to servers that use a self-signed certificate.",
type: 'boolean',
defaults: DEFAULTS.checkCertificate,
},
startTls: {
title: 'Use StartTLS',
type: 'boolean',
},
base: {
title: 'Base',
description:
'The base is the part of the description tree where the users and groups are looked for.',
type: 'string',
},
bind: {
title: 'Credentials',
description: 'Credentials to use before looking for the user record.',
type: 'object',
properties: {
@@ -74,12 +88,8 @@ For Microsoft Active Directory, it can also be \`<user>@<domain>\`.
},
required: ['dn', 'password'],
},
base: {
description:
'The base is the part of the description tree where the users are looked for.',
type: 'string',
},
filter: {
title: 'User filter',
description: `
Filter used to find the user.
@@ -102,9 +112,67 @@ Or something like this if you also want to filter by group:
type: 'string',
default: DEFAULTS.filter,
},
startTls: {
title: 'Use StartTLS',
type: 'boolean',
userIdAttribute: {
title: 'ID attribute',
description:
'Attribute used to map LDAP user to XO user. Must be unique. e.g.: `dn`',
type: 'string',
},
groups: {
title: 'Synchronize groups',
description: 'Import groups from LDAP directory',
type: 'object',
properties: {
base: {
title: 'Base',
description: 'Where to look for the groups.',
type: 'string',
},
filter: {
title: 'Filter',
description:
'Filter used to find the groups. e.g.: `(objectClass=groupOfNames)`',
type: 'string',
},
idAttribute: {
title: 'ID attribute',
description:
'Attribute used to map LDAP group to XO group. Must be unique. e.g.: `gid`',
type: 'string',
},
displayNameAttribute: {
title: 'Display name attribute',
description:
"Attribute used to determine the group's name in XO. e.g.: `cn`",
type: 'string',
},
membersMapping: {
title: 'Members mapping',
type: 'object',
properties: {
groupAttribute: {
title: 'Group attribute',
description:
'Attribute used to find the members of a group. e.g.: `memberUid`. The values must reference the user IDs (cf. user ID attribute)',
type: 'string',
},
userAttribute: {
title: 'User attribute',
description:
'User attribute used to match group members to the users. e.g.: `uidNumber`',
type: 'string',
},
},
required: ['groupAttribute', 'userAttribute'],
},
},
required: [
'base',
'filter',
'idAttribute',
'displayNameAttribute',
'membersMapping',
],
},
},
required: ['uri', 'base'],
@@ -166,12 +234,18 @@ class AuthLdap {
base: searchBase,
filter: searchFilter = DEFAULTS.filter,
startTls = false,
groups,
uri,
userIdAttribute,
} = conf
this._credentials = credentials
this._serverUri = uri
this._searchBase = searchBase
this._searchFilter = searchFilter
this._startTls = startTls
this._groupsConfig = groups
this._userIdAttribute = userIdAttribute
}
load() {
@@ -238,7 +312,31 @@ class AuthLdap {
`successfully bound as ${entry.dn} => ${username} authenticated`
)
logger(JSON.stringify(entry, null, 2))
return { username }
let user
if (this._userIdAttribute === undefined) {
// Support legacy config
user = await this._xo.registerUser(undefined, username)
} else {
const ldapId = entry[this._userIdAttribute]
user = await this._xo.registerUser2('ldap', {
user: { id: ldapId, name: username },
})
const groupsConfig = this._groupsConfig
if (groupsConfig !== undefined) {
try {
await this._synchronizeGroups(
user,
entry[groupsConfig.membersMapping.userAttribute]
)
} catch(error) {
logger(`failed to synchronize groups: ${error.message}`)
}
}
}
return { userId: user.id }
} catch (error) {
logger(`failed to bind as ${entry.dn}: ${error.message}`)
}
@@ -250,6 +348,146 @@ class AuthLdap {
await client.unbind()
}
}
// Synchronize user's groups OR all groups if no user is passed
// Synchronize XO groups with the LDAP directory.
//
// Two modes:
// - `user` passed: only that user's memberships are updated; `memberId` is
//   the LDAP value matched against each group's member attribute, and no
//   group is deleted.
// - no `user`: every LDAP-provided XO group is created/renamed/deleted to
//   mirror the directory, and memberships are fully reconciled.
async _synchronizeGroups(user, memberId) {
  const logger = this._logger
  const client = new Client(this._clientOpts)
  try {
    if (this._startTls) {
      await client.startTLS(this._tlsOptions)
    }

    // Bind if necessary.
    {
      const { _credentials: credentials } = this
      if (credentials) {
        logger(`attempting to bind with as ${credentials.dn}...`)
        await client.bind(credentials.dn, credentials.password)
        logger(`successfully bound as ${credentials.dn}`)
      }
    }

    logger('syncing groups...')
    const {
      base,
      displayNameAttribute,
      filter,
      idAttribute,
      membersMapping,
    } = this._groupsConfig
    const { searchEntries: ldapGroups } = await client.search(base, {
      scope: 'sub',
      filter: filter || '', // may be undefined
    })
    // Only computed in single-user mode (`false` otherwise): XO users that
    // were provided by this LDAP plugin, used to map member IDs to XO users.
    const xoUsers =
      user !== undefined &&
      (await this._xo.getAllUsers()).filter(
        user =>
          user.authProviders !== undefined && 'ldap' in user.authProviders
      )
    const xoGroups = await this._xo.getAllGroups()

    // For each LDAP group:
    // - create/update/delete the corresponding XO group
    // - add/remove the LDAP-provided users
    // One by one to avoid race conditions
    for (const ldapGroup of ldapGroups) {
      const groupLdapId = ldapGroup[idAttribute]
      const groupLdapName = ldapGroup[displayNameAttribute]

      // Empty or undefined names/IDs are invalid
      if (!groupLdapId || !groupLdapName) {
        logger(`Invalid group ID (${groupLdapId}) or name (${groupLdapName})`)
        continue
      }

      // The member attribute may hold one value or an array: normalize.
      let ldapGroupMembers = ldapGroup[membersMapping.groupAttribute]
      ldapGroupMembers = Array.isArray(ldapGroupMembers)
        ? ldapGroupMembers
        : [ldapGroupMembers]

      // If a user was passed, only update the user's groups
      if (user !== undefined && !ldapGroupMembers.includes(memberId)) {
        continue
      }

      let xoGroup
      const xoGroupIndex = xoGroups.findIndex(
        group =>
          group.provider === 'ldap' && group.providerGroupId === groupLdapId
      )
      if (xoGroupIndex === -1) {
        // Refuse to shadow an existing non-LDAP group with the same name.
        if (
          xoGroups.find(group => group.name === groupLdapName) !== undefined
        ) {
          // TODO: check against LDAP groups that are being created as well
          logger(`A group called ${groupLdapName} already exists`)
          continue
        }
        xoGroup = await this._xo.createGroup({
          name: groupLdapName,
          provider: 'ldap',
          providerGroupId: groupLdapId,
        })
      } else {
        // Remove it from xoGroups as we will then delete all the remaining
        // LDAP-provided groups
        ;[xoGroup] = xoGroups.splice(xoGroupIndex, 1)
        await this._xo.updateGroup(xoGroup.id, { name: groupLdapName })
        // Re-fetch to get the group's current member list.
        xoGroup = await this._xo.getGroup(xoGroup.id)
      }

      // If a user was passed, only add that user to the group and don't
      // delete any groups (ie return immediately)
      if (user !== undefined) {
        await this._xo.addUserToGroup(user.id, xoGroup.id)
        continue
      }

      // Working copy of the group's members; entries confirmed by LDAP are
      // removed below, so whatever remains must be pruned.
      const xoGroupMembers =
        xoGroup.users === undefined ? [] : xoGroup.users.slice(0)
      for (const ldapId of ldapGroupMembers) {
        const xoUser = xoUsers.find(
          user => user.authProviders.ldap.id === ldapId
        )
        if (xoUser === undefined) {
          continue
        }

        // If the user exists in XO, should be a member of the LDAP-provided
        // group but isn't: add it
        const userIdIndex = xoGroupMembers.findIndex(id => id === xoUser.id)
        if (userIdIndex !== -1) {
          xoGroupMembers.splice(userIdIndex, 1)
          continue
        }
        await this._xo.addUserToGroup(xoUser.id, xoGroup.id)
      }

      // All the remaining users of that group can be removed from it since
      // they're not in the LDAP group
      for (const userId of xoGroupMembers) {
        await this._xo.removeUserFromGroup(userId, xoGroup.id)
      }
    }

    if (user === undefined) {
      // All the remaining groups provided by LDAP can be removed from XO since
      // they don't exist in the LDAP directory any more
      await Promise.all(
        xoGroups
          .filter(group => group.provider === 'ldap')
          .map(group => this._xo.deleteGroup(group.id))
      )
    }
  } finally {
    // Always release the LDAP connection, even on failure.
    await client.unbind()
  }
}
}
// ===================================================================

View File

@@ -38,7 +38,7 @@
"dependencies": {
"@xen-orchestra/defined": "^0.0.0",
"@xen-orchestra/log": "^0.2.0",
"human-format": "^0.10.0",
"human-format": "^0.11.0",
"lodash": "^4.13.1",
"moment-timezone": "^0.5.13"
},

View File

@@ -513,6 +513,8 @@ class BackupReportsXoPlugin {
} else {
globalTransferSize += size
}
} else if (operationLog.status === 'success') {
return
}
const operationText = [

View File

@@ -22,7 +22,7 @@
},
"dependencies": {
"@xen-orchestra/cron": "^1.0.6",
"d3-time-format": "^2.1.1",
"d3-time-format": "^3.0.0",
"json5": "^2.0.1",
"lodash": "^4.17.4"
},

View File

@@ -16,7 +16,7 @@
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build"
},
"version": "0.4.3",
"version": "1.0.3",
"engines": {
"node": ">=8.10"
},
@@ -30,6 +30,9 @@
},
"dependencies": {
"@xen-orchestra/log": "^0.2.0",
"@xen-orchestra/openflow": "^0.1.1",
"@vates/coalesce-calls": "^0.1.0",
"ipaddr.js": "^1.9.1",
"lodash": "^4.17.11",
"node-openssl-cert": "^0.0.117",
"promise-toolbox": "^0.15.0",

View File

@@ -1,15 +1,17 @@
import assert from 'assert'
import createLogger from '@xen-orchestra/log'
import NodeOpenssl from 'node-openssl-cert'
import uuidv4 from 'uuid/v4'
import { access, constants, readFile, writeFile } from 'fs'
import { EventEmitter } from 'events'
import { filter, find, forOwn, map, omitBy } from 'lodash'
import { fromCallback, promisify } from 'promise-toolbox'
import { join } from 'path'
import { v4 as uuidv4 } from 'uuid'
import { OpenFlowChannel } from './protocol/openflow-channel'
import { OvsdbClient } from './protocol/ovsdb-client'
import { PrivateNetwork } from './private-network/private-network'
import { TlsHelper } from './utils/tls-helper'
// =============================================================================
@@ -48,6 +50,10 @@ export const configurationSchema = {
// =============================================================================
const noop = Function.prototype
// -----------------------------------------------------------------------------
const fileWrite = promisify(writeFile)
const fileRead = promisify(readFile)
async function fileExists(path) {
@@ -235,12 +241,28 @@ async function createTunnel(host, network) {
return
}
const encapsulation = otherConfig['xo:sdn-controller:encapsulation'] ?? 'gre'
try {
const tunnelRef = await host.$xapi.call(
'tunnel.create',
hostPif.$ref,
network.$ref
)
let tunnelRef
try {
tunnelRef = await host.$xapi.call(
'tunnel.create',
hostPif.$ref,
network.$ref,
encapsulation
)
} catch (error) {
if (error.code === 'MESSAGE_PARAMETER_COUNT_MISMATCH') {
// Before 8.2, protocol field did not exist, let's try without it!
tunnelRef = await host.$xapi.call(
'tunnel.create',
hostPif.$ref,
network.$ref
)
} else {
throw error
}
}
const tunnel = await host.$xapi._getOrWaitObject(tunnelRef)
await tunnel.$xapi._waitObjectState(
tunnel.access_PIF,
@@ -312,6 +334,10 @@ class SDNController extends EventEmitter {
- `status`:
- `active`: `true` if the corresponding OpenVSwitch bridge is correctly configured and working
- `key` : Corresponding OpenVSwitch bridge name (missing if `active` is `false`)
Attributes on VIFs (OpenFlow entries):
- `other_config`:
- `xo:sdn-controller:of-rules`: A list of OpenFlow entries to apply to this VIF
*/
constructor({ xo, getDataDir }) {
@@ -335,6 +361,12 @@ class SDNController extends EventEmitter {
this._prevVni = 0
this.ovsdbClients = {}
this.ofChannels = {}
this._tlsHelper = new TlsHelper()
this._handledTasks = []
this._managed = []
}
// ---------------------------------------------------------------------------
@@ -369,10 +401,11 @@ class SDNController extends EventEmitter {
fileRead(join(certDirectory, CLIENT_CERT)),
fileRead(join(certDirectory, CA_CERT)),
])
forOwn(this.ovsdbClients, client => {
client.updateCertificates(this._clientKey, this._clientCert, this._caCert)
})
this._tlsHelper.updateCertificates(
this._clientKey,
this._clientCert,
this._caCert
)
const updatedPools = []
await Promise.all(
map(this.privateNetworks, async privateNetworks => {
@@ -390,7 +423,8 @@ class SDNController extends EventEmitter {
}
async load() {
// Expose method to create private network
// ---------------- Private Network method ---------------------------------
const createPrivateNetwork = params =>
this._createPrivateNetwork({
encrypted: false,
@@ -421,10 +455,41 @@ class SDNController extends EventEmitter {
mtu: { type: 'integer', optional: true },
preferredCenterId: { type: 'string', optional: true },
}
createPrivateNetwork.permission = 'admin'
// ---------------- OpenFlow rules method ----------------------------------
const addRule = params => this._addRule(params)
addRule.description = 'Add an ACL rule to a VIF'
addRule.params = {
allow: { type: 'boolean' },
direction: { type: 'string' },
ipRange: { type: 'string', optional: true },
port: { type: 'integer', optional: true },
protocol: { type: 'string', optional: true },
vifId: { type: 'string' },
}
addRule.permission = 'admin'
const deleteRule = params => this._deleteRule(params)
deleteRule.description = 'Delete an ACL rule from a VIF'
deleteRule.params = {
direction: { type: 'string' },
ipRange: { type: 'string', optional: true },
port: { type: 'integer', optional: true },
protocol: { type: 'string', optional: true },
vifId: { type: 'string' },
}
deleteRule.permission = 'admin'
// -------------------------------------------------------------------------
this._unsetApiMethods = this._xo.addApiMethods({
sdnController: {
createPrivateNetwork,
addRule,
deleteRule,
},
})
@@ -456,6 +521,10 @@ class SDNController extends EventEmitter {
this._cleaners = []
this.ovsdbClients = {}
this.ofChannels = {}
this._handledTasks = []
this._managed = []
this._unsetApiMethods()
}
@@ -483,7 +552,8 @@ class SDNController extends EventEmitter {
this._cleaners.push(await this._manageXapi(xapi))
const hosts = filter(xapi.objects.all, { $type: 'host' })
for (const host of hosts) {
this._createOvsdbClient(host)
this._getOrCreateOvsdbClient(host)
this._getOrCreateOfChannel(host)
}
// Add already existing private networks
@@ -596,6 +666,13 @@ class SDNController extends EventEmitter {
await this._electNewCenter(privateNetwork)
})
)
// -----------------------------------------------------------------------
const vifs = filter(xapi.objects.all, { $type: 'VIF' })
for (const vif of vifs) {
await this._applyVifOfRules(vif)
}
} catch (error) {
log.error('Error while handling xapi connection', {
id: xapi.pool.uuid,
@@ -632,6 +709,112 @@ class SDNController extends EventEmitter {
// ===========================================================================
/**
 * Add an OpenFlow ACL rule to a VIF and persist it in the VIF's
 * `other_config` so it can be re-applied later (e.g. after a migration
 * or a re-plug).
 *
 * @param {object} params
 * @param {boolean} params.allow - `true` to allow matching traffic, `false` to drop it
 * @param {string} params.direction - rule direction, interpreted by the OF channel ('from'/'to')
 * @param {string} [params.ipRange=''] - IP or CIDR range ('' = any)
 * @param {number} [params.port] - transport port the rule applies to
 * @param {string} [params.protocol] - protocol name (e.g. 'IP', 'TCP', 'UDP'…)
 * @param {string} params.vifId - XO id of the target VIF
 */
async _addRule({ allow, direction, ipRange = '', port, protocol, vifId }) {
  const vif = this._xo.getXapiObject(this._xo.getObject(vifId, 'VIF'))
  try {
    // OVS only exposes an ofport for plugged VIFs
    assert(vif.currently_attached, 'VIF needs to be plugged to add rule')

    await this._setPoolControllerIfNeeded(vif.$pool)
    // Talk to the host the VM currently runs on
    const client = this._getOrCreateOvsdbClient(vif.$VM.$resident_on)
    const channel = this._getOrCreateOfChannel(vif.$VM.$resident_on)
    const ofport = await client.getOfPortForVif(vif)
    await channel.addRule(
      vif,
      allow,
      protocol,
      port,
      ipRange,
      direction,
      ofport
    )

    // Persist the rule as a JSON string in the VIF's other_config,
    // skipping exact duplicates
    const vifRules = vif.other_config['xo:sdn-controller:of-rules']
    const newVifRules = vifRules !== undefined ? JSON.parse(vifRules) : []
    const stringRule = JSON.stringify({
      allow,
      protocol,
      port,
      ipRange,
      direction,
    })
    if (!newVifRules.includes(stringRule)) {
      newVifRules.push(stringRule)
      await vif.update_other_config(
        'xo:sdn-controller:of-rules',
        JSON.stringify(newVifRules)
      )
    }
  } catch (error) {
    // Best effort: errors are logged, never rethrown to the caller
    log.error('Error while adding OF rule', {
      error,
      vif: vif.uuid,
      host: vif.$VM.$resident_on.uuid,
      allow,
      protocol,
      port,
      ipRange,
      direction,
    })
  }
}
async _deleteRule(
{ direction, ipRange = '', port, protocol, vifId },
updateOtherConfig = true
) {
let vif = this._xo.getXapiObject(this._xo.getObject(vifId, 'VIF'))
try {
await this._setPoolControllerIfNeeded(vif.$pool)
const client = this._getOrCreateOvsdbClient(vif.$VM.$resident_on)
const channel = this._getOrCreateOfChannel(vif.$VM.$resident_on)
const ofport = await client.getOfPortForVif(vif)
await channel.deleteRule(vif, protocol, port, ipRange, direction, ofport)
if (!updateOtherConfig) {
return
}
const vifRules = vif.other_config['xo:sdn-controller:of-rules']
if (vifRules === undefined) {
// Nothing to do
return
}
const newVifRules = JSON.parse(vifRules).filter(vifRule => {
const rule = JSON.parse(vifRule)
return (
rule.protocol !== protocol ||
rule.port !== port ||
rule.ipRange !== ipRange ||
rule.direction !== direction
)
})
await vif.update_other_config(
'xo:sdn-controller:of-rules',
Object.keys(newVifRules).length === 0
? null
: JSON.stringify(newVifRules)
)
vif = await vif.$xapi.barrier(vif.$ref)
// Put back rules that could have been wrongfully deleted because delete rule too general
await this._applyVifOfRules(vif)
} catch (error) {
log.error('Error while adding OF rule', {
error,
vif: vif.uuid,
host: vif.$VM.$resident_on.uuid,
protocol,
port,
ipRange,
direction,
})
}
}
// ---------------------------------------------------------------------------
async _createPrivateNetwork({
poolIds,
pifIds,
@@ -661,8 +844,6 @@ class SDNController extends EventEmitter {
const privateNetwork = new PrivateNetwork(this, uuidv4(), preferredCenter)
for (const pool of pools) {
await this._setPoolControllerIfNeeded(pool)
const pifId = pifIds.find(id => {
const pif = this._xo.getXapiObject(this._xo.getObject(id, 'PIF'))
return pif.$pool.$ref === pool.$ref
@@ -703,9 +884,11 @@ class SDNController extends EventEmitter {
await Promise.all(
map(hosts, async host => {
await createTunnel(host, createdNetwork)
this._createOvsdbClient(host)
this._getOrCreateOvsdbClient(host)
this._getOrCreateOfChannel(host)
})
)
await this._setPoolControllerIfNeeded(pool)
await privateNetwork.addNetwork(createdNetwork)
this._networks.set(createdNetwork.$id, createdNetwork.$ref)
@@ -721,6 +904,10 @@ class SDNController extends EventEmitter {
// ---------------------------------------------------------------------------
async _manageXapi(xapi) {
if (this._managed.includes(xapi.pool.uuid)) {
return noop // pushed in _cleaners
}
const { objects } = xapi
const objectsRemovedXapi = this._objectsRemoved.bind(this, xapi)
@@ -729,6 +916,7 @@ class SDNController extends EventEmitter {
objects.on('remove', objectsRemovedXapi)
await this._installCaCertificateIfNeeded(xapi)
this._managed.push(xapi.pool.uuid)
return () => {
objects.removeListener('add', this._objectsAdded)
@@ -738,7 +926,7 @@ class SDNController extends EventEmitter {
}
_objectsAdded(objects) {
forOwn(objects, object => {
forOwn(objects, async object => {
const { $type } = object
if ($type === 'host') {
@@ -750,7 +938,18 @@ class SDNController extends EventEmitter {
if (!this._newHosts.some(_ => _.$ref === object.$ref)) {
this._newHosts.push(object)
}
this._createOvsdbClient(object)
this._getOrCreateOvsdbClient(object)
this._getOrCreateOfChannel(object)
} else if ($type === 'PIF') {
log.debug('New PIF', {
device: object.device,
host: object.$host.name_label,
network: object.$network.name_label,
pool: object.$pool.name_label,
})
const client = this.ovsdbClients[object.host]
client.setBridgeControllerForNetwork(object.$network)
}
})
}
@@ -765,6 +964,10 @@ class SDNController extends EventEmitter {
await this._hostUpdated(object)
} else if ($type === 'host_metrics') {
await this._hostMetricsUpdated(object)
} else if ($type === 'VM') {
await this._vmUpdated(object)
} else if ($type === 'VIF') {
await this._vifUpdated(object)
}
} catch (error) {
log.error('Error in _objectsUpdated', {
@@ -782,6 +985,10 @@ class SDNController extends EventEmitter {
this.ovsdbClients,
client => client.host.$id === id
)
this.ofChannels = omitBy(
this.ofChannels,
channel => channel.host.$id === id
)
// If a Star center host is removed: re-elect a new center where needed
const starCenterRef = this._starCenters.get(id)
@@ -898,6 +1105,8 @@ class SDNController extends EventEmitter {
})
}
this._setBridgeControllerForHost(host)
const privateNetworks = filter(
this.privateNetworks,
privateNetwork => privateNetwork[host.$pool.uuid] !== undefined
@@ -928,6 +1137,64 @@ class SDNController extends EventEmitter {
return this._hostUnreachable(ovsdbClient.host)
}
/**
 * React to a VM object update: when a lifecycle task (migration, reboot,
 * shutdown, start) is in progress on the VM, clean its VIFs' OpenFlow
 * rules before the task completes and/or re-apply them after.
 *
 * NOTE(review): the `forOwn` callback is async, so each per-task handler
 * runs fire-and-forget — this method resolves before the handlers finish
 * and their rejections would be unhandled. Presumably intentional (the
 * handlers wait on long-running XAPI tasks), but worth confirming.
 *
 * @param {object} vm - XAPI VM object
 */
async _vmUpdated(vm) {
  forOwn(vm.current_operations, async (value, key) => {
    // `key` is the task ref: skip tasks we are already handling
    if (this._handledTasks.includes(key)) {
      return
    }
    this._handledTasks.push(key)

    // Clean before task ends
    if (
      value === 'migrate_send' ||
      value === 'pool_migrate' ||
      value === 'clean_reboot' ||
      value === 'hard_reboot' ||
      value === 'hard_shutdown' ||
      value === 'clean_shutdown'
    ) {
      await this._cleanOfRules(vm)
    }

    // Wait for the task to finish; task errors are deliberately ignored
    await vm.$xapi.watchTask(key).catch(noop)

    // Re-apply rules after task ended (not after shutdowns)
    if (
      value === 'migrate_send' ||
      value === 'pool_migrate' ||
      value === 'clean_reboot' ||
      value === 'hard_reboot' ||
      value === 'start' ||
      value === 'start_on'
    ) {
      // Refresh the VM object so we see post-task state
      vm = await vm.$xapi.barrier(vm.$ref)
      await this._applyOfRules(vm)
    }

    // Allow this task ref to be handled again later
    this._handledTasks = filter(this._handledTasks, ref => ref !== key)
  })
}
/**
 * React to a VIF object update: re-apply the persisted OpenFlow rules
 * after a plug, and remove the installed flows on unplug (the persisted
 * rule list is kept so rules can be re-applied on the next plug).
 *
 * @param {object} vif - XAPI VIF object
 */
async _vifUpdated(vif) {
  await Promise.all(
    map(vif.current_operations, async (value, key) => {
      // `key` is the task ref: skip tasks we are already handling
      if (this._handledTasks.includes(key)) {
        return
      }
      this._handledTasks.push(key)

      if (value === 'plug') {
        // Wait for the plug to complete, refresh the VIF, then install
        // its persisted rules
        await vif.$xapi.watchTask(key).catch(noop)
        vif = await vif.$xapi.barrier(vif.$ref)
        await this._applyVifOfRules(vif)
      } else if (value === 'unplug' || value === 'unplug_force') {
        // Remove flows while the VIF is still attached, then wait for
        // the unplug task to finish
        await this._cleanVifOfRules(vif)
        await vif.$xapi.watchTask(key).catch(noop)
      }

      // Allow this task ref to be handled again later
      this._handledTasks = filter(this._handledTasks, ref => ref !== key)
    })
  )
}
// ---------------------------------------------------------------------------
async _setPoolControllerIfNeeded(pool) {
@@ -948,9 +1215,20 @@ class SDNController extends EventEmitter {
})
}
const hosts = filter(pool.$xapi.objects.all, { $type: 'host' })
await Promise.all(
hosts.map(host => {
return this._setBridgeControllerForHost(host)
})
)
this._cleaners.push(await this._manageXapi(pool.$xapi))
}
_setBridgeControllerForHost(host) {
const client = this.ovsdbClients[host.$ref]
return client.setBridgeController()
}
// ---------------------------------------------------------------------------
async _installCaCertificateIfNeeded(xapi) {
@@ -1116,18 +1394,60 @@ class SDNController extends EventEmitter {
// ---------------------------------------------------------------------------
_createOvsdbClient(host) {
if (this.ovsdbClients[host.$ref] !== undefined) {
async _applyVifOfRules(vif) {
if (!vif.currently_attached) {
return
}
const client = new OvsdbClient(
host,
this._clientKey,
this._clientCert,
this._caCert
)
this.ovsdbClients[host.$ref] = client
const vifRules = vif.other_config['xo:sdn-controller:of-rules']
const parsedRules = vifRules !== undefined ? JSON.parse(vifRules) : []
for (const stringRule of parsedRules) {
const rule = JSON.parse(stringRule)
await this._addRule({ ...rule, vifId: vif.$id })
}
}
async _cleanVifOfRules(vif) {
const vifRules = vif.other_config['xo:sdn-controller:of-rules']
const parsedRules = vifRules !== undefined ? JSON.parse(vifRules) : []
for (const stringRule of parsedRules) {
const rule = JSON.parse(stringRule)
await this._deleteRule({ ...rule, vifId: vif.$id }, false)
}
}
async _cleanOfRules(vm) {
for (const vif of vm.$VIFs) {
await this._cleanVifOfRules(vif)
}
}
async _applyOfRules(vm) {
for (const vif of vm.$VIFs) {
await this._applyVifOfRules(vif)
}
}
// ---------------------------------------------------------------------------
_getOrCreateOvsdbClient(host) {
let client = this.ovsdbClients[host.$ref]
if (client === undefined) {
client = new OvsdbClient(host, this._tlsHelper)
this.ovsdbClients[host.$ref] = client
}
return client
}
_getOrCreateOfChannel(host) {
let channel = this.ofChannels[host.$ref]
if (channel === undefined) {
channel = new OpenFlowChannel(host, this._tlsHelper)
this.ofChannels[host.$ref] = channel
}
return channel
}
}

View File

@@ -1,3 +1,4 @@
import assert from 'assert'
import createLogger from '@xen-orchestra/log'
import { filter, forOwn, sample } from 'lodash'
@@ -61,13 +62,39 @@ export class PrivateNetwork {
otherConfig['xo:sdn-controller:encrypted'] === 'true'
? createPassword()
: undefined
const pifDevice = otherConfig['xo:sdn-controller:pif-device']
const pifVlan = +otherConfig['xo:sdn-controller:vlan']
const hostPif = hostClient.host.$PIFs.find(
pif =>
pif?.device === pifDevice &&
pif.VLAN === pifVlan &&
pif.ip_configuration_mode !== 'None'
)
const centerPif = centerClient.host.$PIFs.find(
pif =>
pif?.device === pifDevice &&
pif.VLAN === pifVlan &&
pif.ip_configuration_mode !== 'None'
)
assert(hostPif !== undefined, 'No PIF found', {
privateNetwork: this.uuid,
pifDevice,
pifVlan,
host: host.name_label,
})
assert(centerPif !== undefined, 'No PIF found in center', {
privateNetwork: this.uuid,
pifDevice,
pifVlan,
host: this.center.name_label,
})
let bridgeName
try {
;[bridgeName] = await Promise.all([
hostClient.addInterfaceAndPort(
network,
centerClient.host.address,
centerPif.IP,
encapsulation,
vni,
password,
@@ -75,7 +102,7 @@ export class PrivateNetwork {
),
centerClient.addInterfaceAndPort(
centerNetwork,
hostClient.host.address,
hostPif.IP,
encapsulation,
vni,
password,

View File

@@ -0,0 +1,401 @@
import assert from 'assert'
import createLogger from '@xen-orchestra/log'
import ipaddr from 'ipaddr.js'
import openflow from '@xen-orchestra/openflow'
import parse from '@xen-orchestra/openflow/parse-socket'
import { coalesceCalls } from '@vates/coalesce-calls'
import { EventEmitter } from 'events'
import { fromEvent } from 'promise-toolbox'
// =============================================================================
const log = createLogger('xo:xo-server:sdn-controller:openflow-controller')
const version = openflow.versions.openFlow11
const ofProtocol = openflow.protocols[version]
const OPENFLOW_PORT = ofProtocol.sslPort
// -----------------------------------------------------------------------------
/**
 * Parse an IP address or CIDR range into the `{ addr, mask }` pair used
 * in OpenFlow match fields.
 *
 * @param {string} ipAddress - plain IP, CIDR range, or '' (= any IP)
 * @returns {{ addr: string, mask: string|undefined }|undefined}
 *   `undefined` when `ipAddress` is ''; otherwise the normalized address
 *   and — for CIDR input only — its wildcarded mask (`mask` stays
 *   `undefined` for a plain address)
 */
const parseIp = ipAddress => {
  // '' means "any IP": emit no address constraint at all
  if (ipAddress === '') {
    return
  }

  let addr, mask
  if (ipAddress.includes('/')) {
    const ip = ipaddr.parseCIDR(ipAddress)
    addr = ip[0].toString()
    // Invert each octet of the subnet mask: OF standard matches expect a
    // wildcarded (i.e. inverted) mask
    const maskOctets = ipaddr.IPv4.subnetMaskFromPrefixLength(ip[1]).octets
    mask = ipaddr.fromByteArray(maskOctets.map(i => 255 - i)).toString() // Use wildcarded mask
  } else {
    // TODO: return ipAddress directly?
    const ip = ipaddr.parse(ipAddress)
    addr = ip.toString()
  }

  return { addr, mask }
}
/**
 * Translate a user-facing protocol name into the `{ dlType, nwProto }`
 * OpenFlow match fields. Unknown names yield an empty object (no
 * protocol constraint).
 *
 * @param {string} protocol - 'IP' | 'ICMP' | 'TCP' | 'UDP' | 'ARP'
 * @returns {{ dlType?: number, nwProto?: number }}
 */
const dlAndNwProtocolFromString = protocol => {
  const { dlType, nwProto } = ofProtocol
  const byProtocol = new Map([
    ['IP', { dlType: dlType.ip }],
    ['ICMP', { dlType: dlType.ip, nwProto: nwProto.icmp }],
    ['TCP', { dlType: dlType.ip, nwProto: nwProto.tcp }],
    ['UDP', { dlType: dlType.ip, nwProto: nwProto.udp }],
    ['ARP', { dlType: dlType.arp }],
  ])
  return byProtocol.get(protocol) ?? {} // TODO: Error on unknown names?
}
// =============================================================================
export class OpenFlowChannel extends EventEmitter {
  /*
  Create an SSL connection to an XCP-ng host.
  Interact with the host's OpenVSwitch (OVS) daemon to manage its flows with OpenFlow11.
  See:
  - OpenFlow11 spec: https://www.opennetworking.org/wp-content/uploads/2014/10/openflow-spec-v1.1.0.pdf
  */

  /**
   * @param {object} host - XAPI host object this channel talks to
   * @param {TlsHelper} tlsHelper - shared helper holding TLS certificates
   */
  constructor(host, tlsHelper) {
    super()

    this.host = host
    this._tlsHelper = tlsHelper
    // Collapse concurrent connection attempts into a single one.
    // NOTE(review): `this._connect` is passed unbound; this assumes
    // `coalesceCalls` invokes it with the instance as `this` — confirm
    // against @vates/coalesce-calls.
    this._coalesceConnect = coalesceCalls(this._connect)
    this._socket = undefined

    log.debug('New OpenFlow channel', {
      host: this.host.name_label,
    })
  }

  // ---------------------------------------------------------------------------

  /**
   * Install the OpenFlow flows implementing one ACL rule for a VIF.
   * Depending on `direction` ('from'/'to', possibly both), up to 4 flows
   * are added matching on the VIF's MAC as source and/or destination.
   * The connection is opened on demand and closed when done.
   *
   * @param {object} vif - XAPI VIF object
   * @param {boolean} allow - `true` to forward matching traffic (action
   *   `output:NORMAL`), `false` to drop it (no action)
   * @param {string} protocol - protocol name, see `dlAndNwProtocolFromString`
   * @param {number} [port] - transport port to match
   * @param {string} ipRange - IP/CIDR to match ('' = any)
   * @param {string} direction - contains 'from' and/or 'to'
   * @param {number} ofport - OF port of the VIF (used for `in_port` matches)
   */
  async addRule(vif, allow, protocol, port, ipRange, direction, ofport) {
    log.info('Adding OF rule', {
      allow,
      protocol,
      port,
      ipRange,
      direction,
      vif: vif.uuid,
    })
    // A single instruction list shared by all flows: forward normally when
    // allowed, otherwise no action (packet dropped)
    const instructions = [
      {
        type: ofProtocol.instructionType.applyActions,
        actions: allow
          ? [
              {
                type: ofProtocol.actionType.output,
                port: ofProtocol.port.normal,
              },
            ]
          : [],
      },
    ]

    const ip = parseIp(ipRange)
    const { dlType, nwProto } = dlAndNwProtocolFromString(protocol)
    const mac = vif.MAC
    await this._coalesceConnect(vif.network)
    if (direction.includes('from')) {
      // Traffic emitted by the VIF (dl_src = VIF MAC)
      this._addFlow(
        {
          type: ofProtocol.matchType.standard,
          dl_type: dlType,
          dl_src: mac,
          nw_proto: nwProto,
          nw_dst: ip?.addr,
          nw_dst_mask: ip?.mask,
          tp_src: port,
          in_port: ofport,
        },
        instructions
      )
      if (nwProto !== undefined) {
        // Matching return traffic towards the VIF
        this._addFlow(
          {
            type: ofProtocol.matchType.standard,
            dl_type: dlType,
            dl_dst: mac,
            nw_proto: nwProto,
            nw_src: ip?.addr,
            nw_src_mask: ip?.mask,
            tp_dst: port,
          },
          instructions
        )
      }
    }
    if (direction.includes('to')) {
      if (nwProto !== undefined) {
        // Traffic emitted by the VIF towards the matched destination port
        this._addFlow(
          {
            type: ofProtocol.matchType.standard,
            dl_type: dlType,
            dl_src: mac,
            nw_proto: nwProto,
            nw_dst: ip?.addr,
            nw_dst_mask: ip?.mask,
            tp_dst: port,
            in_port: ofport,
          },
          instructions
        )
      }
      // Traffic received by the VIF (dl_dst = VIF MAC)
      this._addFlow(
        {
          type: ofProtocol.matchType.standard,
          dl_type: dlType,
          dl_dst: mac,
          nw_proto: nwProto,
          nw_src: ip?.addr,
          nw_src_mask: ip?.mask,
          tp_src: port,
        },
        instructions
      )
    }

    // Connections are short-lived: close after every batch of flow mods
    await this._tlsHelper.closeSocket(this._socket)
    delete this._socket
  }

  /**
   * Remove the OpenFlow flows corresponding to one ACL rule of a VIF.
   * Mirrors the matches installed by `addRule` (without `in_port`).
   *
   * @param {object} vif - XAPI VIF object
   * @param {string} protocol - protocol name
   * @param {number} [port] - transport port to match
   * @param {string} ipRange - IP/CIDR to match ('' = any)
   * @param {string} direction - contains 'from' and/or 'to'
   * @param {number} ofport - OF port of the VIF (unused in delete matches)
   */
  async deleteRule(vif, protocol, port, ipRange, direction, ofport) {
    log.info('Deleting OF rule', {
      protocol,
      port,
      ipRange,
      direction,
      vif: vif.uuid,
    })

    const ip = parseIp(ipRange)
    const { dlType, nwProto } = dlAndNwProtocolFromString(protocol)
    const mac = vif.MAC
    await this._coalesceConnect(vif.network)
    if (direction.includes('from')) {
      this._removeFlows({
        type: ofProtocol.matchType.standard,
        dl_type: dlType,
        dl_src: mac,
        nw_proto: nwProto,
        nw_dst: ip?.addr,
        nw_dst_mask: ip?.mask,
        tp_src: port,
      })
      if (nwProto !== undefined) {
        this._removeFlows({
          type: ofProtocol.matchType.standard,
          dl_type: dlType,
          dl_dst: mac,
          nw_proto: nwProto,
          nw_src: ip?.addr,
          nw_src_mask: ip?.mask,
          tp_dst: port,
        })
      }
    }
    if (direction.includes('to')) {
      if (nwProto !== undefined) {
        this._removeFlows({
          type: ofProtocol.matchType.standard,
          dl_type: dlType,
          dl_src: mac,
          nw_proto: nwProto,
          nw_dst: ip?.addr,
          nw_dst_mask: ip?.mask,
          tp_dst: port,
        })
      }
      this._removeFlows({
        type: ofProtocol.matchType.standard,
        dl_type: dlType,
        dl_dst: mac,
        nw_proto: nwProto,
        nw_src: ip?.addr,
        nw_src_mask: ip?.mask,
        tp_src: port,
      })
    }

    // Connections are short-lived: close after every batch of flow mods
    await this._tlsHelper.closeSocket(this._socket)
    delete this._socket
  }

  // ===========================================================================

  /**
   * Dispatch one parsed OpenFlow message.
   * Handshake sequence driven here: HELLO -> FEATURES_REQUEST ->
   * GET_CONFIG_REQUEST -> GET_CONFIG_REPLY, which emits 'ofConnected'
   * (awaited by `_connect`). Also answers ECHO_REQUESTs to keep the
   * connection alive.
   */
  _processMessage(message) {
    if (message.header === undefined) {
      log.error('Failed to get header while processing message', {
        message,
      })
      return
    }

    const ofType = message.header.type
    switch (ofType) {
      case ofProtocol.type.hello:
        this._sendPacket(
          this._syncMessage(ofProtocol.type.hello, message.header.xid)
        )
        this._sendPacket(
          this._syncMessage(ofProtocol.type.featuresRequest, message.header.xid)
        )
        break
      case ofProtocol.type.error:
        {
          const { code, type } = message
          log.error('OpenFlow error', {
            code,
            type,
            // data: openflow.unpack(data),
          })
        }
        break
      case ofProtocol.type.echoRequest:
        this._sendPacket(
          this._syncMessage(ofProtocol.type.echoReply, message.header.xid)
        )
        break
      case ofProtocol.type.packetIn:
        log.debug('PACKET_IN')
        break
      case ofProtocol.type.featuresReply:
        {
          const { datapath_id: dpid, capabilities, ports } = message
          log.debug('FEATURES_REPLY', { dpid, capabilities, ports })
          this._sendPacket(
            this._syncMessage(
              ofProtocol.type.getConfigRequest,
              message.header.xid
            )
          )
        }
        break
      case ofProtocol.type.getConfigReply:
        {
          const { flags } = message
          log.debug('CONFIG_REPLY', { flags })
          // Handshake complete: unblock `_connect`
          this.emit('ofConnected')
        }
        break
      case ofProtocol.type.portStatus:
        log.debug('PORT_STATUS')
        break
      case ofProtocol.type.flowRemoved:
        log.debug('FLOW_REMOVED')
        break
      default:
        log.error('Unknown OpenFlow type', { ofType })
        break
    }
  }

  // Send a FLOW_MOD(add) for the given match and instructions
  _addFlow(match, instructions) {
    const packet = this._flowModMessage(
      ofProtocol.flowModCommand.add,
      match,
      instructions
    )
    this._sendPacket(packet)
  }

  // Send a FLOW_MOD(delete) removing every flow matching `match`
  _removeFlows(match) {
    const packet = this._flowModMessage(ofProtocol.flowModCommand.delete, match)
    this._sendPacket(packet)
  }

  // ---------------------------------------------------------------------------

  // Build a bare OF message of the given type (header only)
  _syncMessage(type, xid = 1) {
    return {
      header: {
        version,
        type,
        xid,
      },
    }
  }

  // Build a FLOW_MOD message for `command` with the given match/instructions
  _flowModMessage(command, match, instructions = []) {
    // TODO: Do not use default priority?
    return {
      ...this._syncMessage(ofProtocol.type.flowMod),
      command,
      flags: ofProtocol.flowModFlags.sendFlowRem,
      match,
      instructions,
    }
  }

  // ---------------------------------------------------------------------------

  // Serialize and write an OF packet; write failures are logged, not thrown
  _sendPacket(packet) {
    const buf = openflow.pack(packet)
    try {
      this._socket.write(buf)
    } catch (error) {
      log.error('Error while writing into socket', {
        error,
        host: this.host.name_label,
      })
    }
  }

  // ---------------------------------------------------------------------------

  // Read loop: parse incoming OF messages from the socket until it closes
  async _parseMessages() {
    for await (const msg of parse(this._socket)) {
      if (msg.header !== undefined) {
        this._processMessage(msg)
      } else {
        log.error('Error: Message is unparseable', { msg })
      }
    }
  }

  /**
   * Open the TLS connection to the host's OVS on the PIF belonging to
   * `networkUuid`, start the read loop, and wait for the OF handshake to
   * complete ('ofConnected'). No-op if a socket already exists.
   * Called through `this._coalesceConnect` to dedupe concurrent calls.
   */
  async _connect(networkUuid) {
    if (this._socket !== undefined) {
      return
    }

    const pif = this.host.$PIFs.find(pif => pif?.network === networkUuid)
    assert(pif !== undefined, 'No PIF available')

    this._socket = await this._tlsHelper.connect(pif.IP, OPENFLOW_PORT)
    // Drop the cached socket on error/end so the next call reconnects
    const deleteSocket = () => {
      this._socket = undefined
    }
    this._socket.on('error', deleteSocket)
    this._socket.on('end', deleteSocket)

    this._parseMessages().catch(error => {
      log.error('Error while parsing OF messages', error)
    })

    await fromEvent(this, 'ofConnected')
  }
}

View File

@@ -1,12 +1,15 @@
import assert from 'assert'
import createLogger from '@xen-orchestra/log'
import fromEvent from 'promise-toolbox/fromEvent'
import { connect } from 'tls'
import { forOwn, toPairs } from 'lodash'
// =============================================================================
const log = createLogger('xo:xo-server:sdn-controller:ovsdb-client')
const OVSDB_PORT = 6640
const PROTOCOLS = 'OpenFlow11' // Supported OpenFlow versions
const TARGET = 'pssl:' // OpenFlow Controller target
// =============================================================================
@@ -14,6 +17,10 @@ function toMap(object) {
return ['map', toPairs(object)]
}
// An OVSDB select column may hold either a wrapped set (['set', [...]])
// or a single bare value; normalize both shapes to a plain array.
function setFromSelect(object) {
  if (object[0] === 'set') {
    return object[1]
  }
  return [object]
}
// =============================================================================
export class OvsdbClient {
@@ -36,7 +43,7 @@ export class OvsdbClient {
- `remote_ip`: Remote IP of the tunnel
*/
constructor(host, clientKey, clientCert, caCert) {
constructor(host, tlsHelper) {
this._numberOfPortAndInterface = 0
this._requestId = 0
@@ -44,7 +51,7 @@ export class OvsdbClient {
this.host = host
this.updateCertificates(clientKey, clientCert, caCert)
this._tlsHelper = tlsHelper
log.debug('New OVSDB client', {
host: this.host.name_label,
@@ -53,18 +60,6 @@ export class OvsdbClient {
// ---------------------------------------------------------------------------
updateCertificates(clientKey, clientCert, caCert) {
this._clientKey = clientKey
this._clientCert = clientCert
this._caCert = caCert
log.debug('Certificates have been updated', {
host: this.host.name_label,
})
}
// ---------------------------------------------------------------------------
async addInterfaceAndPort(
network,
remoteAddress,
@@ -286,6 +281,160 @@ export class OvsdbClient {
socket.destroy()
}
/**
 * Register this plugin as the OpenFlow controller of every OVS bridge of
 * the host's networks, in a single OVSDB transaction: insert a Controller
 * row (target `pssl:`) and mutate each not-yet-controlled Bridge to
 * reference it. Stores the created controller UUID in
 * `this._controllerUuid` on success.
 */
async setBridgeController() {
  const socket = await this._connect()
  // Add controller to openvswitch table if needed
  const params = ['Open_vSwitch']
  params.push({
    op: 'insert',
    table: 'Controller',
    row: {
      target: TARGET,
    },
    // Named UUID referenced by the Bridge mutations below
    'uuid-name': 'new_controller',
  })

  const networks = this.host.$PIFs.map(pif => pif?.$network)
  for (const network of networks) {
    // network can be undefined so we can't set its controller
    // It can happen if there's a ref problem within XAPI
    if (network === undefined) {
      continue
    }
    const bridge = await this._getBridgeForNetwork(network, socket)
    if (bridge.uuid === undefined) {
      continue
    }
    if (await this._bridgeAlreadyControlled(bridge, socket)) {
      continue
    }
    // Attach the new controller to this bridge and enable OpenFlow11
    params.push({
      op: 'mutate',
      table: 'Bridge',
      where: [['_uuid', '==', ['uuid', bridge.uuid]]],
      mutations: [
        ['controller', 'insert', ['named-uuid', 'new_controller']],
        ['protocols', 'insert', PROTOCOLS],
      ],
    })
  }

  const jsonObjects = await this._sendOvsdbTransaction(params, socket)
  if (jsonObjects === undefined) {
    socket.destroy()
    return
  }
  if (jsonObjects[0].error !== null) {
    log.error('Error while setting controller', {
      error: jsonObjects[0].error,
      host: this.host.name_label,
    })
  } else {
    // First result is the Controller insert: remember its UUID
    this._controllerUuid = jsonObjects[0].result[0].uuid[1]
    log.info('Controller set', { host: this.host.name_label })
  }

  socket.destroy()
}
/**
 * Attach the already-created controller (target `pssl:`) to the OVS
 * bridge of a single network — used for networks/PIFs appearing after
 * `setBridgeController` ran. Looks the controller UUID up from OVSDB if
 * it is not cached yet.
 *
 * @param {object} network - XAPI network object
 */
async setBridgeControllerForNetwork(network) {
  const socket = await this._connect()

  if (this._controllerUuid === undefined) {
    // Recover the controller UUID from the Controller table
    const where = [['target', '==', TARGET]]
    const selectResult = await this._select(
      'Controller',
      ['_uuid'],
      where,
      socket
    )
    this._controllerUuid = selectResult._uuid[1]
  }
  assert.notStrictEqual(this._controllerUuid, undefined)

  const bridge = await this._getBridgeForNetwork(network, socket)
  if (bridge.uuid === undefined) {
    socket.destroy()
    return
  }
  if (await this._bridgeAlreadyControlled(bridge, socket)) {
    socket.destroy()
    return
  }

  // Reference the existing controller row and enable OpenFlow11
  const mutateOperation = {
    op: 'mutate',
    table: 'Bridge',
    where: [['_uuid', '==', ['uuid', bridge.uuid]]],
    mutations: [
      ['controller', 'insert', ['uuid', this._controllerUuid]],
      ['protocols', 'insert', PROTOCOLS],
    ],
  }

  const params = ['Open_vSwitch', mutateOperation]
  const jsonObjects = await this._sendOvsdbTransaction(params, socket)
  if (jsonObjects === undefined) {
    socket.destroy()
    return
  }
  if (jsonObjects[0].error !== null) {
    log.error('Error while setting controller for network', {
      error: jsonObjects[0].error,
      host: this.host.name_label,
      network: network.name_label,
    })
  } else {
    log.info('Controller set for network', {
      controller: this._controllerUuid,
      host: this.host.name_label,
      network: network.name_label,
    })
  }

  socket.destroy()
}
async getOfPortForVif(vif) {
const where = [
['external_ids', 'includes', toMap({ 'xs-vif-uuid': vif.uuid })],
]
const socket = await this._connect()
const selectResult = await this._select(
'Interface',
['name', 'ofport'],
where,
socket,
true // multiResult
)
if (selectResult === undefined) {
log.error('No of port found for VIF', {
network: vif.$network.name_label,
host: this.host.name_label,
vm: vif.$VM.name_label,
vif: vif.uuid,
})
return
}
let ofport
for (const i in selectResult) {
const row = selectResult[i]
if (!row.name.includes('tap')) {
ofport = row.ofport
break
}
}
socket.destroy()
return ofport
}
// ===========================================================================
_parseJson(chunk) {
@@ -351,6 +500,25 @@ export class OvsdbClient {
})
}
// ---------------------------------------------------------------------------
async _bridgeAlreadyControlled(bridge, socket) {
const where = [['_uuid', '==', ['uuid', bridge.uuid]]]
let result = await this._select('Bridge', ['controller'], where, socket)
const controllers = setFromSelect(result.controller)
for (const controller of controllers) {
const where = [['_uuid', '==', controller]]
result = await this._select('Controller', ['target'], where, socket)
if (result.target === TARGET) {
return true
}
}
return false
}
// ---------------------------------------------------------------------------
async _getBridgeForNetwork(network, socket) {
const where = [
['external_ids', 'includes', toMap({ 'xs-network-uuids': network.uuid })],
@@ -406,9 +574,7 @@ export class OvsdbClient {
return
}
return selectResult.ports[0] === 'set'
? selectResult.ports[1]
: [selectResult.ports]
return setFromSelect(selectResult.ports)
}
async _getPortInterfaces(portUuid, socket) {
@@ -423,9 +589,7 @@ export class OvsdbClient {
return
}
return selectResult.interfaces[0] === 'set'
? selectResult.interfaces[1]
: [selectResult.interfaces]
return setFromSelect(selectResult.interfaces)
}
async _interfaceHasRemote(interfaceUuid, remoteAddress, socket) {
@@ -451,12 +615,12 @@ export class OvsdbClient {
// ---------------------------------------------------------------------------
async _select(table, columns, where, socket) {
async _select(table, columns, where, socket, multiResult = false) {
const selectOperation = {
op: 'select',
table: table,
columns: columns,
where: where,
table,
columns,
where,
}
const params = ['Open_vSwitch', selectOperation]
@@ -487,6 +651,10 @@ export class OvsdbClient {
return
}
if (multiResult) {
return jsonResult.rows
}
// For now all select operations should return only 1 row
assert(
jsonResult.rows.length === 1,
@@ -538,37 +706,7 @@ export class OvsdbClient {
// ---------------------------------------------------------------------------
async _connect() {
const options = {
ca: this._caCert,
key: this._clientKey,
cert: this._clientCert,
host: this.host.address,
port: OVSDB_PORT,
rejectUnauthorized: false,
requestCert: false,
}
const socket = connect(options)
try {
await fromEvent(socket, 'secureConnect')
} catch (error) {
log.error('TLS connection failed', {
error,
code: error.code,
host: this.host.name_label,
})
throw error
}
socket.on('error', error => {
log.error('Socket error', {
error,
code: error.code,
host: this.host.name_label,
})
})
return socket
_connect() {
return this._tlsHelper.connect(this.host.address, OVSDB_PORT)
}
}

View File

@@ -0,0 +1,68 @@
import fromEvent from 'promise-toolbox/fromEvent'
import createLogger from '@xen-orchestra/log'
import { connect } from 'tls'
// =============================================================================
const log = createLogger('xo:xo-server:sdn-controller:tls-connect')
// =============================================================================
/**
 * Centralizes TLS client connections for the SDN controller: holds the
 * client certificates and opens/closes sockets on demand.
 */
export class TlsHelper {
  /** Store the client key/cert and CA cert used for future connections. */
  updateCertificates(clientKey, clientCert, caCert) {
    this._caCert = caCert
    this._clientCert = clientCert
    this._clientKey = clientKey
    log.debug('Certificates have been updated')
  }

  // ---------------------------------------------------------------------------

  /**
   * Open a TLS connection to `address:port` with the stored certificates.
   * Resolves with the connected socket once the handshake succeeded;
   * rejects (after logging) if the handshake fails. Later socket errors
   * are logged by the installed 'error' listener.
   */
  async connect(address, port) {
    const socket = connect({
      ca: this._caCert,
      cert: this._clientCert,
      ciphers: 'DEFAULT:!DH',
      host: address,
      key: this._clientKey,
      port,
      rejectUnauthorized: false,
      requestCert: false,
    })
    try {
      await fromEvent(socket, 'secureConnect')
    } catch (error) {
      log.error('TLS connection failed', {
        error,
        address,
        port,
      })
      throw error
    }
    socket.on('error', error => {
      log.error('Socket error', {
        error,
        address,
        port,
      })
    })
    return socket
  }

  /** Gracefully end a socket; closure failures are logged, not thrown. */
  async closeSocket(socket) {
    socket.end()
    try {
      await fromEvent(socket, 'finish')
    } catch (error) {
      log.error('TLS socket closure failed', {
        error,
        socket,
      })
    }
    log.debug('TLS socket closed')
  }
}

View File

@@ -2,6 +2,9 @@
#
# See sample.config.toml to override.
# The clone of a template can exceed the jest timeout if it's used by other tests to create a VM
cloneTempVmTimeout = '1 minute'
# After some executions we saw that `deleteTempResources` takes around `21s`.
# Therefore, we chose a large timeout to be sure that all resources created by `xo-server-test`
# will be deleted

View File

@@ -32,7 +32,7 @@
"@babel/preset-env": "^7.1.6",
"@iarna/toml": "^2.2.1",
"@vates/parse-duration": "^0.1.0",
"app-conf": "^0.7.0",
"app-conf": "^0.8.0",
"babel-plugin-lodash": "^3.2.11",
"golike-defer": "^0.4.1",
"jest": "^24.8.0",

View File

@@ -2,7 +2,7 @@
import defer from 'golike-defer'
import Xo from 'xo-lib'
import XoCollection from 'xo-collection'
import { defaultsDeep, find, forOwn, pick } from 'lodash'
import { defaultsDeep, find, forOwn, iteratee, pick } from 'lodash'
import { fromEvent } from 'promise-toolbox'
import { parseDuration } from '@vates/parse-duration'
@@ -84,6 +84,15 @@ class XoConnection extends Xo {
async waitObjectState(id, predicate) {
let obj = this._objects.all[id]
if (typeof predicate !== 'function') {
const fn = iteratee(predicate)
predicate = () => {
if (!fn(obj)) {
throw new Error('retry')
}
}
}
while (true) {
try {
await predicate(obj)
@@ -100,6 +109,12 @@ class XoConnection extends Xo {
return id
}
async createTempResourceSet(params) {
const { id } = await xo.call('resourceSet.create', params)
this._tempResourceDisposers.push('resourceSet.delete', { id })
return id
}
async getUser(id) {
return find(await super.call('user.getAll'), { id })
}
@@ -155,6 +170,16 @@ class XoConnection extends Xo {
})
}
async cloneTempVm(id) {
const clonedVmId = await this.call('vm.clone', {
full_copy: false,
id,
name: getDefaultName(),
})
this._durableResourceDisposers.push('vm.delete', { id: clonedVmId })
return this.getOrWaitObject(clonedVmId)
}
async startTempVm(id, params, withXenTools = false) {
await this.call('vm.start', { id, ...params })
this._tempResourceDisposers.push('vm.stop', { id, force: true })

View File

@@ -1,5 +1,7 @@
/* eslint-env jest */
import { parseDuration } from '@vates/parse-duration'
import config from '../_config'
import xo from '../_xoConnection'
@@ -48,4 +50,85 @@ describe('issue', () => {
await xo.call('network.delete', { id })
})
// Regression tests for issue 4980: a VM created from a template should
// derive its boot order from the template's own boot order.
describe('4980', () => {
  let template

  // Clone the default template so the boot-order changes below do not
  // affect the shared template used by other tests.
  beforeAll(async () => {
    // Cloning can be slow; use the dedicated timeout from the config.
    jest.setTimeout(parseDuration(config.cloneTempVmTimeout))
    template = await xo.cloneTempVm(config.templates.default)
  })

  const bootOrder = 'cd'
  const virtualizationMode = 'hvm'

  // Force a known virtualization mode and boot order on the cloned
  // template, then wait until the XO object actually reflects both.
  // NOTE(review): this relies on `beforeAll` hooks running in declaration
  // order so that `template` is already assigned here — confirm this
  // ordering guarantee holds for the jest version in use.
  beforeAll(async () => {
    await Promise.all([
      xo.call('vm.set', {
        id: template.id,
        virtualizationMode,
      }),
      xo.call('vm.setBootOrder', { vm: template.id, order: bootOrder }),
    ])
    await xo.waitObjectState(template.id, {
      virtualizationMode,
      boot: {
        order: bootOrder,
      },
    })
  })

  // With a disk attached, the template's boot order is kept unchanged.
  test('create vm with disks should keep the template boot order', async () => {
    const vm = await xo.createTempVm({
      template: template.id,
      VDIs: [
        {
          size: 1,
          SR: config.srs.default,
          type: 'user',
        },
      ],
    })
    expect(vm.boot.order).toBe(bootOrder)
  })

  // Without any disk, network boot ('n') is expected to be prepended.
  test('create vm without disks should make network boot the first option', async () => {
    const vm = await xo.createTempVm({
      template: template.id,
    })
    expect(vm.boot.order).toBe('n' + bootOrder)
  })

  // A network installation also prepends network boot, even with disks.
  test('create vm with disks and network installation should make network boot the first option', async () => {
    const vm = await xo.createTempVm({
      template: template.id,
      installation: {
        method: 'network',
        repository: 'pxe',
      },
      VDIs: [
        {
          size: 1,
          SR: config.srs.default,
          type: 'user',
        },
      ],
    })
    expect(vm.boot.order).toBe('n' + bootOrder)
  })
})
// Regression test for issue 5265: removing a subject from a resource set
// must not fail.
describe('5265', () => {
  const rsName = 'xo-server-test resource set'
  const subjects = ['one', 'two', 'three']

  test('resourceSet.removeSubject call', async () => {
    const id = await xo.createTempResourceSet({
      name: rsName,
      subjects,
    })
    await xo.call('resourceSet.removeSubject', {
      id,
      subject: subjects[0],
    })
  })
})
})

View File

@@ -1,6 +1,6 @@
{
"name": "xo-server-usage-report",
"version": "0.8.0",
"version": "0.9.0",
"license": "AGPL-3.0-or-later",
"description": "Report resources usage with their evolution",
"keywords": [
@@ -41,7 +41,7 @@
"csv-stringify": "^5.5.0",
"handlebars": "^4.0.6",
"html-minifier": "^4.0.0",
"human-format": "^0.10.0",
"human-format": "^0.11.0",
"lodash": "^4.17.4",
"promise-toolbox": "^0.15.0"
},

View File

@@ -405,16 +405,26 @@ async function getSrsStats({ xo, xoObjects }) {
}
function computeGlobalVmsStats({ haltedVms, vmsStats, xo }) {
const allVms = concat(
map(vmsStats, vm => ({
uuid: vm.uuid,
name: vm.name,
})),
map(haltedVms, vm => ({
uuid: vm.uuid,
name: vm.name_label,
}))
)
const allVms = vmsStats.map(vm => ({
uuid: vm.uuid,
name: vm.name,
}))
haltedVms.forEach(vm => {
const isReplication =
'start' in vm.blockedOperations &&
vm.tags.some(
tag => tag === 'Disaster Recovery' || tag === 'Continuous Replication'
)
// Exclude replicated VMs because they keep being created/destroyed due to the implementation
if (!isReplication) {
allVms.push({
uuid: vm.uuid,
name: vm.name_label,
})
}
})
return Object.assign(
computeMeans(vmsStats, [
@@ -426,7 +436,7 @@ function computeGlobalVmsStats({ haltedVms, vmsStats, xo }) {
'netTransmission',
]),
{
number: allVms.length,
number: vmsStats.length + haltedVms.length,
allVms,
}
)

View File

@@ -42,6 +42,17 @@ perMessageDeflate = { threshold = 524288 } # 512kiB
defaultTokenValidity = '30 days'
maxTokenValidity = '0.5 year'
# 2020-09-29
# Helpful to smoothly transition from old to new LDAP plugin implementation
# See https://github.com/vatesfr/xen-orchestra/pull/5279
# This will be removed in 1 year
#
# When a user logs in using an authentication provider, but another user with the
# same username is found in XO:
# - if true: the 2 users will be merged
# - if false: the user will not be able to log in.
mergeProvidersUsers = true
# Default to `maxTokenValidity`
#permanentCookieValidity = '30 days'
@@ -119,6 +130,12 @@ timeout = 600e3
#[workerOptions]
#numWorkers = 2
[selfService]
# If true, a snapshot of a Self Service VM will consume as much resources as a
# normal VM would
ignoreVmSnapshotResources = false
[xapiOptions]
maxUncoalescedVdis = 1
vdiExportConcurrency = 12

View File

@@ -1,7 +1,7 @@
{
"private": true,
"name": "xo-server",
"version": "5.62.1",
"version": "5.67.0",
"license": "AGPL-3.0-or-later",
"description": "Server part of Xen-Orchestra",
"keywords": [
@@ -38,7 +38,7 @@
"@xen-orchestra/cron": "^1.0.6",
"@xen-orchestra/defined": "^0.0.0",
"@xen-orchestra/emit-async": "^0.0.0",
"@xen-orchestra/fs": "^0.10.4",
"@xen-orchestra/fs": "^0.11.1",
"@xen-orchestra/log": "^0.2.0",
"@xen-orchestra/mixin": "^0.0.0",
"@xen-orchestra/self-signed": "^0.1.0",
@@ -46,8 +46,8 @@
"@vates/decorate-with": "^0.0.1",
"@vates/parse-duration": "0.1.0",
"ajv": "^6.1.1",
"app-conf": "^0.7.0",
"archiver": "^3.0.0",
"app-conf": "^0.8.0",
"archiver": "^5.0.0",
"async-iterator-to-stream": "^1.0.1",
"base64url": "^3.0.0",
"bind-property-descriptor": "^1.0.0",
@@ -58,7 +58,7 @@
"connect-flash": "^0.1.1",
"cookie": "^0.4.0",
"cookie-parser": "^1.4.3",
"d3-time-format": "^2.1.1",
"d3-time-format": "^3.0.0",
"debug": "^4.0.1",
"decorator-synchronized": "^0.5.0",
"deptree": "^1.0.0",
@@ -70,7 +70,7 @@
"fatfs": "^0.10.4",
"from2": "^2.3.0",
"fs-extra": "^9.0.0",
"get-stream": "^5.1.0",
"get-stream": "^6.0.0",
"golike-defer": "^0.4.1",
"hashy": "^0.8.0",
"helmet": "^3.9.0",
@@ -78,10 +78,10 @@
"http-proxy": "^1.16.2",
"http-request-plus": "^0.8.0",
"http-server-plus": "^0.11.0",
"human-format": "^0.10.0",
"human-format": "^0.11.0",
"is-redirect": "^1.0.0",
"iterable-backoff": "^0.1.0",
"jest-worker": "^24.0.0",
"jest-worker": "^26.3.0",
"js-yaml": "^3.10.0",
"json-rpc-peer": "^0.16.0",
"json5": "^2.0.1",
@@ -96,24 +96,24 @@
"moment-timezone": "^0.5.14",
"ms": "^2.1.1",
"multikey-hash": "^1.0.4",
"ndjson": "^1.5.0",
"ndjson": "^2.0.0",
"openpgp": "^4.10.4",
"otplib": "^11.0.0",
"parse-pairs": "^1.0.0",
"partial-stream": "0.0.0",
"passport": "^0.4.0",
"passport-local": "^1.0.0",
"pretty-format": "^24.0.0",
"pretty-format": "^26.4.2",
"promise-toolbox": "^0.15.0",
"proxy-agent": "^3.0.0",
"pug": "^2.0.0-rc.4",
"pug": "^3.0.0",
"pump": "^3.0.0",
"pumpify": "^2.0.0",
"pw": "^0.0.4",
"readable-stream": "^3.2.0",
"redis": "^2.8.0",
"redis": "^3.0.2",
"schema-inspector": "^1.6.8",
"semver": "^6.0.0",
"semver": "^7.3.2",
"serve-static": "^1.13.1",
"set-cookie-parser": "^2.3.5",
"source-map-support": "^0.5.16",
@@ -123,10 +123,10 @@
"stoppable": "^1.0.5",
"strict-timeout": "^1.0.0",
"struct-fu": "^1.2.0",
"subleveldown": "^4.1.4",
"subleveldown": "^5.0.1",
"tar-stream": "^2.0.1",
"through2": "^3.0.0",
"tmp": "^0.1.0",
"through2": "^4.0.2",
"tmp": "^0.2.1",
"unzipper": "^0.10.5",
"uuid": "^3.0.1",
"value-matcher": "^0.2.0",
@@ -138,8 +138,8 @@
"xo-acl-resolver": "^0.4.1",
"xo-collection": "^0.4.1",
"xo-common": "^0.5.0",
"xo-remote-parser": "^0.5.0",
"xo-vmdk-to-vhd": "^1.2.1",
"xo-remote-parser": "^0.6.0",
"xo-vmdk-to-vhd": "^1.3.1",
"yazl": "^2.4.3"
},
"devDependencies": {

View File

@@ -12,8 +12,6 @@ export async function getCurrentPermissions() {
return /* await */ this.getPermissionsForUser(this.session.get('user_id'))
}
getCurrentPermissions.permission = ''
getCurrentPermissions.description =
'get (explicit) permissions by object for the current user'

View File

@@ -298,3 +298,19 @@ isHyperThreadingEnabled.params = {
isHyperThreadingEnabled.resolve = {
host: ['id', 'host', 'administrate'],
}
// -------------------------------------------------------------------
// Ask the given host's XAPI to rescan its physical network interfaces.
export async function scanPifs({ host }) {
  const xapi = this.getXapi(host)
  await xapi.callAsync('PIF.scan', host._xapiRef)
}

scanPifs.description = 'Refresh the list of physical interfaces for this host'

scanPifs.params = {
  id: { type: 'string' },
}

scanPifs.resolve = {
  host: ['id', 'host', 'administrate'],
}

View File

@@ -25,7 +25,6 @@ export function getAll(params) {
)
}
getAll.permission = ''
getAll.description = 'List all ipPools'
// -------------------------------------------------------------------

View File

@@ -42,8 +42,6 @@ export async function setDefaultSr({ sr }) {
await this.getXapi(sr).setDefaultSr(sr._xapiId)
}
setDefaultSr.permission = '' // signed in
setDefaultSr.params = {
sr: {
type: 'string',

Some files were not shown because too many files have changed in this diff Show More