Compare commits

..

136 Commits

Author SHA1 Message Date
Florent Beauchamp
5299c101c2 fix: add missing service worker file 2021-10-07 17:55:20 +02:00
Florent Beauchamp
83ca34807d feat(xo-web): send notification 2021-10-07 10:27:52 +02:00
Julien Fontanet
9e50b5dd83 feat(proxy): logging is now dynamically configurable
It was done for xo-server in f20d5cd8d
2021-10-06 16:54:57 +02:00
Julien Fontanet
29d8753574 chore(backups/VmBackup#_selectBaseVm): add debug logs 2021-10-06 16:48:42 +02:00
Pierre Donias
f93e1e1695 feat: release 5.63.0 (#5925) 2021-09-30 15:25:34 +02:00
Pierre Donias
0eaac8fd7a feat: technical release (#5924) 2021-09-30 11:17:45 +02:00
Julien Fontanet
06c71154b9 fix(xen-api/_setHostAddressInUrl): pass params in array
Introduced in fb21e4d58
2021-09-30 10:32:12 +02:00
Julien Fontanet
0e8f314dd6 fix(xo-web/new-vm): don't send default networkConfig (#5923)
Fixes #5918
2021-09-30 09:37:12 +02:00
Florent BEAUCHAMP
f53ec8968b feat(xo-web/SortedTable): move filter and pagination to top (#5914) 2021-09-29 17:35:46 +02:00
Mathieu
919d118f21 feat(xo-web/health): filter duplicated MAC addresses by running VMs (#5917)
See xoa-support#4054
2021-09-24 17:25:42 +02:00
Mathieu
216b759df1 feat(xo-web/health): hide CR VMs duplicated MAC addresses (#5916)
See xoa-support#4054
2021-09-24 15:52:34 +02:00
Julien Fontanet
01450db71e fix(proxy/backup.run): clear error on license issue
Fixes https://xcp-ng.org/forum/topic/4901/backups-silently-fail-with-invalid-xo-proxy-license
2021-09-24 13:15:32 +02:00
Julien Fontanet
ed987e1610 fix(proxy/api/ndJsonStream): send JSON-RPC error if whole iteration failed
See https://xcp-ng.org/forum/topic/4901/backups-silently-fail-with-invalid-xo-proxy-license
2021-09-24 13:15:24 +02:00
Florent BEAUCHAMP
2773591e1f feat(xo-web): add go back to ActionButton and use it when saving a backup (#5913)
See xoa-support#2149
2021-09-24 11:38:37 +02:00
Pierre Donias
a995276d1e fix(xo-server-netbox): better handle missing uuid custom field (#5909)
Fixes #5905
See #5806
See #5834
See xoa-support#3812

- Check if `uuid` custom field has correctly been configured before synchronizing
- Delete VMs that don't have a UUID before synchronizing VMs to avoid conflicts
2021-09-22 18:08:09 +02:00
Nicolas Raynaud
ffb6a8fa3f feat(VHD import): ensure uploaded file is a VHD (#5906) 2021-09-21 16:25:50 +02:00
Pierre Donias
0966efb7f2 fix(xo-server-netbox): handle nested prefixes (#5908)
See xoa-support#4018

When assigning prefixes to VMs, always pick the smallest prefix that the IP
matches
2021-09-21 09:55:47 +02:00
Julien Fontanet
4a0a708092 feat: release 5.62.1 2021-09-17 10:04:36 +02:00
Julien Fontanet
6bf3b6f3e0 feat(xo-server): 5.82.2 2021-09-17 09:24:32 +02:00
Julien Fontanet
8f197fe266 feat(@xen-orchestra/proxy): 0.14.6 2021-09-17 09:24:05 +02:00
Julien Fontanet
e1a3f680f2 feat(xen-api): 0.34.2 2021-09-17 09:23:28 +02:00
Julien Fontanet
e89cca7e90 feat: technical release 2021-09-17 09:19:26 +02:00
Nicolas Raynaud
5bb2767d62 fix(xo-server/{disk,vm}.import): fix import of very small VMDK files (#5903) 2021-09-17 09:17:34 +02:00
Julien Fontanet
95f029e0e7 fix(xen-api/putResource): fix non-stream use case
Introduced by ea10df8a92
2021-09-14 17:42:20 +02:00
Julien Fontanet
fb21e4d585 chore(xen-api/_setHostAddressInUrl): use _roCall to fetch network ref
Introduced by a84fac1b6
2021-09-14 17:42:20 +02:00
Julien Fontanet
633805cec9 fix(xen-api/_setHostAddressInUrl): correctly fetch network ref
Introduced by a84fac1b6
2021-09-14 17:42:20 +02:00
Marc Ungeschikts
b8801d7d2a "rentention" instead of "retention" (#5904) 2021-09-14 16:30:10 +02:00
Julien Fontanet
a84fac1b6a fix(xen-api/{get,put}Resource): use provided address when possible
Fixes #5896

Introduced by ea10df8a92

Don't use the address provided by XAPI when connecting to the pool master and without a default migration network as it will unnecessarily break NATted hosts.
2021-09-14 13:52:34 +02:00
Julien Fontanet
a9de4ceb30 chore(xo-server/config.toml): explicit auth delay is per user 2021-09-12 10:55:31 +02:00
Julien Fontanet
827b55d60c fix(xo-server/config.toml): typo 2021-09-12 10:54:49 +02:00
Julien Fontanet
0e1fe76b46 chore: update dev deps 2021-09-09 13:48:15 +02:00
Julien Fontanet
097c9e8e12 feat(@xen-orchestra/proxy): 0.14.5 2021-09-07 19:02:57 +02:00
Pierre Donias
266356cb20 fix(xo-server/xapi-objects-to-xo/VM/addresses): handle newline-delimited IPs (#5897)
See xoa-support#3812
See #5860

This is related to a505cd9 which handled space delimited IPs, but apparently,
IPs can also be newline delimited depending on which Xen tools version is used.
2021-09-03 12:30:47 +02:00
Julien Fontanet
6dba39a804 fix(xo-server/vm.set): fix converting to BIOS (#5895)
Fixes xoa-support#3991
2021-09-02 14:11:39 +02:00
Olivier Lambert
3ddafa7aca fix(docs/xoa): clarify first console connection (#5894) 2021-09-01 12:51:33 +02:00
Julien Fontanet
9d8e232684 chore(xen-api): dont import promise-toolbox/retry twice
Introduced by ea10df8a9
2021-08-31 12:28:23 +02:00
Anthony Stivers
bf83c269c4 fix(xo-web/user): SSH key formatting (#5892)
Fixes #5891

Allow SSH key to be broken anywhere to avoid breaking page formatting.
2021-08-31 11:42:25 +02:00
Pierre Donias
54e47c98cc feat: release 5.62.0 (#5893) 2021-08-31 10:59:07 +02:00
Pierre Donias
118f2594ea feat: technical release (#5889) 2021-08-30 15:40:26 +02:00
Julien Fontanet
ab4fcd6ac4 fix(xen-api/{get,put}Resource): correctly fetch host
Introduced by ea10df8a9
2021-08-30 15:23:42 +02:00
Pierre Donias
ca6f345429 feat: technical release (#5888) 2021-08-30 12:08:10 +02:00
Pierre Donias
79b8e1b4e4 fix(xo-server-auth-ldap): ensure-array dependency (#5887) 2021-08-30 12:01:06 +02:00
Pierre Donias
cafa1ffa14 feat: technical release (#5886) 2021-08-30 11:01:14 +02:00
Mathieu
ea10df8a92 feat(xen-api/{get,put}Resource): use default migration network if available (#5883) 2021-08-30 00:14:31 +02:00
Julien Fontanet
85abc42100 chore(xo-web): use sass instead of node-sass
Fixes build with Node 16
2021-08-27 14:22:00 +02:00
Mathieu
4747eb4386 feat(host): display warning for eol host version (#5847)
Fixes #5840
2021-08-24 14:43:01 +02:00
tisteagle
ad9cc900b8 feat(docs/updater): add nodejs.org to required domains (#5881) 2021-08-22 16:33:16 +02:00
Pierre Donias
6cd93a7bb0 feat(xo-server-netbox): add primary IPs to VMs (#5879)
See xoa-support#3812
See #5633
2021-08-20 12:47:29 +02:00
Julien Fontanet
3338a02afb feat(fs/getSyncedHandler): returns disposable to an already synced remote
Also, no need to forget it.
2021-08-20 10:14:39 +02:00
Julien Fontanet
31cfe82224 chore: update to index-modules@0.4.3
Fixes #5877

Introduced by 030477454

This new version fixes the `--auto` mode used by `xo-web`.
2021-08-18 10:08:10 +02:00
Pierre Donias
70a191336b fix(CHANGELOG): missing PR link (#5876) 2021-08-17 10:13:22 +02:00
Julien Fontanet
030477454c chore: update deps 2021-08-17 09:59:42 +02:00
Pierre Donias
2a078d1572 fix(xo-server/host): clearHost argument needs to have a $pool property (#5875)
See xoa-support#3118
Introduced by b2a56c047c
2021-08-17 09:51:36 +02:00
Julien Fontanet
3c1f96bc69 chore: update dev deps 2021-08-16 14:10:18 +02:00
Mathieu
7d30bdc148 fix(xo-web/TabButtonLink): should not be empty on small screens (#5874) 2021-08-16 09:45:44 +02:00
Mathieu
5d42961761 feat(xo-server/network.create): allow pool admins (#5873) 2021-08-13 14:22:58 +02:00
Julien Fontanet
f20d5cd8d3 feat(xo-server): logging is now dynamically configurable 2021-08-12 17:30:56 +02:00
Julien Fontanet
f5111c0f41 fix(mixins/Config#watch): use deep equality to check changes
Because objects (and arrays) will always be new ones and thus different.
2021-08-12 17:29:57 +02:00
Pierre Donias
f5473236d0 fix(xo-web): dont warn when restoring XO config (#5872) 2021-08-12 09:52:45 +02:00
Julien Fontanet
d3cb31f1a7 feat(log/configure): filter can be an array 2021-08-11 18:09:42 +02:00
Pierre Donias
d5f5cdd27a fix(xo-server-auth-ldap): create logger inside plugin (#5864)
The plugin was wrongly expecting a logger instance to be passed on instantiation
2021-08-11 11:21:22 +02:00
Pierre Donias
656dc8fefc fix(xo-server-ldap): handle groups with no members (#5862)
See xoa-support#3906
2021-08-10 14:12:39 +02:00
Pierre Donias
a505cd9567 fix(xo-server/xapi-objects-to-xo/VM/addresses): handle old tools alias properties (#5860)
See https://xcp-ng.org/forum/topic/4810
See #5805
2021-08-10 10:22:13 +02:00
Pierre Donias
f2a860b01a feat: release 5.61.0 (#5867) 2021-07-30 16:48:13 +02:00
Pierre Donias
1a5b93de9c feat: technical release (#5866) 2021-07-30 16:31:16 +02:00
Pierre Donias
0f165b33a6 feat: technical release (#5865) 2021-07-30 15:21:49 +02:00
Pierre Donias
4f53555f09 Revert "chore(backups/DeltaReplication): unify base VM detection" (#5861)
This reverts commit 9139c5e9d6.
See https://xcp-ng.org/forum/topic/4817
2021-07-30 14:55:00 +02:00
Pierre Donias
175be44823 feat(xo-web/VM/advanced): handle pv_in_pvh virtualization mode (#5857)
And handle unknown virtualization modes by showing the raw string
2021-07-28 18:41:22 +02:00
Julien Fontanet
20a6428290 fix(xo-server/xen-servers): fix lodash/pick import
Introduced by 4b4bea5f3

Fixes #5858
2021-07-28 08:48:17 +02:00
Julien Fontanet
4b4bea5f3b chore(xo-server): log ids on xapiObjectToXo errors 2021-07-27 15:05:00 +02:00
Pierre Donias
c82f860334 feat: technical release (#5856) 2021-07-27 11:08:53 +02:00
Pierre Donias
b2a56c047c feat(xo-server/clearHost): use pool's default migration network (#5851)
Fixes #5802
See xoa-support#3118
2021-07-27 10:44:30 +02:00
Julien Fontanet
bc6afc3933 fix(xo-server): don't fail on invalid pool pattern
Fixes #5849
2021-07-27 05:13:45 +02:00
Pierre Donias
280e4b65c3 feat(xo-web/VM/{shutdown,reboot}): ask user if they want to force when no tools (#5855)
Fixes #5838
2021-07-26 17:22:31 +02:00
Julien Fontanet
c6f22f4d75 fix(backups): block start_on operation on replicated VMs (#5852) 2021-07-26 15:01:11 +02:00
Pierre Donias
4bed8eb86f feat(xo-server-netbox): optionally allow self-signed certificates (#5850)
See https://xcp-ng.org/forum/topic/4786/netbox-plugin-does-not-allow-self-signed-certificate
2021-07-23 09:53:02 +02:00
Julien Fontanet
c482f18572 chore(xo-web/vm/tab-advanced): shutdown is a valid operation 2021-07-23 09:49:32 +02:00
Mathieu
d7668acd9b feat(xo-web/sr/tab-disks): display the active vdi of the basecopy (#5826)
See xoa-support#3446
2021-07-21 09:32:24 +02:00
Julien Fontanet
05b978c568 chore: update dev deps 2021-07-20 10:20:52 +02:00
Julien Fontanet
62e5ab6990 chore: update to http-request-plus@0.12.0 2021-07-20 10:03:16 +02:00
Mathieu
12216f1463 feat(xo-web/vm): rescan ISO SRs available in console view (#5841)
See xoa-support#3896
See xoa-support#3888
See xoa-support#3909
Continuity of d7940292d0
Introduced by f3501acb64
2021-07-16 17:02:10 +02:00
Pierre Donias
cbfa13a8b4 docs(netbox): make it clear that the uuid custom field needs to be lower case (#5843)
Fixes #5831
2021-07-15 09:45:05 +02:00
Pierre Donias
03ec0cab1e feat(xo-server-netbox): add data field to Netbox API errors (#5842)
Fixes #5834
2021-07-13 17:22:51 +02:00
mathieuRA
d7940292d0 feat(xo-web/vm): rescan ISO SRs available in console view 2021-07-12 11:55:02 +02:00
Julien Fontanet
9139c5e9d6 chore(backups/DeltaReplication): unify base VM detection
Might help avoiding the *unable to find base VM* error.
2021-07-09 15:14:37 +02:00
Julien Fontanet
65e62018e6 chore(backups/importDeltaVm): dont explicitly wait for export tasks
Might be related to stuck importation issues.
2021-07-08 09:56:06 +02:00
Julien Fontanet
138a3673ce fix(xo-server/importConfig): fix this._app.clean is not a function
Fixes #5836
2021-07-05 17:57:47 +02:00
Pierre Donias
096f443b56 feat: release 5.60.0 (#5833) 2021-06-30 15:49:52 +02:00
Pierre Donias
b37f30393d feat: technical release (#5832) 2021-06-30 11:07:14 +02:00
Ronan Abhamon
f095a05c42 feat(docs/load_balancing): add doc about VM anti-affinity mode (#5830)
* feat(docs/load_balancing): add doc about VM anti-affinity mode

Signed-off-by: Ronan Abhamon <ronan.abhamon@vates.fr>

* grammar edits for anti-affinity

Co-authored-by: Jon Sands <fohdeesha@gmail.com>
2021-06-30 10:37:25 +02:00
Pierre Donias
3d15a73f1b feat(xo-web/vm/new disk): generate random name (#5828) 2021-06-28 11:26:09 +02:00
Julien Fontanet
bbd571e311 chore(xo-web/vm/tab-disks.js): format with Prettier 2021-06-28 11:25:31 +02:00
Pierre Donias
a7c554f033 feat(xo-web/snapshots): identify VM's parent snapshot (#5824)
See xoa-support#3775
2021-06-25 12:07:50 +02:00
Pierre Donias
25b4532ce3 feat: technical release (#5825) 2021-06-25 11:13:23 +02:00
Pierre Donias
a304f50a6b fix(xo-server-netbox): compare compact notations of IPv6 (#5822)
XAPI doesn't use IPv6 compact notation while Netbox automatically compacts them
on creation. Comparing those 2 notations makes XO believe that the IPs in
Netbox should be deleted and new ones should be created, even though they're
actually the same IPs. This change compacts the IPs before comparing them.
2021-06-24 17:00:07 +02:00
Pierre Donias
e75f476965 fix(xo-server-netbox): filter out devices' interfaces (#5821)
See xoa-support#3812

In Netbox, a device interface and a VM interface can have the same ID `x`,
which means that listing IPs with `assigned_object_id=x` won't only get the
VM's interface's IPs but also the device's interface's IPs. This made XO
believe that those extra IPs shouldn't exist and delete them. This change
makes sure to only grab VM interface IPs.
2021-06-23 15:27:11 +02:00
Julien Fontanet
1c31460d27 fix(xo-server/disconnectXenServer): delete pool association
This should prevent the *server is already connected* issue after reinstalling host.
2021-06-23 10:11:12 +02:00
Julien Fontanet
19db468bf0 fix(CHANGELOG.unreleased): vhd-lib
Introduced by aa4f1b834
2021-06-23 09:26:23 +02:00
Julien Fontanet
5fe05578c4 fix(xo-server/backupNg.importVmBackup): returns id of imported VM
Fixes #5820

Introduced by d9ce1b3a9.
2021-06-22 18:26:01 +02:00
Julien Fontanet
956f5a56cf feat(backups/RemoteAdapter#cleanVm): fix backup size if necessary
Fixes #5810
Fixes #5815
2021-06-22 18:16:52 +02:00
Julien Fontanet
a3f589d740 feat(@xen-orchestra/proxy): 0.14.3 2021-06-21 14:36:55 +02:00
Julien Fontanet
beef09bb6d feat(@xen-orchestra/backups): 0.11.2 2021-06-21 14:30:32 +02:00
Julien Fontanet
ff0a246c28 feat(proxy/api/ndJsonStream): handle iterable error 2021-06-21 14:26:55 +02:00
Julien Fontanet
f1459a1a52 fix(backups/VmBackup#_callWriters): writers.delete
Introduced by 56e4847b6
2021-06-21 14:26:55 +02:00
Mathieu
f3501acb64 feat(xo-web/vm/tab-disks): rescan ISO SRs (#5814)
See https://xcp-ng.org/forum/topic/4588/add-rescan-iso-sr-from-vm-menu
2021-06-18 16:15:33 +02:00
Ronan Abhamon
2238c98e95 feat(load-balancer): log vm and host names when a VM is migrated + category (density, performance, ...) (#5808)
Co-authored-by: Julien Fontanet <julien.fontanet@isonoe.net>
2021-06-18 09:49:33 +02:00
Julien Fontanet
9658d43f1f feat(xo-server-load-balancer): use @xen-orchestra/log 2021-06-18 09:44:37 +02:00
Julien Fontanet
1748a0c3e5 chore(xen-api): remove unused inject-events 2021-06-17 16:41:04 +02:00
Julien Fontanet
4463d81758 feat(@xen-orchestra/proxy): 0.14.2 2021-06-17 15:58:00 +02:00
Julien Fontanet
74221a4ab5 feat(@xen-orchestra/backups): 0.11.1 2021-06-17 15:57:10 +02:00
Julien Fontanet
0d998ed342 feat(@xen-orchestra/xapi): 0.6.4 2021-06-17 15:56:21 +02:00
Julien Fontanet
7d5a01756e feat(xen-api): 0.33.1 2021-06-17 15:55:20 +02:00
Pierre Donias
d66313406b fix(xo-web/new-vm): show correct amount of memory in summary (#5817) 2021-06-17 14:36:44 +02:00
Pierre Donias
d96a267191 docs(web-hooks): add "wait for response" and backup related doc (#5819)
See #5420
See #5360
2021-06-17 14:34:03 +02:00
Julien Fontanet
5467583bb3 fix(backups/_VmBackup#_callWriters): dont run single writer twice
Introduced by 56e4847b6

See https://xcp-ng.org/forum/topic/4659/backup-failed
2021-06-17 14:14:48 +02:00
Rajaa.BARHTAOUI
9a8138d07b fix(xo-server-perf-alert): smart mode: select only running VMs and hosts (#5811) 2021-06-17 11:56:04 +02:00
Pierre Donias
36c290ffea feat(xo-web/jobs): add host.emergencyShutdownHost to the methods list (#5818) 2021-06-17 11:55:51 +02:00
Julien Fontanet
3413bf9f64 fix(xen-api/{get,put}Resource): distinguish cancelation and connection issue (2)
Follow up of 057a1cbab
2021-06-17 10:12:09 +02:00
Julien Fontanet
3c352a3545 fix(backups/_VmBackup#_callWriters): missing writer var
Fixes #5816
2021-06-17 08:53:38 +02:00
Julien Fontanet
56e4847b6b feat(backups/_VmBackup#_callWriters): dont use generic error when only one writer 2021-06-16 10:15:10 +02:00
Julien Fontanet
033b671d0b fix(xo-server): limit number of xapiObjectToXo logs
See xoa-support#3830
2021-06-16 09:59:07 +02:00
Julien Fontanet
51f013851d feat(xen-api): limit concurrent calls to 20
Fixes xoa-support#3767

Can be changed via `callConcurrency` option.
2021-06-14 18:37:58 +02:00
Yannick Achy
dafa4ced27 feat(docs/backups): new concurrency model (#5701) 2021-06-14 16:38:29 +02:00
Pierre Donias
05fe154749 fix(xo-server/xapi): don't silently swallow errors on _callInstallationPlugin (#5809)
See xoa-support#3738

Introduced by a73acedc4d

This was done to prevent triggering an error when the pack was already
installed but a XENAPI_PLUGIN_FAILURE error can happen for other reasons
2021-06-14 16:01:02 +02:00
Nick Zana
5ddceb4660 fix(docs/from sources): change GitHub URL to use TLS (#5813) 2021-06-14 00:34:42 +02:00
Julien Fontanet
341a1b195c fix(docs): filenames in how to update self-signed cert
See xoa-support#3821
2021-06-11 17:09:23 +02:00
Julien Fontanet
29c3d1f9a6 feat(xo-web/debug): add timing 2021-06-11 10:08:14 +02:00
Rajaa.BARHTAOUI
734d4fb92b fix(xo-server#listPoolsMatchingCriteria): fix "unknown error from the peer" error (#5807)
See xoa-support#3489

Introduced by cd8c618f08
2021-06-08 17:00:45 +02:00
Julien Fontanet
057a1cbab6 feat(xen-api/{get,put}Resource): distringuish cancelation and connection issue
See xoa-support#3643
2021-06-05 01:15:36 +02:00
Pierre Donias
d44509b2cd fix(xo-server/xapi-object-to-xo/vm): handle space-delimited IP addresses (#5805)
Fixes #5801
2021-06-04 10:01:08 +02:00
Julien Fontanet
58cf69795a fix(xo-server): remove broken API methods
Introduced by bdb0ca836

These methods were linked to the legacy backups which are no longer supported.
2021-06-03 14:49:18 +02:00
Julien Fontanet
6d39512576 chore: format with Prettier
Introduced by 059843f03
2021-06-03 14:49:14 +02:00
Julien Fontanet
ec4dde86f5 fix(CHANGELOG.unreleased): add missing entries
Introduced by 1c91fb9dd
2021-06-02 16:55:45 +02:00
Nicolas Raynaud
1c91fb9dd5 feat(xo-{server,web}): improve OVA import error reporting (#5797) 2021-06-02 16:23:08 +02:00
Yannick Achy
cbd650c5ef feat(docs/troubleshooting): set xoa SSH password (#5798) 2021-06-02 09:50:29 +02:00
Julien Fontanet
c5a769cb29 fix(xo-server/glob-matcher): fix micromatch import
Introduced by 254558e9d
2021-05-31 17:36:47 +02:00
170 changed files with 4095 additions and 3383 deletions

View File

@@ -24,7 +24,7 @@
"dependencies": {
"@vates/multi-key-map": "^0.1.0",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/log": "^0.2.1",
"@xen-orchestra/log": "^0.3.0",
"ensure-array": "^1.0.0"
}
}

View File

@@ -31,7 +31,7 @@
},
"dependencies": {
"@vates/decorate-with": "^0.1.0",
"@xen-orchestra/log": "^0.2.1",
"@xen-orchestra/log": "^0.3.0",
"golike-defer": "^0.5.1",
"object-hash": "^2.0.1"
},

View File

@@ -17,10 +17,10 @@ interface Record {
}
export class AuditCore {
constructor(storage: Storage) { }
public add(subject: any, event: string, data: any): Promise<Record> { }
public checkIntegrity(oldest: string, newest: string): Promise<number> { }
public getFrom(newest?: string): AsyncIterator { }
public deleteFrom(newest: string): Promise<void> { }
public deleteRangeAndRewrite(newest: string, oldest: string): Promise<void> { }
constructor(storage: Storage) {}
public add(subject: any, event: string, data: any): Promise<Record> {}
public checkIntegrity(oldest: string, newest: string): Promise<number> {}
public getFrom(newest?: string): AsyncIterator {}
public deleteFrom(newest: string): Promise<void> {}
public deleteRangeAndRewrite(newest: string, oldest: string): Promise<void> {}
}

View File

@@ -10,12 +10,13 @@ const { resolve } = require('path')
const adapter = new RemoteAdapter(require('@xen-orchestra/fs').getHandler({ url: 'file://' }))
module.exports = async function main(args) {
const { _, remove, merge } = getopts(args, {
const { _, fix, remove, merge } = getopts(args, {
alias: {
fix: 'f',
remove: 'r',
merge: 'm',
},
boolean: ['merge', 'remove'],
boolean: ['fix', 'merge', 'remove'],
default: {
merge: false,
remove: false,
@@ -25,7 +26,7 @@ module.exports = async function main(args) {
await asyncMap(_, async vmDir => {
vmDir = resolve(vmDir)
try {
await adapter.cleanVm(vmDir, { remove, merge, onLog: log => console.warn(log) })
await adapter.cleanVm(vmDir, { fixMetadata: fix, remove, merge, onLog: (...args) => console.warn(...args) })
} catch (error) {
console.error('adapter.cleanVm', vmDir, error)
}

View File

@@ -5,11 +5,12 @@ require('./_composeCommands')({
get main() {
return require('./commands/clean-vms')
},
usage: `[--merge] [--remove] xo-vm-backups/*
usage: `[--fix] [--merge] [--remove] xo-vm-backups/*
Detects and repair issues with VM backups.
Options:
-f, --fix Fix metadata issues (like size)
-m, --merge Merge (or continue merging) VHD files that are unused
-r, --remove Remove unused, incomplete, orphan, or corrupted files
`,

View File

@@ -7,8 +7,8 @@
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"dependencies": {
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/backups": "^0.11.0",
"@xen-orchestra/fs": "^0.17.0",
"@xen-orchestra/backups": "^0.13.0",
"@xen-orchestra/fs": "^0.18.0",
"filenamify": "^4.1.0",
"getopts": "^2.2.5",
"lodash": "^4.17.15",

View File

@@ -103,9 +103,21 @@ exports.VmBackup = class VmBackup {
// calls fn for each function, warns of any errors, and throws only if there are no writers left
async _callWriters(fn, warnMessage, parallel = true) {
const writers = this._writers
if (writers.size === 0) {
const n = writers.size
if (n === 0) {
return
}
if (n === 1) {
const [writer] = writers
try {
await fn(writer)
} catch (error) {
writers.delete(writer)
throw error
}
return
}
await (parallel ? asyncMap : asyncEach)(writers, async function (writer) {
try {
await fn(writer)
@@ -291,12 +303,14 @@ exports.VmBackup = class VmBackup {
let baseVm = findLast(this._jobSnapshots, _ => 'xo:backup:exported' in _.other_config)
if (baseVm === undefined) {
debug('no base VM found')
return
}
const fullInterval = this._settings.fullInterval
const deltaChainLength = +(baseVm.other_config['xo:backup:deltaChainLength'] ?? 0) + 1
if (!(fullInterval === 0 || fullInterval > deltaChainLength)) {
debug('not using base VM becaust fullInterval reached')
return
}
@@ -311,6 +325,10 @@ exports.VmBackup = class VmBackup {
const srcVdi = srcVdis[snapshotOf]
if (srcVdi !== undefined) {
baseUuidToSrcVdi.set(await xapi.getField('VDI', baseRef, 'uuid'), srcVdi)
} else {
debug('no base VDI found', {
vdi: srcVdi.uuid,
})
}
})
@@ -323,7 +341,16 @@ exports.VmBackup = class VmBackup {
const fullVdisRequired = new Set()
baseUuidToSrcVdi.forEach((srcVdi, baseUuid) => {
if (!presentBaseVdis.has(baseUuid)) {
if (presentBaseVdis.has(baseUuid)) {
debug('found base VDI', {
base: baseUuid,
vdi: srcVdi.uuid,
})
} else {
debug('missing base VDI', {
base: baseUuid,
vdi: srcVdi.uuid,
})
fullVdisRequired.add(srcVdi.uuid)
}
})

View File

@@ -1,4 +1,5 @@
const assert = require('assert')
const sum = require('lodash/sum')
const { asyncMap } = require('@xen-orchestra/async-map')
const { default: Vhd, mergeVhd } = require('vhd-lib')
const { dirname, resolve } = require('path')
@@ -113,7 +114,7 @@ const listVhds = async (handler, vmDir) => {
return { vhds, interruptedVhds }
}
exports.cleanVm = async function cleanVm(vmDir, { remove, merge, onLog = noop }) {
exports.cleanVm = async function cleanVm(vmDir, { fixMetadata, remove, merge, onLog = noop }) {
const handler = this._handler
const vhds = new Set()
@@ -219,11 +220,16 @@ exports.cleanVm = async function cleanVm(vmDir, { remove, merge, onLog = noop })
await asyncMap(jsons, async json => {
const metadata = JSON.parse(await handler.readFile(json))
const { mode } = metadata
let size
if (mode === 'full') {
const linkedXva = resolve('/', vmDir, metadata.xva)
if (xvas.has(linkedXva)) {
unusedXvas.delete(linkedXva)
size = await handler.getSize(linkedXva).catch(error => {
onLog(`failed to get size of ${json}`, { error })
})
} else {
onLog(`the XVA linked to the metadata ${json} is missing`)
if (remove) {
@@ -241,6 +247,10 @@ exports.cleanVm = async function cleanVm(vmDir, { remove, merge, onLog = noop })
// possible (existing disks) even if one disk is missing
if (linkedVhds.every(_ => vhds.has(_))) {
linkedVhds.forEach(_ => unusedVhds.delete(_))
size = await asyncMap(linkedVhds, vhd => handler.getSize(vhd)).then(sum, error => {
onLog(`failed to get size of ${json}`, { error })
})
} else {
onLog(`Some VHDs linked to the metadata ${json} are missing`)
if (remove) {
@@ -249,6 +259,22 @@ exports.cleanVm = async function cleanVm(vmDir, { remove, merge, onLog = noop })
}
}
}
const metadataSize = metadata.size
if (size !== undefined && metadataSize !== size) {
onLog(`incorrect size in metadata: ${metadataSize ?? 'none'} instead of ${size}`)
// don't update if the the stored size is greater than found files,
// it can indicates a problem
if (fixMetadata && (metadataSize === undefined || metadataSize < size)) {
try {
metadata.size = size
await handler.writeFile(json, JSON.stringify(metadata), { flags: 'w' })
} catch (error) {
onLog(`failed to update size in backup metadata ${json}`, { error })
}
}
}
})
// TODO: parallelize by vm/job/vdi

View File

@@ -202,6 +202,7 @@ exports.importDeltaVm = defer(async function importDeltaVm(
blocked_operations: {
...vmRecord.blocked_operations,
start: 'Importing…',
start_on: 'Importing…',
},
ha_always_run: false,
is_a_template: false,
@@ -305,9 +306,6 @@ exports.importDeltaVm = defer(async function importDeltaVm(
}
}),
// Wait for VDI export tasks (if any) termination.
Promise.all(Object.values(streams).map(stream => stream.task)),
// Create VIFs.
asyncMap(Object.values(deltaVm.vifs), vif => {
let network = vif.$network$uuid && xapi.getObjectByUuid(vif.$network$uuid, undefined)

View File

@@ -7,23 +7,25 @@ const { execFile } = require('child_process')
const parse = createParser({
keyTransform: key => key.slice(5).toLowerCase(),
})
const makeFunction = command => async (fields, ...args) => {
const info = await fromCallback(execFile, command, [
'--noheading',
'--nosuffix',
'--nameprefixes',
'--unbuffered',
'--units',
'b',
'-o',
String(fields),
...args,
])
return info
.trim()
.split(/\r?\n/)
.map(Array.isArray(fields) ? parse : line => parse(line)[fields])
}
// Build an async wrapper around an LVM reporting command (e.g. `lvs`, `pvs`).
//
// The generated function runs `command` with flags that force
// machine-readable output: no header line (`--noheading`), no unit
// suffixes (`--nosuffix`), FIELD=value pairs (`--nameprefixes`),
// unbuffered output, and sizes in bytes (`--units b`), then parses each
// resulting line.
//
// `fields` may be an array of field names — each output line is then
// returned as a full parsed record — or a single field name, in which
// case only that field's value is returned per line. Extra `args` are
// forwarded verbatim to the command (presumably device/volume
// selectors — TODO confirm against callers).
const makeFunction =
command =>
async (fields, ...args) => {
// `fromCallback` adapts the Node callback-style `execFile` to a promise.
const info = await fromCallback(execFile, command, [
'--noheading',
'--nosuffix',
'--nameprefixes',
'--unbuffered',
'--units',
'b',
'-o',
String(fields),
...args,
])
return info
.trim()
.split(/\r?\n/) // one record per output line (tolerates CRLF)
.map(Array.isArray(fields) ? parse : line => parse(line)[fields])
}
exports.lvs = makeFunction('lvs')
exports.pvs = makeFunction('pvs')

View File

@@ -8,7 +8,7 @@
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"version": "0.11.0",
"version": "0.13.0",
"engines": {
"node": ">=14.6"
},
@@ -20,25 +20,25 @@
"@vates/disposable": "^0.1.1",
"@vates/parse-duration": "^0.1.1",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/fs": "^0.17.0",
"@xen-orchestra/log": "^0.2.1",
"@xen-orchestra/fs": "^0.18.0",
"@xen-orchestra/log": "^0.3.0",
"@xen-orchestra/template": "^0.1.0",
"compare-versions": "^3.6.0",
"d3-time-format": "^3.0.0",
"end-of-stream": "^1.4.4",
"fs-extra": "^9.0.0",
"fs-extra": "^10.0.0",
"golike-defer": "^0.5.1",
"limit-concurrency-decorator": "^0.5.0",
"lodash": "^4.17.20",
"node-zone": "^0.4.0",
"parse-pairs": "^1.1.0",
"pump": "^3.0.0",
"promise-toolbox": "^0.19.2",
"vhd-lib": "^1.0.0",
"pump": "^3.0.0",
"vhd-lib": "^1.2.0",
"yazl": "^2.5.1"
},
"peerDependencies": {
"@xen-orchestra/xapi": "^0.6.3"
"@xen-orchestra/xapi": "^0.7.0"
},
"license": "AGPL-3.0-or-later",
"author": {

View File

@@ -106,9 +106,11 @@ exports.DeltaReplicationWriter = class DeltaReplicationWriter extends MixinRepli
targetVm.ha_restart_priority !== '' &&
Promise.all([targetVm.set_ha_restart_priority(''), targetVm.add_tags('HA disabled')]),
targetVm.set_name_label(`${vm.name_label} - ${job.name} - (${formatFilenameDate(timestamp)})`),
targetVm.update_blocked_operations(
'start',
'Start operation for this vm is blocked, clone it if you want to use it.'
asyncMap(['start', 'start_on'], op =>
targetVm.update_blocked_operations(
op,
'Start operation for this vm is blocked, clone it if you want to use it.'
)
),
targetVm.update_other_config({
'xo:backup:sr': srUuid,

View File

@@ -1,5 +1,5 @@
const ignoreErrors = require('promise-toolbox/ignoreErrors.js')
const { asyncMapSettled } = require('@xen-orchestra/async-map')
const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
const { formatDateTime } = require('@xen-orchestra/xapi')
const { formatFilenameDate } = require('../_filenameDate.js')
@@ -64,9 +64,11 @@ exports.FullReplicationWriter = class FullReplicationWriter extends MixinReplica
const targetVm = await xapi.getRecord('VM', targetVmRef)
await Promise.all([
targetVm.update_blocked_operations(
'start',
'Start operation for this vm is blocked, clone it if you want to use it.'
asyncMap(['start', 'start_on'], op =>
targetVm.update_blocked_operations(
op,
'Start operation for this vm is blocked, clone it if you want to use it.'
)
),
targetVm.update_other_config({
'xo:backup:sr': srUuid,

View File

@@ -16,7 +16,7 @@ exports.MixinBackupWriter = (BaseClass = Object) =>
_cleanVm(options) {
return this._adapter
.cleanVm(getVmBackupDir(this._backup.vm.uuid), { ...options, onLog: warn, lock: false })
.cleanVm(getVmBackupDir(this._backup.vm.uuid), { ...options, fixMetadata: true, onLog: warn, lock: false })
.catch(warn)
}

View File

@@ -77,7 +77,11 @@ ${cliName} v${pkg.version}
'xo:backup:sr': tgtSr.uuid,
'xo:copy_of': srcSnapshotUuid,
}),
tgtVm.update_blocked_operations('start', 'Start operation for this vm is blocked, clone it if you want to use it.'),
Promise.all(
['start', 'start_on'].map(op =>
tgtVm.update_blocked_operations(op, 'Start operation for this vm is blocked, clone it if you want to use it.')
)
),
Promise.all(
userDevices.map(userDevice => {
const srcDisk = srcDisks[userDevice]

View File

@@ -18,7 +18,7 @@
"preferGlobal": true,
"dependencies": {
"golike-defer": "^0.5.1",
"xen-api": "^0.33.0"
"xen-api": "^0.34.3"
},
"scripts": {
"postversion": "npm publish"

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "@xen-orchestra/fs",
"version": "0.17.0",
"version": "0.18.0",
"license": "AGPL-3.0-or-later",
"description": "The File System for Xen Orchestra backups.",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/fs",
@@ -23,9 +23,9 @@
"@vates/coalesce-calls": "^0.1.0",
"@xen-orchestra/async-map": "^0.1.2",
"aws-sdk": "^2.686.0",
"decorator-synchronized": "^0.5.0",
"decorator-synchronized": "^0.6.0",
"execa": "^5.0.0",
"fs-extra": "^9.0.0",
"fs-extra": "^10.0.0",
"get-stream": "^6.0.0",
"limit-concurrency-decorator": "^0.5.0",
"lodash": "^4.17.4",
@@ -45,7 +45,7 @@
"async-iterator-to-stream": "^1.1.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^7.0.2",
"dotenv": "^8.0.0",
"dotenv": "^10.0.0",
"rimraf": "^3.0.0"
},
"scripts": {

View File

@@ -1,13 +1,13 @@
import asyncMapSettled from '@xen-orchestra/async-map/legacy'
import getStream from 'get-stream'
import path, { basename } from 'path'
import synchronized from 'decorator-synchronized'
import { coalesceCalls } from '@vates/coalesce-calls'
import { fromCallback, fromEvent, ignoreErrors, timeout } from 'promise-toolbox'
import { limitConcurrency } from 'limit-concurrency-decorator'
import { parse } from 'xo-remote-parser'
import { pipeline } from 'stream'
import { randomBytes } from 'crypto'
import { synchronized } from 'decorator-synchronized'
import normalizePath from './_normalizePath'
import { createChecksumStream, validChecksumOfReadStream } from './checksum'

View File

@@ -27,3 +27,12 @@ export const getHandler = (remote, ...rest) => {
}
return new Handler(remote, ...rest)
}
// Like `getHandler`, but returns the handler already synchronized, wrapped
// as a disposable resource: `value` is the ready-to-use handler and
// `dispose` tears it down again (via `forget`).
export const getSyncedHandler = async (...opts) => {
  const value = getHandler(...opts)
  await value.sync()
  const dispose = () => value.forget()
  return { dispose, value }
}

View File

@@ -66,6 +66,10 @@ configure([
// if filter is a string, then it is pattern
// (https://github.com/visionmedia/debug#wildcards) which is
// matched against the namespace of the logs
//
// If it's an array, it will be handled as an array of filters
// and the transport will be used if any one of them match the
// current log
filter: process.env.DEBUG,
transport: transportConsole(),

View File

@@ -4,6 +4,42 @@ const { compileGlobPattern } = require('./utils')
// ===================================================================
// Normalize a `filter` option into a predicate `log => boolean`, or
// `undefined` when there is nothing to filter on.
//
// Accepted forms:
// - undefined        → undefined (no filtering)
// - function         → used as-is
// - string           → glob pattern matched against the log namespace
// - array of filters → predicate that is true when ANY entry matches
//                      (entries are compiled recursively, undefined
//                      entries are ignored)
//
// Any other value is rejected with a TypeError.
const compileFilter = filter => {
  if (filter === undefined) {
    return undefined
  }

  if (typeof filter === 'function') {
    return filter
  }

  if (typeof filter === 'string') {
    const pattern = compileGlobPattern(filter)
    return log => pattern.test(log.namespace)
  }

  if (Array.isArray(filter)) {
    const predicates = filter.map(compileFilter).filter(predicate => predicate !== undefined)
    switch (predicates.length) {
      case 0:
        return undefined
      case 1:
        return predicates[0]
      default:
        return log => predicates.some(predicate => predicate(log))
    }
  }

  throw new TypeError('unsupported `filter`')
}
const createTransport = config => {
if (typeof config === 'function') {
return config
@@ -19,26 +55,15 @@ const createTransport = config => {
}
}
let { filter } = config
let transport = createTransport(config.transport)
const level = resolve(config.level)
const filter = compileFilter([config.filter, level === undefined ? undefined : log => log.level >= level])
let transport = createTransport(config.transport)
if (filter !== undefined) {
if (typeof filter === 'string') {
const re = compileGlobPattern(filter)
filter = log => re.test(log.namespace)
}
const orig = transport
transport = function (log) {
if ((level !== undefined && log.level >= level) || filter(log)) {
return orig.apply(this, arguments)
}
}
} else if (level !== undefined) {
const orig = transport
transport = function (log) {
if (log.level >= level) {
if (filter(log)) {
return orig.apply(this, arguments)
}
}

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "@xen-orchestra/log",
"version": "0.2.1",
"version": "0.3.0",
"license": "ISC",
"description": "Logging system with decoupled producers/consumer",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/log",

View File

@@ -20,36 +20,8 @@ if (process.stdout !== undefined && process.stdout.isTTY && process.stderr !== u
}
const NAMESPACE_COLORS = [
196,
202,
208,
214,
220,
226,
190,
154,
118,
82,
46,
47,
48,
49,
50,
51,
45,
39,
33,
27,
21,
57,
93,
129,
165,
201,
200,
199,
198,
197,
196, 202, 208, 214, 220, 226, 190, 154, 118, 82, 46, 47, 48, 49, 50, 51, 45, 39, 33, 27, 21, 57, 93, 129, 165, 201,
200, 199, 198, 197,
]
formatNamespace = namespace => {
// https://werxltd.com/wp/2010/05/13/javascript-implementation-of-javas-string-hashcode-method/

View File

@@ -1,5 +1,6 @@
const get = require('lodash/get')
const identity = require('lodash/identity')
const isEqual = require('lodash/isEqual')
const { createLogger } = require('@xen-orchestra/log')
const { parseDuration } = require('@vates/parse-duration')
const { watch } = require('app-conf')
@@ -48,7 +49,7 @@ module.exports = class Config {
const watcher = config => {
try {
const value = processor(get(config, path))
if (value !== prev) {
if (!isEqual(value, prev)) {
prev = value
cb(value)
}

View File

@@ -14,14 +14,14 @@
"url": "https://vates.fr"
},
"license": "AGPL-3.0-or-later",
"version": "0.1.0",
"version": "0.1.1",
"engines": {
"node": ">=12"
},
"dependencies": {
"@vates/parse-duration": "^0.1.1",
"@xen-orchestra/emit-async": "^0.1.0",
"@xen-orchestra/log": "^0.2.1",
"@xen-orchestra/log": "^0.3.0",
"app-conf": "^0.9.0",
"lodash": "^4.17.21"
},

View File

@@ -28,9 +28,10 @@ export default {
buffer.toString('hex', offset + 5, offset + 6),
stringToEth: (string, buffer, offset) => {
const eth = /^([0-9A-Fa-f]{2}):([0-9A-Fa-f]{2}):([0-9A-Fa-f]{2}):([0-9A-Fa-f]{2}):([0-9A-Fa-f]{2}):([0-9A-Fa-f]{2})$/.exec(
string
)
const eth =
/^([0-9A-Fa-f]{2}):([0-9A-Fa-f]{2}):([0-9A-Fa-f]{2}):([0-9A-Fa-f]{2}):([0-9A-Fa-f]{2}):([0-9A-Fa-f]{2})$/.exec(
string
)
assert(eth !== null)
buffer.writeUInt8(parseInt(eth[1], 16), offset)
buffer.writeUInt8(parseInt(eth[2], 16), offset + 1)
@@ -50,9 +51,10 @@ export default {
),
stringToip4: (string, buffer, offset) => {
const ip = /^([1-9]?\d|1\d\d|2[0-4]\d|25[0-5])\.([1-9]?\d|1\d\d|2[0-4]\d|25[0-5])\.([1-9]?\d|1\d\d|2[0-4]\d|25[0-5])\.([1-9]?\d|1\d\d|2[0-4]\d|25[0-5])$/.exec(
string
)
const ip =
/^([1-9]?\d|1\d\d|2[0-4]\d|25[0-5])\.([1-9]?\d|1\d\d|2[0-4]\d|25[0-5])\.([1-9]?\d|1\d\d|2[0-4]\d|25[0-5])\.([1-9]?\d|1\d\d|2[0-4]\d|25[0-5])$/.exec(
string
)
assert(ip !== null)
buffer.writeUInt8(parseInt(ip[1], 10), offset)
buffer.writeUInt8(parseInt(ip[2], 10), offset + 1)

View File

@@ -33,7 +33,7 @@
"content-type": "^1.0.4",
"cson-parser": "^4.0.7",
"getopts": "^2.2.3",
"http-request-plus": "^0.10.0",
"http-request-plus": "^0.12",
"json-rpc-protocol": "^0.13.1",
"promise-toolbox": "^0.19.2",
"pump": "^3.0.0",

View File

@@ -36,7 +36,14 @@ async function main(argv) {
const { hostname = 'localhost', port } = config?.http?.listen?.https ?? {}
const { _: args, file, help, host, raw, token } = getopts(argv, {
const {
_: args,
file,
help,
host,
raw,
token,
} = getopts(argv, {
alias: { file: 'f', help: 'h' },
boolean: ['help', 'raw'],
default: {

View File

@@ -59,6 +59,13 @@ cert = '/var/lib/xo-proxy/certificate.pem'
key = '/var/lib/xo-proxy/key.pem'
port = 443
[logs]
# Display all logs matching this filter, regardless of their level
#filter = 'xo:backups:*'
# Display all logs with level >=, regardless of their namespace
level = 'info'
[remoteOptions]
mountsDir = '/run/xo-proxy/mounts'

View File

@@ -93,10 +93,7 @@ declare namespace event {
declare namespace backup {
type SimpleIdPattern = { id: string | { __or: string[] } }
declare namespace backup {
type SimpleIdPattern = { id: string | { __or: string[] } }
interface BackupJob {
interface BackupJob {
id: string
type: 'backup'
compression?: 'native' | 'zstd' | ''
@@ -146,13 +143,13 @@ declare namespace backup {
}
function listXoMetadataBackups(_: { remotes: { [id: string]: Remote } }): { [remoteId: string]: object[] }
function run(_: {
job: BackupJob | MetadataBackupJob
function run(_: {
job: BackupJob | MetadataBackupJob
remotes: { [id: string]: Remote }
schedule: Schedule
xapis?: { [id: string]: Xapi }
recordToXapi?: { [recordUuid: string]: string }
schedule: Schedule
xapis?: { [id: string]: Xapi }
recordToXapi?: { [recordUuid: string]: string }
streamLogs: boolean = false
}): string

View File

@@ -1,7 +1,7 @@
{
"private": true,
"name": "@xen-orchestra/proxy",
"version": "0.14.1",
"version": "0.14.7",
"license": "AGPL-3.0-or-later",
"description": "XO Proxy used to remotely execute backup jobs",
"keywords": [
@@ -31,17 +31,17 @@
"@vates/decorate-with": "^0.1.0",
"@vates/disposable": "^0.1.1",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/backups": "^0.11.0",
"@xen-orchestra/fs": "^0.17.0",
"@xen-orchestra/log": "^0.2.1",
"@xen-orchestra/backups": "^0.13.0",
"@xen-orchestra/fs": "^0.18.0",
"@xen-orchestra/log": "^0.3.0",
"@xen-orchestra/mixin": "^0.1.0",
"@xen-orchestra/mixins": "^0.1.0",
"@xen-orchestra/mixins": "^0.1.1",
"@xen-orchestra/self-signed": "^0.1.0",
"@xen-orchestra/xapi": "^0.6.3",
"@xen-orchestra/xapi": "^0.7.0",
"ajv": "^8.0.3",
"app-conf": "^0.9.0",
"async-iterator-to-stream": "^1.1.0",
"fs-extra": "^9.1.0",
"fs-extra": "^10.0.0",
"get-stream": "^6.0.0",
"getopts": "^2.2.3",
"golike-defer": "^0.5.1",
@@ -58,7 +58,7 @@
"source-map-support": "^0.5.16",
"stoppable": "^1.0.6",
"xdg-basedir": "^4.0.0",
"xen-api": "^0.33.0",
"xen-api": "^0.34.3",
"xo-common": "^0.7.0"
},
"devDependencies": {
@@ -72,7 +72,7 @@
"@vates/toggle-scripts": "^1.0.0",
"babel-plugin-transform-dev": "^2.0.1",
"cross-env": "^7.0.2",
"index-modules": "^0.4.0"
"index-modules": "^0.4.3"
},
"scripts": {
"_build": "index-modules --index-file index.mjs src/app/mixins && babel --delete-dir-on-start --keep-file-extension --source-maps --out-dir=dist/ src/",

View File

@@ -15,12 +15,23 @@ import { createLogger } from '@xen-orchestra/log'
const { debug, warn } = createLogger('xo:proxy:api')
const ndJsonStream = asyncIteratorToStream(async function* (responseId, iterable) {
yield format.response(responseId, { $responseType: 'ndjson' }) + '\n'
for await (const data of iterable) {
try {
yield JSON.stringify(data) + '\n'
} catch (error) {
warn('ndJsonStream', { error })
let headerSent = false
try {
for await (const data of iterable) {
if (!headerSent) {
yield format.response(responseId, { $responseType: 'ndjson' }) + '\n'
headerSent = true
}
try {
yield JSON.stringify(data) + '\n'
} catch (error) {
warn('ndJsonStream, item error', { error })
}
}
} catch (error) {
warn('ndJsonStream, fatal error', { error })
if (!headerSent) {
yield format.error(responseId, error)
}
}
})

View File

@@ -11,6 +11,7 @@ import { DurablePartition } from '@xen-orchestra/backups/DurablePartition.js'
import { execFile } from 'child_process'
import { formatVmBackups } from '@xen-orchestra/backups/formatVmBackups.js'
import { ImportVmBackup } from '@xen-orchestra/backups/ImportVmBackup.js'
import { JsonRpcError } from 'json-rpc-protocol'
import { Readable } from 'stream'
import { RemoteAdapter } from '@xen-orchestra/backups/RemoteAdapter.js'
import { RestoreMetadataBackup } from '@xen-orchestra/backups/RestoreMetadataBackup.js'
@@ -108,7 +109,7 @@ export default class Backups {
if (!__DEV__) {
const license = await app.appliance.getSelfLicense()
if (license === undefined) {
throw new Error('no valid proxy license')
throw new JsonRpcError('no valid proxy license')
}
}
return run.apply(this, arguments)

View File

@@ -0,0 +1,17 @@
import transportConsole from '@xen-orchestra/log/transports/console.js'
import { configure } from '@xen-orchestra/log/configure.js'
// Mixin that makes the logging configuration dynamic: whenever the `logs`
// section of the app configuration changes, the log transports are
// reconfigured with the new filter and level.
export default class Logs {
  constructor(app) {
    // single console transport instance, reused across reconfigurations
    const consoleTransport = transportConsole()

    app.config.watch('logs', ({ filter, level }) => {
      configure([
        {
          // the DEBUG env var keeps precedence alongside the configured filter
          filter: [process.env.DEBUG, filter],
          level,
          transport: consoleTransport,
        },
      ])
    })
  }
}

View File

@@ -33,9 +33,9 @@
"chalk": "^4.1.0",
"exec-promise": "^0.7.0",
"form-data": "^4.0.0",
"fs-extra": "^9.0.0",
"fs-extra": "^10.0.0",
"get-stream": "^6.0.0",
"http-request-plus": "^0.10.0",
"http-request-plus": "^0.12",
"human-format": "^0.11.0",
"l33teral": "^3.0.3",
"lodash": "^4.17.4",

View File

@@ -1,6 +1,6 @@
{
"name": "@xen-orchestra/xapi",
"version": "0.6.3",
"version": "0.7.0",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/xapi",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
@@ -25,7 +25,7 @@
"xo-common": "^0.7.0"
},
"peerDependencies": {
"xen-api": "^0.33.0"
"xen-api": "^0.34.3"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
@@ -40,7 +40,7 @@
"dependencies": {
"@vates/decorate-with": "^0.1.0",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/log": "^0.2.1",
"@xen-orchestra/log": "^0.3.0",
"d3-time-format": "^3.0.0",
"golike-defer": "^0.5.1",
"lodash": "^4.17.15",

View File

@@ -1,15 +1,151 @@
# ChangeLog
## **next**
## **5.63.0** (2021-09-30)
<img id="latest" src="https://badgen.net/badge/channel/latest/yellow" alt="Channel: latest" />
### Highlights
- [Backup] Go back to previous page instead of going to the overview after editing a job: keeps current filters and page (PR [#5913](https://github.com/vatesfr/xen-orchestra/pull/5913))
- [Health] Do not take into consideration duplicated MAC addresses from CR VMs (PR [#5916](https://github.com/vatesfr/xen-orchestra/pull/5916))
- [Health] Ability to filter duplicated MAC addresses by running VMs (PR [#5917](https://github.com/vatesfr/xen-orchestra/pull/5917))
- [Tables] Move the search bar and pagination to the top of the table (PR [#5914](https://github.com/vatesfr/xen-orchestra/pull/5914))
- [Netbox] Handle nested prefixes by always assigning an IP to the smallest prefix it matches (PR [#5908](https://github.com/vatesfr/xen-orchestra/pull/5908))
### Bug fixes
- [SSH keys] Allow SSH key to be broken anywhere to avoid breaking page formatting (Thanks [@tstivers1990](https://github.com/tstivers1990)!) [#5891](https://github.com/vatesfr/xen-orchestra/issues/5891) (PR [#5892](https://github.com/vatesfr/xen-orchestra/pull/5892))
- [Netbox] Better handling and error messages when encountering issues due to UUID custom field not being configured correctly [#5905](https://github.com/vatesfr/xen-orchestra/issues/5905) [#5806](https://github.com/vatesfr/xen-orchestra/issues/5806) [#5834](https://github.com/vatesfr/xen-orchestra/issues/5834) (PR [#5909](https://github.com/vatesfr/xen-orchestra/pull/5909))
- [New VM] Don't send network config if untouched as all commented config can make Cloud-init fail [#5918](https://github.com/vatesfr/xen-orchestra/issues/5918) (PR [#5923](https://github.com/vatesfr/xen-orchestra/pull/5923))
### Released packages
- xen-api 0.34.3
- vhd-lib 1.2.0
- xo-server-netbox 0.3.1
- @xen-orchestra/proxy 0.14.7
- xo-server 5.82.3
- xo-web 5.88.0
## **5.62.1** (2021-09-17)
### Bug fixes
- [VM/Advanced] Fix conversion from UEFI to BIOS boot firmware (PR [#5895](https://github.com/vatesfr/xen-orchestra/pull/5895))
- [VM/network] Support newline-delimited IP addresses reported by some guest tools
- Fix VM/host stats, VM creation with Cloud-init, and VM backups, with NATted hosts [#5896](https://github.com/vatesfr/xen-orchestra/issues/5896)
- [VM/import] Very small VMDK and OVA files were mangled upon import (PR [#5903](https://github.com/vatesfr/xen-orchestra/pull/5903))
### Released packages
- xen-api 0.34.2
- @xen-orchestra/proxy 0.14.6
- xo-server 5.82.2
## **5.62.0** (2021-08-31)
<img id="stable" src="https://badgen.net/badge/channel/stable/green" alt="Channel: stable" />
### Highlights
- [Host] Add warning in case of unmaintained host version [#5840](https://github.com/vatesfr/xen-orchestra/issues/5840) (PR [#5847](https://github.com/vatesfr/xen-orchestra/pull/5847))
- [Backup] Use default migration network if set when importing/exporting VMs/VDIs (PR [#5883](https://github.com/vatesfr/xen-orchestra/pull/5883))
### Enhancements
- [New network] Ability for pool's admin to create a new network within the pool (PR [#5873](https://github.com/vatesfr/xen-orchestra/pull/5873))
- [Netbox] Synchronize primary IPv4 and IPv6 addresses [#5633](https://github.com/vatesfr/xen-orchestra/issues/5633) (PR [#5879](https://github.com/vatesfr/xen-orchestra/pull/5879))
### Bug fixes
- [VM/network] Fix an issue where multiple IPs would be displayed in the same tag when using old Xen tools. This also fixes Netbox's IP synchronization for the affected VMs. (PR [#5860](https://github.com/vatesfr/xen-orchestra/pull/5860))
- [LDAP] Handle groups with no members (PR [#5862](https://github.com/vatesfr/xen-orchestra/pull/5862))
- Fix empty button on small size screen (PR [#5874](https://github.com/vatesfr/xen-orchestra/pull/5874))
- [Host] Fix `Cannot read property 'other_config' of undefined` error when enabling maintenance mode (PR [#5875](https://github.com/vatesfr/xen-orchestra/pull/5875))
### Released packages
- xen-api 0.34.1
- @xen-orchestra/xapi 0.7.0
- @xen-orchestra/backups 0.13.0
- @xen-orchestra/fs 0.18.0
- @xen-orchestra/log 0.3.0
- @xen-orchestra/mixins 0.1.1
- xo-server-auth-ldap 0.10.4
- xo-server-netbox 0.3.0
- xo-server 5.82.1
- xo-web 5.87.0
## **5.61.0** (2021-07-30)
### Highlights
- [SR/disks] Display base copies' active VDIs (PR [#5826](https://github.com/vatesfr/xen-orchestra/pull/5826))
- [Netbox] Optionally allow self-signed certificates (PR [#5850](https://github.com/vatesfr/xen-orchestra/pull/5850))
- [Host] When supported, use pool's default migration network to evacuate host [#5802](https://github.com/vatesfr/xen-orchestra/issues/5802) (PR [#5851](https://github.com/vatesfr/xen-orchestra/pull/5851))
- [VM] shutdown/reboot: offer to force shutdown/reboot the VM if no Xen tools were detected [#5838](https://github.com/vatesfr/xen-orchestra/issues/5838) (PR [#5855](https://github.com/vatesfr/xen-orchestra/pull/5855))
### Enhancements
- [Netbox] Add information about a failed request to the error log to help better understand what happened [#5834](https://github.com/vatesfr/xen-orchestra/issues/5834) (PR [#5842](https://github.com/vatesfr/xen-orchestra/pull/5842))
- [VM/console] Ability to rescan ISO SRs (PR [#5841](https://github.com/vatesfr/xen-orchestra/pull/5841))
### Bug fixes
- [VM/disks] Fix `an error has occured` when self service user was on VM disk view (PR [#5841](https://github.com/vatesfr/xen-orchestra/pull/5841))
- [Backup] Protect replicated VMs from being started on specific hosts (PR [#5852](https://github.com/vatesfr/xen-orchestra/pull/5852))
### Released packages
- @xen-orchestra/backups 0.12.2
- @xen-orchestra/proxy 0.14.4
- xo-server-netbox 0.2.0
- xo-web 5.86.0
- xo-server 5.81.2
## **5.60.0** (2021-06-30)
### Highlights
- [VM/disks] Ability to rescan ISO SRs (PR [#5814](https://github.com/vatesfr/xen-orchestra/pull/5814))
- [VM/snapshots] Identify VM's current snapshot with an icon next to the snapshot's name (PR [#5824](https://github.com/vatesfr/xen-orchestra/pull/5824))
### Enhancements
- [OVA import] improve OVA import error reporting (PR [#5797](https://github.com/vatesfr/xen-orchestra/pull/5797))
- [Backup] Distinguish error messages between cancelation and interrupted HTTP connection
- [Jobs] Add `host.emergencyShutdownHost` to the list of methods that jobs can call (PR [#5818](https://github.com/vatesfr/xen-orchestra/pull/5818))
- [Host/Load-balancer] Log VM and host names when a VM is migrated + category (density, performance, ...) (PR [#5808](https://github.com/vatesfr/xen-orchestra/pull/5808))
- [VM/new disk] Auto-fill disk name input with generated unique name (PR [#5828](https://github.com/vatesfr/xen-orchestra/pull/5828))
### Bug fixes
- [IPs] Handle space-delimited IP address format provided by outdated guest tools [5801](https://github.com/vatesfr/xen-orchestra/issues/5801) (PR [5805](https://github.com/vatesfr/xen-orchestra/pull/5805))
- [API/pool.listPoolsMatchingCriteria] fix `unknown error from the peer` error (PR [5807](https://github.com/vatesfr/xen-orchestra/pull/5807))
- [Backup] Limit number of connections to hosts, which should reduce the occurences of `ECONNRESET`
- [Plugins/perf-alert] All mode: only selects running hosts and VMs (PR [5811](https://github.com/vatesfr/xen-orchestra/pull/5811))
- [New VM] Fix summary section always showing "0 B" for RAM (PR [#5817](https://github.com/vatesfr/xen-orchestra/pull/5817))
- [Backup/Restore] Fix _start VM after restore_ [5820](https://github.com/vatesfr/xen-orchestra/issues/5820)
- [Netbox] Fix a bug where some devices' IPs would get deleted from Netbox (PR [#5821](https://github.com/vatesfr/xen-orchestra/pull/5821))
- [Netbox] Fix an issue where some IPv6 would be deleted just to be immediately created again (PR [#5822](https://github.com/vatesfr/xen-orchestra/pull/5822))
### Released packages
- @vates/decorate-with 0.1.0
- xen-api 0.33.1
- @xen-orchestra/xapi 0.6.4
- @xen-orchestra/backups 0.12.0
- @xen-orchestra/proxy 0.14.3
- vhd-lib 1.1.0
- vhd-cli 0.4.0
- xo-server-netbox 0.1.2
- xo-server-perf-alert 0.3.2
- xo-server-load-balancer 0.7.0
- xo-server 5.80.0
- xo-web 5.84.0
## **5.59.0** (2021-05-31)
<img id="latest" src="https://badgen.net/badge/channel/latest/yellow" alt="Channel: latest" />
### Highlights
- [Smart backup] Report missing pools [#2844](https://github.com/vatesfr/xen-orchestra/issues/2844) (PR [#5768](https://github.com/vatesfr/xen-orchestra/pull/5768))
@@ -41,8 +177,6 @@
## **5.58.1** (2021-05-06)
<img id="stable" src="https://badgen.net/badge/channel/stable/green" alt="Channel: stable" />
### Bug fixes
- [Backups] Better handling of errors in remotes, fix `task has already ended`

View File

@@ -7,8 +7,6 @@
> Users must be able to say: “Nice enhancement, I'm eager to test it”
- [OVA import] improve OVA import error reporting (PR [#5797](https://github.com/vatesfr/xen-orchestra/pull/5797))
### Bug fixes
> Users must be able to say: “I had this issue, happy to know it's fixed”
@@ -30,5 +28,4 @@
>
> In case of conflict, the highest (lowest in previous list) `$version` wins.
- xo-server patch
- xo-web patch
- @xen-orchestra/proxy minor

View File

@@ -114,17 +114,18 @@ We need your feedback on this feature!
The plugin "web-hooks" needs to be installed and loaded for this feature to work.
You can trigger an HTTP POST request to a URL when a Xen Orchestra API method is called.
You can trigger an HTTP POST request to a URL when a Xen Orchestra API method is called or when a backup job runs.
- Go to Settings > Plugins > Web hooks
- Add new hooks
- For each hook, configure:
- Method: the XO API method that will trigger the HTTP request when called
- Method: the XO API method that will trigger the HTTP request when called. For backup jobs, choose `backupNg.runJob`.
- Type:
- pre: the request will be sent when the method is called
- post: the request will be sent after the method action is completed
- pre/post: both
- URL: the full URL which the requests will be sent to
- Wait for response: you can choose to wait for the web hook response before the method is actually called ("pre" hooks only). This can be useful if you need to automatically run some tasks before a certain method is called.
- Save the plugin configuration
From now on, a request will be sent to the corresponding URLs when a configured method is called by an XO client.
@@ -340,13 +341,14 @@ XO will try to find the right prefix for each IP address. If it can't find a pre
- Create a token with "Write enabled"
- Add a UUID custom field:
- Got to Admin > Custom fields > Add custom field
- Create a custom field called "uuid"
- Create a custom field called "uuid" (lower case!)
- Assign it to object types `virtualization > cluster` and `virtualization > virtual machine`
![](./assets/customfield.png)
- Go to Xen Orchestra > Settings > Plugins > Netbox and fill out the configuration:
- Endpoint: the URL of your Netbox instance (e.g.: `https://netbox.company.net`)
- Unauthorized certificate: only for HTTPS, enable this option if your Netbox instance uses a self-signed SSL certificate
- Token: the token you generated earlier
- Pools: the pools you wish to automatically synchronize with Netbox
- Interval: the time interval (in hours) between 2 auto-synchronizations. Leave empty if you don't want to synchronize automatically.

Binary file not shown.

After

Width:  |  Height:  |  Size: 53 KiB

View File

@@ -87,3 +87,7 @@ You need to be an admin:
![Mattermost configuration](./assets/DocImg8.png)
![Mattermost](./assets/DocImg9.png)
## Web hooks
You can also configure web hooks to be sent to a custom server before and/or after a backup job runs. This won't send a formatted report but raw JSON data that you can use in custom scripts on your side. Follow the [web-hooks plugin documentation](./advanced.html#web-hooks) to configure it.

View File

@@ -283,39 +283,40 @@ When it's done exporting, we'll remove the snapshot. Note: this operation will t
### Concurrency
Concurrency is a parameter that let you define how many VMs your backup job will manage simultaneously.
:::tip
- Default concurrency value is 2 if left empty.
:::
Let's say you want to backup 50 VMs (each with 1x disk) at 3:00 AM. There are **2 different strategies**:
1. backup VM #1 (snapshot, export, delete snapshots) **then** backup VM #2 -> _fully sequential strategy_
2. snapshot all VMs, **then** export all snapshots, **then** delete all snapshots for finished exports -> _fully parallel strategy_
The first purely sequential strategy will lead to a big problem: **you can't predict when a snapshot of your data will occur**. Because you can't predict the first VM export time (let's say 3 hours), then your second VM will have its snapshot taken 3 hours later, at 6 AM. We assume that's not what you meant when you specified "backup everything at 3 AM". You would end up with data from 6 AM (and later) for other VMs.
Strategy number 2 is better in this aspect: all the snapshots will be taken at 3 AM. However **it's risky without limits**: it means potentially doing 50 snapshots or more at once on the same storage. **Since XenServer doesn't have a queue**, it will try to do all of them at once. This is also prone to race conditions and could cause crashes on your storage.
So what's the best choice? Continue below to learn how to best configure concurrency for your needs.
#### Best choice
By default the _parallel strategy_ is, on paper, the most logical one. But we need to give it some limits on concurrency.
The first, purely sequential strategy means that **you can't predict when a snapshot of your data will occur**. Because you can't predict the first VM export time (let's say 3 hours), your second VM will have its snapshot taken 3 hours later, at 6 AM.
:::tip
Xen Orchestra can be connected to multiple pools at once. So the concurrency number applies **per pool**.
If you need your backup to be done at a specific time you should consider creating a specific backup task for this VM.
:::
Each step has its own concurrency to fit its requirements:
Strategy number 2 is to parallelise: all the snapshots will be taken at 3 AM. However **it's risky without limits**: it means potentially doing 50 snapshots or more at once on the same storage. **Since XenServer doesn't have a queue**, it will try to do all of them at once. This is also prone to race conditions and could cause crashes on your storage.
- **snapshot process** needs to be performed with the lowest concurrency possible. 2 is a good compromise: one snapshot is fast, but a stuck snapshot won't block the whole job. That's why a concurrency of 2 is not too bad on your storage. Basically, at 3 AM, we'll do all the VM snapshots needed, 2 at a time.
- **disk export process** is bottlenecked by XCP-ng/XenServer - so to get the most of it, you can use up to 12 in parallel. As soon a snapshot is done, the export process will start, until reaching 12 at once. Then as soon as one in those 12 is finished, another one will appear until there is nothing more to export.
- **VM export process:** the 12 disk export limit mentioned above applies to VDI exports, which happen during delta exports. For full VM exports (for example, for full backup job types), there is a built in limit of 2. This means if you have a full backup job of 6 VMs, only 2 will be exported at once.
- **snapshot deletion** can't happen all at once because the previous step durations are random - no need to implement concurrency on this one.
By default the _parallel strategy_ is, on paper, the most logical one. But you need to be careful and give it some limits on concurrency.
This is how it currently works in Xen Orchestra. But sometimes, you also want to have _sequential_ backups combined with the _parallel strategy_. That's why we introduced a sequential option in the advanced section of backup-ng:
:::tip
0 means it will be fully **parallel** for all VMs.
:::danger
High concurrency could impact your dom0 and network performances.
:::
If your job contains 50 VMs, for example, you could specify a sequential backup with a limit of "25 at once" (enter 25 in the concurrency field). This means at 3 AM, we'll do 25 snapshots (2 at a time), then exports. As soon as the first VM backup is completely finished (snapshot removed), we'll start the 26th and so on, to always keep a maximum of 25 VM backups going in parallel.
You should be aware of your hardware limitations when choosing the concurrency for your XCP-ng infrastructure: never set the concurrency too high or you could impact your VMs' performance.
The best way to find the right concurrency for you is to increase it slowly and watch the effect on backup time.
So to summarize, if you set your concurrency to 6 and you have 20 VMs to back up, the process will be the following:
- We start the backup of the first 6 VMs.
- When one VM backup has ended, we will launch the next VM backup.
- We keep launching new VM backups until all 20 VMs are finished, always keeping 6 backups running.
Removing the snapshot will trigger the coalesce process for the first VM, this is an automated action not triggered directly by the backup job.
## Backup modifier tags

View File

@@ -46,7 +46,7 @@ apt-get install build-essential redis-server libpng-dev git python-minimal libvh
You need to use the `git` source code manager to fetch the code. Ideally, you should run XO as a non-root user, and if you choose to, you need to set up `sudo` to be able to mount NFS remotes. As your chosen non-root (or root) user, run the following:
```
git clone -b master http://github.com/vatesfr/xen-orchestra
git clone -b master https://github.com/vatesfr/xen-orchestra
```
> Note: xo-server and xo-web have been migrated to the [xen-orchestra](https://github.com/vatesfr/xen-orchestra) mono-repository - so you only need the single clone command above

View File

@@ -1,6 +1,6 @@
# Full backups
You can schedule full backups of your VMs, by exporting them to the local XOA file-system, or directly to an NFS or SMB share. The "rentention" parameter allows you to modify how many backups are retained (by removing the oldest one).
You can schedule full backups of your VMs, by exporting them to the local XOA file-system, or directly to an NFS or SMB share. The "retention" parameter allows you to modify how many backups are retained (by removing the oldest one).
[![](./assets/backupexample.png)](https://xen-orchestra.com/blog/backup-your-xenserver-vms-with-xen-orchestra/)

View File

@@ -20,7 +20,7 @@ Once you have started the VM, you can access the web UI by putting the IP you co
:::tip
- Default Web UI credentials are `admin@admin.net` / `admin`
- Default console/SSH credentials are `xoa` / `xoa` (first login)
- Default console/SSH credentials are not set, you need to set them [as described here](troubleshooting.md#set-or-recover-xoa-vm-password).
:::
### Registration

View File

@@ -94,3 +94,21 @@ The global situation (resource usage) is examined **every minute**.
:::tip
TODO: more details to come here
:::
## VM anti-affinity
VM anti-affinity is a feature that prevents VMs with the same user tags from running on the same host. This functionality is available directly in the load-balancer plugin.
This way, you can avoid having pairs of redundant VMs or similar running on the same host.
Let's look at a simple example: you have multiple VMs running MySQL and PostgreSQL with high availability/replication. Obviously, you don't want to lose the replicated database inside the VMs on the same physical host. Just create your plan like this:
![](./assets/antiaffinity.png)
- Simple plan: means no active load balancing mechanism used
- Anti-affinity: we added our 2x tags, meaning any VMs with one of these tags will never run on the same host (if possible) with another VM having the same tag
You can also use the performance plan with the anti-affinity mode activated to continue to migrate non-tagged VMs.
:::tip
This feature is not limited by the number of VMs using the same tag, i.e. if you have 6 VMs with the same anti-affinity tag and 2 hosts, the plugin will always try to place 3 VMs on each host. It will distribute as much as possible the VMs fairly and it takes precedence (in the majority of the cases) over the performance algorithm.
:::

View File

@@ -320,6 +320,7 @@ You can learn more about XenServer [resource management on the Citrix Website](h
:::tip
XCP-ng doesn't limit VMs to 32 vCPU
:::
### VDI live migration
Thanks to Xen Storage Motion, it's easy to move a VM disk from one storage location to another, while the VM is running! This feature can help you migrate from your local storage to a SAN, or just upgrade your SAN without any downtime.
@@ -491,10 +492,12 @@ If you are behind a proxy, please update your `xo-server` configuration to add a
::: danger
As specified in the [documentation](https://xcp-ng.org/docs/requirements.html#pool-requirements) your pool shouldn't consist of hosts from different CPU vendors.
:::
::: warning
- Even with matching CPU vendors, in the case of different CPU models XCP-ng will scale the pool CPU ability to the CPU having the least instructions.
- All the hosts in a pool must run the same XCP-ng version.
:::
### Creating a pool
First you should add your new host to XOA by going to New > Server as described in [the relevant chapter](manage_infrastructure.md#add-a-host).

View File

@@ -59,9 +59,11 @@ While creating a standard backup job from your main Xen Orchestra appliance, you
Login is disabled by default on proxy appliances.
If you need to log in for some reason, you need to set a password for the xoa user via the XenStore of the VM. The following is to be run on your XCP-ng host:
```
xe vm-param-set uuid=<UUID> xenstore-data:vm-data/system-account-xoa-password=<password>
```
Where UUID is the uuid of your proxy VM.
Then you need to restart the proxy VM.
@@ -74,15 +76,19 @@ First you will need to add a second VIF to your Proxy VM. This can be done in th
After adding the VIF you will need to set an IP for the new NIC. For that, you will first need to SSH to the VM [as described before](/proxy.md#enabling-login-to-proxy-appliance).
Then set the new IP:
```
$ xoa network static eth1
? Static IP for this machine 192.168.100.120
? Network mask (eg 255.255.255.0) 255.255.255.0
```
If you want to set a static address.
```
$ xoa network dhcp eth1
```
If you prefer using DHCP.
:::tip
As XOA uses the first IP address reported by XAPI to contact the proxy appliance, you may have to switch the network card order if you want your proxy to be connected through a specific IP address.

View File

@@ -16,6 +16,18 @@ It means you don't have a default SR set on the pool you are importing XOA on. T
XOA uses HVM mode. If your physical host doesn't support virtualization extensions, XOA won't work. To check if your XenServer supports hardware assisted virtualization (HVM), you can enter this command in your host: `grep --color vmx /proc/cpuinfo`. If you don't have any result, it means XOA won't work on this hardware.
## Set or recover XOA VM password
As no password is set for the xoa system user by default, you will need to set your own. This can be done via the XenStore data of the VM. The following is to be run on your XCP-ng host:
```
xe vm-param-set uuid=<UUID> xenstore-data:vm-data/system-account-xoa-password=<password>
```
Where UUID is the uuid of your XOA VM.
Then you need to restart the VM.
## Recover web login password
If you have lost your password to log in to the XOA webpage, you can reset it. From the XOA CLI (for login/access info for the CLI, [see here](xoa.md#first-console-connection)), use the following command and insert the email/account you wish to recover:
@@ -162,9 +174,9 @@ Connect to your appliance via SSH, then as root execute these commands:
```
$ cd /etc/ssl
$ cp server.crt server.crt.old
$ cp server.key server.key.old
$ openssl req -x509 -newkey rsa:2048 -keyout server.key -out server.crt -nodes -days 360
$ cp cert.pem cert.pem-old
$ cp key.pem key.pem-old
$ openssl req -x509 -newkey rsa:2048 -keyout key.pem -out cert.pem -nodes -days 360
$ systemctl restart xo-server.service
```

View File

@@ -10,7 +10,7 @@ By design, the updater is only available in XOA. If you are using XO from the so
## Requirements
In order to work, the updater needs access to `xen-orchestra.com` (port 443).
In order to work, the updater needs access to `xen-orchestra.com` (port 443) and `nodejs.org` (port 443).
## Usage

View File

@@ -97,59 +97,25 @@ After the VM is imported, you just need to start it with `xe vm-start vm="XOA"`
## First console connection
If you connect via SSH or console, the default credentials are:
### Deployed with the [web deploy form](https://xen-orchestra.com/#!/xoa)
- user: xoa
- password: xoa
In that case, you already set the password for `xoa` user. If you forgot it, see below.
During your first connection, the system will ask you to:
### Manually deployed
- enter the current password again (`xoa`)
- enter your new password
- retype your new password
When it's done, you'll be disconnected, so reconnect again with your new password.
Here is an example when you connect via SSH for the first time:
If you connect via SSH or console for the first time without using our [web deploy form](https://xen-orchestra.com/#!/xoa), be aware **there's NO default password set for security reasons**. To set it, you need to connect to your host to find the XOA VM UUID (eg via `xe vm-list`).
Then replace `<UUID>` with the previously found UUID, and `<password>` with your password:
```
$ ssh xoa@192.168.100.146
Warning: Permanently added '192.168.100.146' (ECDSA) to the list of known hosts.
xoa@192.168.100.146's password:
You are required to change your password immediately (root enforced)
__ __ ____ _ _
\ \ / / / __ \ | | | |
\ V / ___ _ __ | | | |_ __ ___| |__ ___ ___| |_ _ __ __ _
> < / _ \ '_ \ | | | | '__/ __| '_ \ / _ \/ __| __| '__/ _` |
/ . \ __/ | | | | |__| | | | (__| | | | __/\__ \ |_| | | (_| |
/_/ \_\___|_| |_| \____/|_| \___|_| |_|\___||___/\__|_| \__,_|
Welcome to XOA Unified Edition, with Pro Support.
* Restart XO: sudo systemctl restart xo-server.service
* Display logs: sudo systemctl status xo-server.service
* Register your XOA: sudo xoa-updater --register
* Update your XOA: sudo xoa-updater --upgrade
OFFICIAL XOA DOCUMENTATION HERE: https://xen-orchestra.com/docs/xoa.html
Support available at https://xen-orchestra.com/#!/member/support
Build number: 16.10.24
Based on Debian GNU/Linux 8 (Stable) 64bits in PVHVM mode
WARNING: Your password has expired.
You must change your password now and login again!
Changing password for xoa.
(current) UNIX password:
Enter new UNIX password:
Retype new UNIX password:
passwd: password updated successfully
Connection to 192.168.100.146 closed.
$
xe vm-param-set uuid=<UUID> xenstore-data:vm-data/system-account-xoa-password=<password>
```
:::tip
Don't forget to use quotes for your password, eg: `xenstore-data:vm-data/system-account-xoa-password='MyPassW0rd!'`
:::
Then, you could connect with `xoa` username and the password you defined in the previous command, eg with `ssh xoa@<XOA IP ADDRESS>`.
### Using sudo
To avoid typing `sudo` for any admin command, you can have a root shell with `sudo -s`:

View File

@@ -12,14 +12,14 @@
"eslint-plugin-eslint-comments": "^3.2.0",
"eslint-plugin-import": "^2.22.1",
"eslint-plugin-node": "^11.1.0",
"eslint-plugin-promise": "^4.2.1",
"eslint-plugin-promise": "^5.1.0",
"eslint-plugin-react": "^7.21.5",
"exec-promise": "^0.7.0",
"globby": "^11.0.1",
"handlebars": "^4.7.6",
"husky": "^4.2.5",
"jest": "^26.0.1",
"lint-staged": "^10.2.7",
"lint-staged": "^11.1.2",
"lodash": "^4.17.4",
"prettier": "^2.0.5",
"promise-toolbox": "^0.19.2",

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "vhd-cli",
"version": "0.3.1",
"version": "0.4.0",
"license": "ISC",
"description": "Tools to read/create and merge VHD files",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/vhd-cli",
@@ -24,11 +24,11 @@
"node": ">=8.10"
},
"dependencies": {
"@xen-orchestra/fs": "^0.17.0",
"@xen-orchestra/fs": "^0.18.0",
"cli-progress": "^3.1.0",
"exec-promise": "^0.7.0",
"getopts": "^2.2.3",
"vhd-lib": "^1.0.0"
"vhd-lib": "^1.2.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
@@ -36,7 +36,7 @@
"@babel/preset-env": "^7.0.0",
"cross-env": "^7.0.2",
"execa": "^5.0.0",
"index-modules": "^0.3.0",
"index-modules": "^0.4.3",
"promise-toolbox": "^0.19.2",
"rimraf": "^3.0.0",
"tmp": "^0.2.1"

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "vhd-lib",
"version": "1.0.0",
"version": "1.2.0",
"license": "AGPL-3.0-or-later",
"description": "Primitives for VHD file handling",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/vhd-lib",
@@ -17,9 +17,9 @@
},
"dependencies": {
"@vates/read-chunk": "^0.1.2",
"@xen-orchestra/log": "^0.2.1",
"@xen-orchestra/log": "^0.3.0",
"async-iterator-to-stream": "^1.0.2",
"fs-extra": "^9.0.0",
"fs-extra": "^10.0.0",
"limit-concurrency-decorator": "^0.5.0",
"lodash": "^4.17.4",
"promise-toolbox": "^0.19.2",
@@ -30,7 +30,7 @@
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"@xen-orchestra/fs": "^0.17.0",
"@xen-orchestra/fs": "^0.18.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^7.0.2",
"execa": "^5.0.0",

View File

@@ -2,7 +2,7 @@ import assert from 'assert'
import { pipeline, Transform } from 'readable-stream'
import { readChunk } from '@vates/read-chunk'
import checkFooter from './_checkFooter'
import checkFooter from './checkFooter'
import checkHeader from './_checkHeader'
import noop from './_noop'
import getFirstAndLastBlocks from './_getFirstAndLastBlocks'

View File

@@ -8,3 +8,4 @@ export { default as createSyntheticStream } from './createSyntheticStream'
export { default as mergeVhd } from './merge'
export { default as createVhdStreamWithLength } from './createVhdStreamWithLength'
export { default as peekFooterFromVhdStream } from './peekFooterFromVhdStream'
export { default as checkFooter } from './checkFooter'

View File

@@ -1,7 +1,7 @@
import assert from 'assert'
import { createLogger } from '@xen-orchestra/log'
import checkFooter from './_checkFooter'
import checkFooter from './checkFooter'
import checkHeader from './_checkHeader'
import getFirstAndLastBlocks from './_getFirstAndLastBlocks'
import { fuFooter, fuHeader, checksumStruct, unpackField } from './_structs'

View File

@@ -39,7 +39,7 @@
"human-format": "^0.11.0",
"lodash": "^4.17.4",
"pw": "^0.0.4",
"xen-api": "^0.33.0"
"xen-api": "^0.34.3"
},
"devDependencies": {
"@babel/cli": "^7.1.5",

View File

@@ -8,6 +8,6 @@
"promise-toolbox": "^0.19.2",
"readable-stream": "^3.1.1",
"throttle": "^1.0.3",
"vhd-lib": "^1.0.0"
"vhd-lib": "^1.2.0"
}
}

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "xen-api",
"version": "0.33.0",
"version": "0.34.3",
"license": "ISC",
"description": "Connector to the Xen API",
"keywords": [
@@ -34,10 +34,11 @@
"bind-property-descriptor": "^1.0.0",
"blocked": "^1.2.1",
"debug": "^4.0.1",
"http-request-plus": "^0.10.0",
"http-request-plus": "^0.12",
"jest-diff": "^26.4.2",
"json-rpc-protocol": "^0.13.1",
"kindof": "^2.0.0",
"limit-concurrency-decorator": "^0.5.0",
"lodash": "^4.17.4",
"make-error": "^1.3.0",
"minimist": "^1.2.0",

View File

@@ -7,6 +7,7 @@ import { Collection } from 'xo-collection'
import { EventEmitter } from 'events'
import { map, noop, omit } from 'lodash'
import { cancelable, defer, fromCallback, fromEvents, ignoreErrors, pDelay, pRetry, pTimeout } from 'promise-toolbox'
import { limitConcurrency } from 'limit-concurrency-decorator'
import autoTransport from './transports/auto'
import coalesceCalls from './_coalesceCalls'
@@ -88,6 +89,8 @@ export class Xapi extends EventEmitter {
this._RecordsByType = { __proto__: null }
this._reverseHostIpAddresses = opts.reverseHostIpAddresses ?? false
this._call = limitConcurrency(opts.callConcurrency ?? 20)(this._call)
this._roCallRetryOptions = {
delay: 1e3,
tries: 10,
@@ -356,22 +359,35 @@ export class Xapi extends EventEmitter {
}
}
const response = await httpRequest(
$cancelToken,
this._url,
host !== undefined && {
hostname: await this._getHostAddress(this.getObject(host)),
},
let url = new URL('http://localhost')
url.protocol = this._url.protocol
url.pathname = pathname
url.search = new URLSearchParams(query)
await this._setHostAddressInUrl(url, host)
const response = await pRetry(
async () =>
httpRequest($cancelToken, url.href, {
rejectUnauthorized: !this._allowUnauthorized,
// this is an inactivity timeout (unclear in Node doc)
timeout: this._httpInactivityTimeout,
maxRedirects: 0,
// Support XS <= 6.5 with Node => 12
minVersion: 'TLSv1',
}),
{
pathname,
query,
rejectUnauthorized: !this._allowUnauthorized,
// this is an inactivity timeout (unclear in Node doc)
timeout: this._httpInactivityTimeout,
// Support XS <= 6.5 with Node => 12
minVersion: 'TLSv1',
when: { code: 302 },
onRetry: async error => {
const response = error.response
if (response === undefined) {
throw error
}
response.cancel()
url = await this._replaceHostAddressInUrl(new URL(response.headers.location, url))
},
}
)
@@ -418,32 +434,28 @@ export class Xapi extends EventEmitter {
headers['content-length'] = '1125899906842624'
}
const doRequest = httpRequest.put.bind(
undefined,
$cancelToken,
this._url,
host !== undefined && {
hostname: await this._getHostAddress(this.getObject(host)),
},
{
body,
headers,
pathname,
query,
rejectUnauthorized: !this._allowUnauthorized,
const url = new URL('http://localhost')
url.protocol = this._url.protocol
url.pathname = pathname
url.search = new URLSearchParams(query)
await this._setHostAddressInUrl(url, host)
// this is an inactivity timeout (unclear in Node doc)
timeout: this._httpInactivityTimeout,
const doRequest = httpRequest.put.bind(undefined, $cancelToken, {
body,
headers,
rejectUnauthorized: !this._allowUnauthorized,
// Support XS <= 6.5 with Node => 12
minVersion: 'TLSv1',
}
)
// this is an inactivity timeout (unclear in Node doc)
timeout: this._httpInactivityTimeout,
// Support XS <= 6.5 with Node => 12
minVersion: 'TLSv1',
})
// if body is a stream, sends a dummy request to probe for a redirection
// before consuming body
const response = await (isStream
? doRequest({
? doRequest(url.href, {
body: '',
// omit task_id because this request will fail on purpose
@@ -453,9 +465,9 @@ export class Xapi extends EventEmitter {
}).then(
response => {
response.cancel()
return doRequest()
return doRequest(url.href)
},
error => {
async error => {
let response
if (error != null && (response = error.response) != null) {
response.cancel()
@@ -466,14 +478,16 @@ export class Xapi extends EventEmitter {
} = response
if (statusCode === 302 && location !== undefined) {
// ensure the original query is sent
return doRequest(location, { query })
const newUrl = new URL(location, url)
newUrl.searchParams.set('task_id', query.task_id)
return doRequest((await this._replaceHostAddressInUrl(newUrl)).href)
}
}
throw error
}
)
: doRequest())
: doRequest(url.href))
if (pTaskResult !== undefined) {
pTaskResult = pTaskResult.catch(error => {
@@ -789,7 +803,35 @@ export class Xapi extends EventEmitter {
}
}
async _getHostAddress({ address }) {
async _setHostAddressInUrl(url, host) {
const pool = this._pool
const poolMigrationNetwork = pool.other_config['xo:migrationNetwork']
if (host === undefined) {
if (poolMigrationNetwork === undefined) {
const xapiUrl = this._url
url.hostname = xapiUrl.hostname
url.port = xapiUrl.port
return
}
host = await this.getRecord('host', pool.master)
}
let { address } = host
if (poolMigrationNetwork !== undefined) {
const hostPifs = new Set(host.PIFs)
try {
const networkRef = await this._roCall('network.get_by_uuid', [poolMigrationNetwork])
const networkPifs = await this.getField('network', networkRef, 'PIFs')
const migrationNetworkPifRef = networkPifs.find(hostPifs.has, hostPifs)
address = await this.getField('PIF', migrationNetworkPifRef, 'IP')
} catch (error) {
console.warn('unable to get the host address linked to the pool migration network', poolMigrationNetwork, error)
}
}
if (this._reverseHostIpAddresses) {
try {
;[address] = await fromCallback(dns.reverse, address)
@@ -797,7 +839,8 @@ export class Xapi extends EventEmitter {
console.warn('reversing host address', address, error)
}
}
return address
url.hostname = address
}
_setUrl(url) {
@@ -859,6 +902,19 @@ export class Xapi extends EventEmitter {
}
}
async _replaceHostAddressInUrl(url) {
try {
// TODO: look for hostname in all addresses of this host (including all its PIFs)
const host = (await this.getAllRecords('host')).find(host => host.address === url.hostname)
if (host !== undefined) {
await this._setHostAddressInUrl(url, host)
}
} catch (error) {
console.warn('_replaceHostAddressInUrl', url, error)
}
return url
}
_processEvents(events) {
const flush = this._objects.bufferEvents()
events.forEach(event => {

View File

@@ -1,31 +0,0 @@
#!/usr/bin/env node
import { pDelay } from 'promise-toolbox'
import { createClient } from './'
async function main([url]) {
const xapi = createClient({
allowUnauthorized: true,
url,
watchEvents: false,
})
await xapi.connect()
let loop = true
process.on('SIGINT', () => {
loop = false
})
const { pool } = xapi
// eslint-disable-next-line no-unmodified-loop-condition
while (loop) {
await pool.update_other_config('xo:injectEvents', Math.random().toString(36).slice(2))
await pDelay(1e2)
}
await pool.update_other_config('xo:injectEvents', null)
await xapi.disconnect()
}
main(process.argv.slice(2)).catch(console.error)

View File

@@ -15,24 +15,28 @@ const authorized = () => true // eslint-disable-line no-unused-vars
const forbiddden = () => false // eslint-disable-line no-unused-vars
// eslint-disable-next-line no-unused-vars
const and = (...checkers) => (object, permission) => {
for (const checker of checkers) {
if (!checker(object, permission)) {
return false
const and =
(...checkers) =>
(object, permission) => {
for (const checker of checkers) {
if (!checker(object, permission)) {
return false
}
}
return true
}
return true
}
// eslint-disable-next-line no-unused-vars
const or = (...checkers) => (object, permission) => {
for (const checker of checkers) {
if (checker(object, permission)) {
return true
const or =
(...checkers) =>
(object, permission) => {
for (const checker of checkers) {
if (checker(object, permission)) {
return true
}
}
return false
}
return false
}
// -------------------------------------------------------------------

View File

@@ -33,8 +33,8 @@
"bluebird": "^3.5.1",
"chalk": "^4.1.0",
"exec-promise": "^0.7.0",
"fs-extra": "^9.0.0",
"http-request-plus": "^0.10.0",
"fs-extra": "^10.0.0",
"http-request-plus": "^0.12",
"human-format": "^0.11.0",
"l33teral": "^3.0.3",
"lodash": "^4.17.4",

View File

@@ -7,10 +7,7 @@ import execPromise = require('exec-promise')
import through2 = require('through2')
import Xo from 'xo-lib'
const parseBoolean = (
value: string,
defaultValue?: boolean
): boolean | undefined => {
const parseBoolean = (value: string, defaultValue?: boolean): boolean | undefined => {
if (value === undefined || value === '') {
return defaultValue
}
@@ -49,30 +46,24 @@ execPromise(
const errors: any[] = []
const stream = process.stdin.pipe(csvParser()).pipe(
through2.obj(
(
{ allowUnauthorized, autoConnect, host, label, password, username },
_,
next
) => {
console.log('server', host)
through2.obj(({ allowUnauthorized, autoConnect, host, label, password, username }, _, next) => {
console.log('server', host)
xo.call('server.add', {
allowUnauthorized: parseBoolean(allowUnauthorized),
autoConnect: parseBoolean(autoConnect, false),
host,
label,
password,
username,
}).then(
() => next(),
(error: any) => {
errors.push({ host, error })
return next()
}
)
}
)
xo.call('server.add', {
allowUnauthorized: parseBoolean(allowUnauthorized),
autoConnect: parseBoolean(autoConnect, false),
host,
label,
password,
username,
}).then(
() => next(),
(error: any) => {
errors.push({ host, error })
return next()
}
)
})
)
await new Promise((resolve, reject) => {

View File

@@ -47,7 +47,7 @@
"dependencies": {
"@xen-orchestra/audit-core": "^0.2.0",
"@xen-orchestra/cron": "^1.0.6",
"@xen-orchestra/log": "^0.2.1",
"@xen-orchestra/log": "^0.3.0",
"async-iterator-to-stream": "^1.1.0",
"promise-toolbox": "^0.19.2",
"readable-stream": "^3.5.0",

View File

@@ -1,6 +1,6 @@
{
"name": "xo-server-auth-ldap",
"version": "0.10.2",
"version": "0.10.4",
"license": "AGPL-3.0-or-later",
"description": "LDAP authentication plugin for XO-Server",
"keywords": [
@@ -31,6 +31,8 @@
"node": ">=10"
},
"dependencies": {
"@xen-orchestra/log": "^0.3.0",
"ensure-array": "^1.0.0",
"exec-promise": "^0.7.0",
"inquirer": "^8.0.0",
"ldapts": "^2.2.1",

View File

@@ -1,10 +1,14 @@
/* eslint no-throw-literal: 0 */
import ensureArray from 'ensure-array'
import fromCallback from 'promise-toolbox/fromCallback'
import { Client } from 'ldapts'
import { createLogger } from '@xen-orchestra/log'
import { Filter } from 'ldapts/filters/Filter'
import { readFile } from 'fs'
const logger = createLogger('xo:xo-server-auth-ldap')
// ===================================================================
const DEFAULTS = {
@@ -26,8 +30,6 @@ const evalFilter = (filter, vars) =>
return escape(value)
})
const noop = Function.prototype
export const configurationSchema = {
type: 'object',
properties: {
@@ -183,8 +185,7 @@ export const testSchema = {
// ===================================================================
class AuthLdap {
constructor({ logger = noop, xo }) {
this._logger = logger
constructor({ xo } = {}) {
this._xo = xo
this._authenticate = this._authenticate.bind(this)
@@ -256,10 +257,8 @@ class AuthLdap {
}
async _authenticate({ username, password }) {
const logger = this._logger
if (username === undefined || password === undefined) {
logger('require `username` and `password` to authenticate!')
logger.debug('require `username` and `password` to authenticate!')
return null
}
@@ -275,29 +274,34 @@ class AuthLdap {
{
const { _credentials: credentials } = this
if (credentials) {
logger(`attempting to bind with as ${credentials.dn}...`)
logger.debug(`attempting to bind with as ${credentials.dn}...`)
await client.bind(credentials.dn, credentials.password)
logger(`successfully bound as ${credentials.dn}`)
logger.debug(`successfully bound as ${credentials.dn}`)
}
}
// Search for the user.
logger('searching for entries...')
logger.debug('searching for entries...')
const { searchEntries: entries } = await client.search(this._searchBase, {
scope: 'sub',
filter: evalFilter(this._searchFilter, {
name: username,
}),
})
logger(`${entries.length} entries found`)
logger.debug(`${entries.length} entries found`)
// Try to find an entry which can be bind with the given password.
for (const entry of entries) {
try {
logger(`attempting to bind as ${entry.dn}`)
logger.debug(`attempting to bind as ${entry.dn}`)
await client.bind(entry.dn, password)
logger(`successfully bound as ${entry.dn} => ${username} authenticated`)
logger(JSON.stringify(entry, null, 2))
logger.info(`successfully bound as ${entry.dn} => ${username} authenticated`)
logger.debug(JSON.stringify(entry, null, 2))
// CLI test: don't register user/sync groups
if (this._xo === undefined) {
return
}
let user
if (this._userIdAttribute === undefined) {
@@ -314,18 +318,18 @@ class AuthLdap {
try {
await this._synchronizeGroups(user, entry[groupsConfig.membersMapping.userAttribute])
} catch (error) {
logger(`failed to synchronize groups: ${error.message}`)
logger.error(`failed to synchronize groups: ${error.message}`)
}
}
}
return { userId: user.id }
} catch (error) {
logger(`failed to bind as ${entry.dn}: ${error.message}`)
logger.debug(`failed to bind as ${entry.dn}: ${error.message}`)
}
}
logger(`could not authenticate ${username}`)
logger.debug(`could not authenticate ${username}`)
return null
} finally {
await client.unbind()
@@ -334,7 +338,6 @@ class AuthLdap {
// Synchronize user's groups OR all groups if no user is passed
async _synchronizeGroups(user, memberId) {
const logger = this._logger
const client = new Client(this._clientOpts)
try {
@@ -346,12 +349,12 @@ class AuthLdap {
{
const { _credentials: credentials } = this
if (credentials) {
logger(`attempting to bind with as ${credentials.dn}...`)
logger.debug(`attempting to bind with as ${credentials.dn}...`)
await client.bind(credentials.dn, credentials.password)
logger(`successfully bound as ${credentials.dn}`)
logger.debug(`successfully bound as ${credentials.dn}`)
}
}
logger('syncing groups...')
logger.info('syncing groups...')
const { base, displayNameAttribute, filter, idAttribute, membersMapping } = this._groupsConfig
const { searchEntries: ldapGroups } = await client.search(base, {
scope: 'sub',
@@ -373,12 +376,11 @@ class AuthLdap {
// Empty or undefined names/IDs are invalid
if (!groupLdapId || !groupLdapName) {
logger(`Invalid group ID (${groupLdapId}) or name (${groupLdapName})`)
logger.error(`Invalid group ID (${groupLdapId}) or name (${groupLdapName})`)
continue
}
let ldapGroupMembers = ldapGroup[membersMapping.groupAttribute]
ldapGroupMembers = Array.isArray(ldapGroupMembers) ? ldapGroupMembers : [ldapGroupMembers]
const ldapGroupMembers = ensureArray(ldapGroup[membersMapping.groupAttribute])
// If a user was passed, only update the user's groups
if (user !== undefined && !ldapGroupMembers.includes(memberId)) {
@@ -393,7 +395,7 @@ class AuthLdap {
if (xoGroupIndex === -1) {
if (xoGroups.find(group => group.name === groupLdapName) !== undefined) {
// TODO: check against LDAP groups that are being created as well
logger(`A group called ${groupLdapName} already exists`)
logger.error(`A group called ${groupLdapName} already exists`)
continue
}
xoGroup = await this._xo.createGroup({
@@ -459,6 +461,8 @@ class AuthLdap {
xoGroups.filter(group => group.provider === 'ldap').map(group => this._xo.deleteGroup(group.id))
)
}
logger.info('done syncing groups')
} finally {
await client.unbind()
}

View File

@@ -1,6 +1,8 @@
#!/usr/bin/env node
import execPromise from 'exec-promise'
import transportConsole from '@xen-orchestra/log/transports/console'
import { configure } from '@xen-orchestra/log/configure.js'
import { fromCallback } from 'promise-toolbox'
import { readFile, writeFile } from 'fs'
@@ -28,9 +30,14 @@ execPromise(async args => {
}
)
const plugin = createPlugin({
logger: console.log.bind(console),
})
configure([
{
filter: process.env.DEBUG ?? 'xo:xo-server-auth-ldap',
transport: transportConsole(),
},
])
const plugin = createPlugin()
await plugin.configure(config)
await plugin._authenticate({

View File

@@ -33,7 +33,7 @@
},
"dependencies": {
"@xen-orchestra/defined": "^0.0.1",
"@xen-orchestra/log": "^0.2.1",
"@xen-orchestra/log": "^0.3.0",
"human-format": "^0.11.0",
"lodash": "^4.13.1",
"moment-timezone": "^0.5.13"

View File

@@ -1,6 +1,6 @@
{
"name": "xo-server-load-balancer",
"version": "0.6.0",
"version": "0.7.0",
"license": "AGPL-3.0-or-later",
"description": "Load balancer for XO-Server",
"keywords": [
@@ -28,6 +28,7 @@
},
"dependencies": {
"@xen-orchestra/cron": "^1.0.6",
"@xen-orchestra/log": "^0.3.0",
"lodash": "^4.16.2"
},
"devDependencies": {

View File

@@ -1,7 +1,9 @@
import { clone, filter, map as mapToArray } from 'lodash'
import Plan from './plan'
import { debug } from './utils'
import { debug as debugP } from './utils'
export const debug = str => debugP(`density: ${str}`)
// ===================================================================
@@ -87,7 +89,7 @@ export default class DensityPlan extends Plan {
hostsAverages = simulResults.hostsAverages
// Migrate.
await this._migrate(hostId, simulResults.moves)
await this._migrate(hostToOptimize, simulResults.moves)
optimizationsCount++
}
}
@@ -195,24 +197,27 @@ export default class DensityPlan extends Plan {
// Migrate the VMs of one host.
// Try to shutdown the VMs host.
async _migrate(hostId, moves) {
const xapiSrc = this.xo.getXapi(hostId)
async _migrate(srcHost, moves) {
const xapiSrc = this.xo.getXapi(srcHost.id)
const fmtSrcHost = `${srcHost.id} "${srcHost.name_label}"`
await Promise.all(
mapToArray(moves, move => {
const { vm, destination } = move
const xapiDest = this.xo.getXapi(destination)
debug(`Migrate VM (${vm.id}) to Host (${destination.id}) from Host (${vm.$container}).`)
debug(
`Migrate VM (${vm.id} "${vm.name_label}") to Host (${destination.id} "${destination.name_label}") from Host (${fmtSrcHost}).`
)
return xapiDest.migrateVm(vm._xapiId, this.xo.getXapi(destination), destination._xapiId)
})
)
debug(`Shutdown Host (${hostId}).`)
debug(`Shutdown Host (${fmtSrcHost}).`)
try {
await xapiSrc.shutdownHost(hostId)
await xapiSrc.shutdownHost(srcHost.id)
} catch (error) {
debug(`Unable to shutdown Host (${hostId}).`, error)
debug(`Unable to shutdown Host (${fmtSrcHost}).`, { error })
}
}
}

View File

@@ -1,7 +1,9 @@
import { filter } from 'lodash'
import Plan from './plan'
import { debug } from './utils'
import { debug as debugP } from './utils'
export const debug = str => debugP(`performance: ${str}`)
function epsiEqual(a, b, epsi = 0.001) {
const absA = Math.abs(a)
@@ -120,6 +122,7 @@ export default class PerformancePlan extends Plan {
const xapiSrc = this.xo.getXapi(exceededHost)
let optimizationCount = 0
const fmtSrcHost = `${exceededHost.id} "${exceededHost.name_label}"`
for (const vm of vms) {
// Stop migration if we are below low threshold.
if (
@@ -197,13 +200,15 @@ export default class PerformancePlan extends Plan {
exceededAverages.memoryFree += vmAverages.memory
destinationAverages.memoryFree -= vmAverages.memory
debug(`Migrate VM (${vm.id}) to Host (${destination.id}) from Host (${exceededHost.id}).`)
debug(
`Migrate VM (${vm.id} "${vm.name_label}") to Host (${destination.id} "${destination.name_label}") from Host (${fmtSrcHost}).`
)
optimizationCount++
promises.push(xapiSrc.migrateVm(vm._xapiId, this.xo.getXapi(destination), destination._xapiId))
}
await Promise.all(promises)
debug(`Performance mode: ${optimizationCount} optimizations for Host (${exceededHost.id}).`)
debug(`Performance mode: ${optimizationCount} optimizations for Host (${fmtSrcHost}).`)
}
}

View File

@@ -20,6 +20,8 @@ const LOW_THRESHOLD_MEMORY_FREE_FACTOR = 1.5
const numberOrDefault = (value, def) => (value >= 0 ? value : def)
export const debugAffinity = str => debug(`anti-affinity: ${str}`)
// ===================================================================
// Averages.
// ===================================================================
@@ -299,14 +301,14 @@ export default class Plan {
}
// 2. Migrate!
debug('Try to apply anti-affinity policy.')
debug(`VM tag count per host: ${inspect(taggedHosts, { depth: null })}.`)
debug(`Tags diff: ${inspect(tagsDiff, { depth: null })}.`)
debugAffinity('Try to apply anti-affinity policy.')
debugAffinity(`VM tag count per host: ${inspect(taggedHosts, { depth: null })}.`)
debugAffinity(`Tags diff: ${inspect(tagsDiff, { depth: null })}.`)
const vmsAverages = await this._getVmsAverages(allVms, idToHost)
const { averages: hostsAverages } = await this._getHostStatsAverages({ hosts: allHosts })
debug(`Hosts averages: ${inspect(hostsAverages, { depth: null })}.`)
debugAffinity(`Hosts averages: ${inspect(hostsAverages, { depth: null })}.`)
const promises = []
for (const tag in tagsDiff) {
@@ -314,7 +316,7 @@ export default class Plan {
}
// 3. Done!
debug(`VM tag count per host after migration: ${inspect(taggedHosts, { depth: null })}.`)
debugAffinity(`VM tag count per host after migration: ${inspect(taggedHosts, { depth: null })}.`)
return Promise.all(promises)
}
@@ -362,11 +364,11 @@ export default class Plan {
let vm
for (const destination of destinations) {
destinationHost = destination
debug(`Host candidate: ${sourceHost.id} -> ${destinationHost.id}.`)
debugAffinity(`Host candidate: ${sourceHost.id} -> ${destinationHost.id}.`)
const vms = filter(sourceVms, vm => hostsAverages[destinationHost.id].memoryFree >= vmsAverages[vm.id].memory)
debug(
debugAffinity(
`Tagged VM ("${tag}") candidates to migrate from host ${sourceHost.id}: ${inspect(mapToArray(vms, 'id'))}.`
)
vm = this._getAntiAffinityVmToMigrate({
@@ -385,7 +387,12 @@ export default class Plan {
if (!vm) {
continue // If we can't find a VM to migrate, we must try with another source!
}
debug(`Migrate VM (${vm.id}) to Host (${destinationHost.id}) from Host (${sourceHost.id}).`)
const source = idToHost[sourceHost.id]
const destination = idToHost[destinationHost.id]
debugAffinity(
`Migrate VM (${vm.id} "${vm.name_label}") to Host (${destinationHost.id} "${destination.name_label}") from Host (${sourceHost.id} "${source.name_label}").`
)
// 3. Update tags and averages.
// This update can change the source host for the next migration.
@@ -405,12 +412,7 @@ export default class Plan {
delete sourceHost.vms[vm.id]
// 4. Migrate.
const destination = idToHost[destinationHost.id]
promises.push(
this.xo
.getXapi(idToHost[sourceHost.id])
.migrateVm(vm._xapiId, this.xo.getXapi(destination), destination._xapiId)
)
promises.push(this.xo.getXapi(source).migrateVm(vm._xapiId, this.xo.getXapi(destination), destination._xapiId))
break // Continue with the same tag, the source can be different.
}
@@ -513,7 +515,7 @@ export default class Plan {
bestVariance = variance
bestVm = vm
} else {
debug(`VM (${vm.id}) of Host (${sourceHost.id}) does not support pool migration.`)
debugAffinity(`VM (${vm.id}) of Host (${sourceHost.id}) does not support pool migration.`)
}
}
}

View File

@@ -1,11 +1,7 @@
const noop = () => {}
import { createLogger } from '@xen-orchestra/log'
const LOAD_BALANCER_DEBUG = 1
export const { debug } = createLogger('xo:load-balancer')
// Delay between each resources evaluation in minutes.
// Must be less than MINUTES_OF_HISTORICAL_DATA.
export const EXECUTION_DELAY = 1
// ===================================================================
export const debug = LOAD_BALANCER_DEBUG ? str => console.log(`[load-balancer]${str}`) : noop

View File

@@ -1,6 +1,6 @@
{
"name": "xo-server-netbox",
"version": "0.1.1",
"version": "0.3.1",
"license": "AGPL-3.0-or-later",
"description": "Synchronizes pools managed by Xen Orchestra with Netbox",
"keywords": [
@@ -29,8 +29,8 @@
"node": ">=14.6"
},
"dependencies": {
"@xen-orchestra/log": "^0.2.1",
"is-in-subnet": "^4.0.1",
"@xen-orchestra/log": "^0.3.0",
"ipaddr.js": "^2.0.1",
"lodash": "^4.17.21"
},
"devDependencies": {

View File

@@ -1,7 +1,7 @@
import assert from 'assert'
import ipaddr from 'ipaddr.js'
import { createLogger } from '@xen-orchestra/log'
import { find, flatten, forEach, groupBy, isEmpty, keyBy, mapValues, trimEnd, zipObject } from 'lodash'
import { isInSubnet } from 'is-in-subnet'
import { find, flatten, forEach, groupBy, isEmpty, keyBy, mapValues, omit, trimEnd, zipObject } from 'lodash'
const log = createLogger('xo:netbox')
@@ -40,6 +40,7 @@ const onRequest = req => {
}
class Netbox {
#allowUnauthorized
#endpoint
#intervalToken
#loaded
@@ -58,6 +59,7 @@ class Netbox {
if (!/^https?:\/\//.test(this.#endpoint)) {
this.#endpoint = 'http://' + this.#endpoint
}
this.#allowUnauthorized = configuration.allowUnauthorized ?? false
this.#token = configuration.token
this.#pools = configuration.pools
this.#syncInterval = configuration.syncInterval && configuration.syncInterval * 60 * 60 * 1e3
@@ -97,15 +99,15 @@ class Netbox {
}
async #makeRequest(path, method, data) {
log.debug(
`${method} ${path}`,
const dataDebug =
Array.isArray(data) && data.length > 2 ? [...data.slice(0, 2), `and ${data.length - 2} others`] : data
)
log.debug(`${method} ${path}`, dataDebug)
let url = this.#endpoint + '/api' + path
const options = {
headers: { 'Content-Type': 'application/json', Authorization: `Token ${this.#token}` },
method,
onRequest,
rejectUnauthorized: !this.#allowUnauthorized,
}
const httpRequest = async () => {
@@ -116,10 +118,15 @@ class Netbox {
return JSON.parse(body)
}
} catch (error) {
error.data = {
method,
path,
body: dataDebug,
}
try {
const body = await error.response.readAll()
if (body.length > 0) {
log.error(body.toString())
error.data.error = JSON.parse(body)
}
} catch {
throw error
@@ -160,7 +167,23 @@ class Netbox {
return results
}
async #checkCustomFields() {
const customFields = await this.#makeRequest('/extras/custom-fields/', 'GET')
const uuidCustomField = customFields.find(field => field.name === 'uuid')
if (uuidCustomField === undefined) {
throw new Error('UUID custom field was not found. Please create it manually from your Netbox interface.')
}
const { content_types: types } = uuidCustomField
if (!types.includes('virtualization.cluster') || !types.includes('virtualization.virtualmachine')) {
throw new Error(
'UUID custom field must be assigned to types virtualization.cluster and virtualization.virtualmachine'
)
}
}
async #synchronize(pools = this.#pools) {
await this.#checkCustomFields()
const xo = this.#xo
log.debug('synchronizing')
// Cluster type
@@ -211,6 +234,10 @@ class Netbox {
}
}
// FIXME: Should we deduplicate cluster names even though it also fails when
// a cluster within another cluster type has the same name?
// FIXME: Should we delete clusters from this cluster type that don't have a
// UUID?
Object.assign(
clusters,
keyBy(
@@ -230,30 +257,54 @@ class Netbox {
// VMs
const vms = xo.getObjects({ filter: object => object.type === 'VM' && pools.includes(object.$pool) })
const oldNetboxVms = keyBy(
flatten(
// FIXME: It should be doable with one request:
// `cluster_id=1&cluster_id=2` but it doesn't work
// https://netbox.readthedocs.io/en/stable/rest-api/filtering/#filtering-objects
await Promise.all(
pools.map(poolId =>
this.#makeRequest(`/virtualization/virtual-machines/?cluster_id=${clusters[poolId].id}`, 'GET')
)
let oldNetboxVms = flatten(
// FIXME: It should be doable with one request:
// `cluster_id=1&cluster_id=2` but it doesn't work
// https://netbox.readthedocs.io/en/stable/rest-api/filtering/#filtering-objects
await Promise.all(
pools.map(poolId =>
this.#makeRequest(`/virtualization/virtual-machines/?cluster_id=${clusters[poolId].id}`, 'GET')
)
),
'custom_fields.uuid'
)
)
const vmsWithNoUuid = oldNetboxVms.filter(vm => vm.custom_fields.uuid === null)
oldNetboxVms = omit(keyBy(oldNetboxVms, 'custom_fields.uuid'), null)
// Delete VMs that don't have a UUID custom field. This can happen if they
// were created manually or if the custom field config was changed after
// their creation
if (vmsWithNoUuid !== undefined) {
log.warn(`Found ${vmsWithNoUuid.length} VMs with no UUID. Deleting them.`)
await this.#makeRequest(
'/virtualization/virtual-machines/',
'DELETE',
vmsWithNoUuid.map(vm => ({ id: vm.id }))
)
}
// Build collections for later
const netboxVms = {} // VM UUID → Netbox VM
const vifsByVm = {} // VM UUID → VIF
const vifsByVm = {} // VM UUID → VIF UUID[]
const ipsByDeviceByVm = {} // VM UUID → (VIF device → IP)
const primaryIpsByVm = {} // VM UUID → { ipv4, ipv6 }
const vmsToCreate = []
const vmsToUpdate = []
let vmsToUpdate = [] // will be reused for primary IPs
for (const vm of Object.values(vms)) {
vifsByVm[vm.uuid] = vm.VIFs
const vmIpsByDevice = (ipsByDeviceByVm[vm.uuid] = {})
if (primaryIpsByVm[vm.uuid] === undefined) {
primaryIpsByVm[vm.uuid] = {}
}
if (vm.addresses['0/ipv4/0'] !== undefined) {
primaryIpsByVm[vm.uuid].ipv4 = vm.addresses['0/ipv4/0']
}
if (vm.addresses['0/ipv6/0'] !== undefined) {
primaryIpsByVm[vm.uuid].ipv6 = ipaddr.parse(vm.addresses['0/ipv6/0']).toString()
}
forEach(vm.addresses, (address, key) => {
const device = key.split('/')[0]
if (vmIpsByDevice[device] === undefined) {
@@ -445,32 +496,38 @@ class Netbox {
this.#makeRequest('/virtualization/interfaces/', 'DELETE', interfacesToDelete),
isEmpty(interfacesToCreateByVif)
? {}
: this.#makeRequest(
'/virtualization/interfaces/',
'POST',
Object.values(interfacesToCreateByVif)
).then(interfaces => zipObject(Object.keys(interfacesToCreateByVif), interfaces)),
: this.#makeRequest('/virtualization/interfaces/', 'POST', Object.values(interfacesToCreateByVif)).then(
interfaces => zipObject(Object.keys(interfacesToCreateByVif), interfaces)
),
isEmpty(interfacesToUpdateByVif)
? {}
: this.#makeRequest(
'/virtualization/interfaces/',
'PATCH',
Object.values(interfacesToUpdateByVif)
).then(interfaces => zipObject(Object.keys(interfacesToUpdateByVif), interfaces)),
: this.#makeRequest('/virtualization/interfaces/', 'PATCH', Object.values(interfacesToUpdateByVif)).then(
interfaces => zipObject(Object.keys(interfacesToUpdateByVif), interfaces)
),
])
)
.slice(1)
.forEach(newInterfaces => Object.assign(interfaces, newInterfaces))
// IPs
const [oldNetboxIps, prefixes] = await Promise.all([
this.#makeRequest('/ipam/ip-addresses/', 'GET').then(addresses => groupBy(addresses, 'assigned_object_id')),
const [oldNetboxIps, netboxPrefixes] = await Promise.all([
this.#makeRequest('/ipam/ip-addresses/', 'GET').then(addresses =>
groupBy(
// In Netbox, a device interface and a VM interface can have the same
// ID and an IP address can be assigned to both types of interface, so
// we need to make sure that we only get IPs that are assigned to a VM
// interface before grouping them by their `assigned_object_id`
addresses.filter(address => address.assigned_object_type === 'virtualization.vminterface'),
'assigned_object_id'
)
),
this.#makeRequest('/ipam/prefixes/', 'GET'),
])
const ipsToDelete = []
const ipsToCreate = []
const ignoredIps = []
const netboxIpsByVif = {}
for (const [vmUuid, vifs] of Object.entries(vifsByVm)) {
const vmIpsByDevice = ipsByDeviceByVm[vmUuid]
if (vmIpsByDevice === undefined) {
@@ -483,26 +540,45 @@ class Netbox {
continue
}
netboxIpsByVif[vifId] = []
const interface_ = interfaces[vif.uuid]
const interfaceOldIps = oldNetboxIps[interface_.id] ?? []
for (const ip of vifIps) {
// FIXME: Should we compare the IPs with their range? ie: can 2 IPs
// look identical but belong to 2 different ranges?
const netboxIpIndex = interfaceOldIps.findIndex(netboxIp => netboxIp.address.split('/')[0] === ip)
const parsedIp = ipaddr.parse(ip)
const ipKind = parsedIp.kind()
const ipCompactNotation = parsedIp.toString()
let smallestPrefix
let highestBits = 0
netboxPrefixes.forEach(({ prefix }) => {
const [range, bits] = prefix.split('/')
const parsedRange = ipaddr.parse(range)
if (parsedRange.kind() === ipKind && parsedIp.match(parsedRange, bits) && bits > highestBits) {
smallestPrefix = prefix
highestBits = bits
}
})
if (smallestPrefix === undefined) {
ignoredIps.push(ip)
continue
}
const netboxIpIndex = interfaceOldIps.findIndex(netboxIp => {
const [ip, bits] = netboxIp.address.split('/')
return ipaddr.parse(ip).toString() === ipCompactNotation && bits === highestBits
})
if (netboxIpIndex >= 0) {
netboxIpsByVif[vifId].push(interfaceOldIps[netboxIpIndex])
interfaceOldIps.splice(netboxIpIndex, 1)
} else {
const prefix = prefixes.find(({ prefix }) => isInSubnet(ip, prefix))
if (prefix === undefined) {
ignoredIps.push(ip)
continue
}
ipsToCreate.push({
address: `${ip}/${prefix.prefix.split('/')[1]}`,
address: `${ip}/${smallestPrefix.split('/')[1]}`,
assigned_object_type: 'virtualization.vminterface',
assigned_object_id: interface_.id,
vifId, // needed to populate netboxIpsByVif with newly created IPs
})
}
}
@@ -516,9 +592,61 @@ class Netbox {
await Promise.all([
ipsToDelete.length !== 0 && this.#makeRequest('/ipam/ip-addresses/', 'DELETE', ipsToDelete),
ipsToCreate.length !== 0 && this.#makeRequest('/ipam/ip-addresses/', 'POST', ipsToCreate),
ipsToCreate.length !== 0 &&
this.#makeRequest(
'/ipam/ip-addresses/',
'POST',
ipsToCreate.map(ip => omit(ip, 'vifId'))
).then(newNetboxIps => {
newNetboxIps.forEach((newNetboxIp, i) => {
const { vifId } = ipsToCreate[i]
if (netboxIpsByVif[vifId] === undefined) {
netboxIpsByVif[vifId] = []
}
netboxIpsByVif[vifId].push(newNetboxIp)
})
}),
])
// Primary IPs
vmsToUpdate = []
Object.entries(netboxVms).forEach(([vmId, netboxVm]) => {
if (netboxVm.primary_ip4 !== null && netboxVm.primary_ip6 !== null) {
return
}
const newNetboxVm = { id: netboxVm.id }
const vifs = vifsByVm[vmId]
vifs.forEach(vifId => {
const netboxIps = netboxIpsByVif[vifId]
const vmMainIps = primaryIpsByVm[vmId]
netboxIps?.forEach(netboxIp => {
const address = netboxIp.address.split('/')[0]
if (
newNetboxVm.primary_ip4 === undefined &&
address === vmMainIps.ipv4 &&
netboxVm.primary_ip4?.address !== netboxIp.address
) {
newNetboxVm.primary_ip4 = netboxIp.id
}
if (
newNetboxVm.primary_ip6 === undefined &&
address === vmMainIps.ipv6 &&
netboxVm.primary_ip6?.address !== netboxIp.address
) {
newNetboxVm.primary_ip6 = netboxIp.id
}
})
})
if (newNetboxVm.primary_ip4 !== undefined || newNetboxVm.primary_ip6 !== undefined) {
vmsToUpdate.push(newNetboxVm)
}
})
if (vmsToUpdate.length > 0) {
await this.#makeRequest('/virtualization/virtual-machines/', 'PATCH', vmsToUpdate)
}
log.debug('synchronized')
}
@@ -536,6 +664,8 @@ class Netbox {
'GET'
)
await this.#checkCustomFields()
if (clusterTypes.length !== 1) {
throw new Error('Could not properly write and read Netbox')
}
@@ -554,6 +684,11 @@ export const configurationSchema = ({ xo: { apiMethods } }) => ({
title: 'Endpoint',
description: 'Netbox URI',
},
allowUnauthorized: {
type: 'boolean',
title: 'Unauthorized certificates',
description: 'Enable this if your Netbox instance uses a self-signed SSL certificate',
},
token: {
type: 'string',
title: 'Token',

View File

@@ -1,6 +1,6 @@
{
"name": "xo-server-perf-alert",
"version": "0.3.1",
"version": "0.3.2",
"license": "AGPL-3.0-or-later",
"description": "Sends alerts based on performance criteria",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-server-perf-alert",

View File

@@ -1,6 +1,6 @@
import JSON5 from 'json5'
import { createSchedule } from '@xen-orchestra/cron'
import { forOwn, map, mean } from 'lodash'
import { filter, forOwn, map, mean } from 'lodash'
import { utcParse } from 'd3-time-format'
const XAPI_TO_XENCENTER = {
@@ -158,9 +158,9 @@ export const configurationSchema = {
type: 'object',
properties: {
smartMode: {
title: 'All hosts',
title: 'All running hosts',
type: 'boolean',
description: 'When enabled, all hosts will be considered for the alert.',
description: 'When enabled, all running hosts will be considered for the alert.',
default: false,
},
uuids: {
@@ -218,9 +218,9 @@ export const configurationSchema = {
type: 'object',
properties: {
smartMode: {
title: 'All VMs',
title: 'All running VMs',
type: 'boolean',
description: 'When enabled, all VMs will be considered for the alert.',
description: 'When enabled, all running VMs will be considered for the alert.',
default: false,
},
uuids: {
@@ -419,7 +419,8 @@ ${monitorBodies.join('\n')}`
}
_parseDefinition(definition) {
const lcObjectType = definition.objectType.toLowerCase()
const { objectType } = definition
const lcObjectType = objectType.toLowerCase()
const alarmId = `${lcObjectType}|${definition.variableName}|${definition.alarmTriggerLevel}`
const typeFunction = TYPE_FUNCTION_MAP[lcObjectType][definition.variableName]
const parseData = (result, uuid) => {
@@ -470,7 +471,12 @@ ${monitorBodies.join('\n')}`
return Promise.all(
map(
definition.smartMode
? map(this._xo.getObjects({ filter: { type: definition.objectType } }), obj => obj.uuid)
? filter(
this._xo.getObjects(),
obj =>
obj.type === objectType &&
((objectType !== 'VM' && objectType !== 'host') || obj.power_state === 'Running')
).map(obj => obj.uuid)
: definition.uuids,
async uuid => {
try {

View File

@@ -29,7 +29,7 @@
"cross-env": "^7.0.2"
},
"dependencies": {
"@xen-orchestra/log": "^0.2.1",
"@xen-orchestra/log": "^0.3.0",
"@xen-orchestra/openflow": "^0.1.1",
"@vates/coalesce-calls": "^0.1.0",
"ipaddr.js": "^1.9.1",

View File

@@ -31,7 +31,7 @@
"dependencies": {
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/cron": "^1.0.6",
"@xen-orchestra/log": "^0.2.1",
"@xen-orchestra/log": "^0.3.0",
"csv-stringify": "^5.5.0",
"handlebars": "^4.0.6",
"html-minifier": "^4.0.0",

View File

@@ -29,7 +29,7 @@
"node": ">=8.10"
},
"dependencies": {
"@xen-orchestra/log": "^0.2.1"
"@xen-orchestra/log": "^0.3.0"
},
"devDependencies": {
"@babel/cli": "^7.7.0",

View File

@@ -70,7 +70,7 @@ mergeProvidersUsers = true
# should be used by default.
defaultSignInPage = '/signin'
# Minimum delay between two password authentication attemps.
# Minimum delay between two password authentication attempts for a specific user.
#
# This is used to mitigate bruteforce attacks without being visible to users.
throttlingDelay = '2 seconds'
@@ -131,6 +131,13 @@ port = 80
[http.mounts]
'/' = '../xo-web/dist'
[logs]
# Display all logs matching this filter, regardless of their level
#filter = 'xo:load-balancer'
# Display all logs with level >=, regardless of their namespace
level = 'info'
[plugins]
[remoteOptions]

View File

@@ -1,7 +1,7 @@
{
"private": true,
"name": "xo-server",
"version": "5.79.5",
"version": "5.82.3",
"license": "AGPL-3.0-or-later",
"description": "Server part of Xen-Orchestra",
"keywords": [
@@ -35,17 +35,17 @@
"@vates/parse-duration": "^0.1.1",
"@vates/read-chunk": "^0.1.2",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/backups": "^0.11.0",
"@xen-orchestra/backups": "^0.13.0",
"@xen-orchestra/cron": "^1.0.6",
"@xen-orchestra/defined": "^0.0.1",
"@xen-orchestra/emit-async": "^0.1.0",
"@xen-orchestra/fs": "^0.17.0",
"@xen-orchestra/log": "^0.2.1",
"@xen-orchestra/fs": "^0.18.0",
"@xen-orchestra/log": "^0.3.0",
"@xen-orchestra/mixin": "^0.1.0",
"@xen-orchestra/mixins": "^0.1.0",
"@xen-orchestra/mixins": "^0.1.1",
"@xen-orchestra/self-signed": "^0.1.0",
"@xen-orchestra/template": "^0.1.0",
"@xen-orchestra/xapi": "^0.6.3",
"@xen-orchestra/xapi": "^0.7.0",
"ajv": "^8.0.3",
"app-conf": "^0.9.0",
"async-iterator-to-stream": "^1.0.1",
@@ -68,14 +68,14 @@
"express-session": "^1.15.6",
"fast-xml-parser": "^3.17.4",
"fatfs": "^0.10.4",
"fs-extra": "^9.0.0",
"fs-extra": "^10.0.0",
"get-stream": "^6.0.0",
"golike-defer": "^0.5.1",
"hashy": "^0.10.0",
"helmet": "^3.9.0",
"highland": "^2.11.1",
"http-proxy": "^1.16.2",
"http-request-plus": "^0.10.0",
"http-request-plus": "^0.12",
"http-server-plus": "^0.11.0",
"human-format": "^0.11.0",
"iterable-backoff": "^0.1.0",
@@ -122,10 +122,11 @@
"unzipper": "^0.10.5",
"uuid": "^8.3.1",
"value-matcher": "^0.2.0",
"vhd-lib": "^1.0.0",
"vhd-lib": "^1.2.0",
"web-push": "^3.4.5",
"ws": "^7.1.2",
"xdg-basedir": "^4.0.0",
"xen-api": "^0.33.0",
"xen-api": "^0.34.3",
"xo-acl-resolver": "^0.4.1",
"xo-collection": "^0.5.0",
"xo-common": "^0.7.0",
@@ -146,7 +147,7 @@
"@babel/preset-env": "^7.0.0",
"babel-plugin-transform-dev": "^2.0.1",
"cross-env": "^7.0.2",
"index-modules": "^0.4.2"
"index-modules": "^0.4.3"
},
"scripts": {
"_build": "index-modules --index-file index.mjs src/api src/xapi/mixins src/xo-mixins && babel --delete-dir-on-start --keep-file-extension --source-maps --out-dir=dist/ src/",

View File

@@ -1,11 +1,12 @@
import * as multiparty from 'multiparty'
import assert from 'assert'
import getStream from 'get-stream'
import pump from 'pump'
import { createLogger } from '@xen-orchestra/log'
import { defer } from 'golike-defer'
import { format } from 'json-rpc-peer'
import { format, JsonRpcError } from 'json-rpc-peer'
import { noSuchObject } from 'xo-common/api-errors.js'
import { peekFooterFromVhdStream } from 'vhd-lib'
import { checkFooter, peekFooterFromVhdStream } from 'vhd-lib'
import { vmdkToVhd } from 'xo-vmdk-to-vhd'
import { VDI_FORMAT_VHD } from '../xapi/index.mjs'
@@ -161,44 +162,59 @@ async function handleImport(req, res, { type, name, description, vmdkData, srId,
const form = new multiparty.Form()
form.on('error', reject)
form.on('part', async part => {
if (part.name !== 'file') {
promises.push(
(async () => {
const view = new DataView((await getStream.buffer(part)).buffer)
const result = new Uint32Array(view.byteLength / 4)
for (const i in result) {
result[i] = view.getUint32(i * 4, true)
}
vmdkData[part.name] = result
})()
)
} else {
await Promise.all(promises)
part.length = part.byteCount
if (type === 'vmdk') {
vhdStream = await vmdkToVhd(part, vmdkData.grainLogicalAddressList, vmdkData.grainFileOffsetList)
size = vmdkData.capacity
} else if (type === 'vhd') {
vhdStream = part
const footer = await peekFooterFromVhdStream(vhdStream)
size = footer.currentSize
try {
if (part.name !== 'file') {
promises.push(
(async () => {
const buffer = await getStream.buffer(part)
vmdkData[part.name] = new Uint32Array(
buffer.buffer,
buffer.byteOffset,
buffer.length / Uint32Array.BYTES_PER_ELEMENT
)
})()
)
} else {
throw new Error(`Unknown disk type, expected "vhd" or "vmdk", got ${type}`)
await Promise.all(promises)
part.length = part.byteCount
if (type === 'vmdk') {
vhdStream = await vmdkToVhd(part, vmdkData.grainLogicalAddressList, vmdkData.grainFileOffsetList)
size = vmdkData.capacity
} else if (type === 'vhd') {
vhdStream = part
const footer = await peekFooterFromVhdStream(vhdStream)
try {
checkFooter(footer)
} catch (e) {
if (e instanceof assert.AssertionError) {
throw new JsonRpcError(`Vhd file had an invalid header ${e}`)
}
}
size = footer.currentSize
} else {
throw new JsonRpcError(`Unknown disk type, expected "vhd" or "vmdk", got ${type}`)
}
const vdi = await xapi.createVdi({
name_description: description,
name_label: name,
size,
sr: srId,
})
try {
await xapi.importVdiContent(vdi, vhdStream, VDI_FORMAT_VHD)
res.end(format.response(0, vdi.$id))
} catch (e) {
await vdi.$destroy()
throw e
}
resolve()
}
const vdi = await xapi.createVdi({
name_description: description,
name_label: name,
size,
sr: srId,
})
try {
await xapi.importVdiContent(vdi, vhdStream, VDI_FORMAT_VHD)
res.end(format.response(0, vdi.$id))
} catch (e) {
await vdi.$destroy()
throw e
}
resolve()
} catch (e) {
res.writeHead(500)
res.end(format.error(0, new JsonRpcError(e.message)))
// destroy the reader to stop the file upload
req.destroy()
reject(e)
}
})
form.parse(req)

View File

@@ -6,7 +6,7 @@ import { format } from 'json-rpc-peer'
export function setMaintenanceMode({ host, maintenance }) {
const xapi = this.getXapi(host)
return maintenance ? xapi.clearHost({ $ref: host._xapiRef }) : xapi.enableHost(host._xapiId)
return maintenance ? xapi.clearHost(xapi.getObject(host)) : xapi.enableHost(host._xapiId)
}
setMaintenanceMode.description = 'manage the maintenance mode'

View File

@@ -28,7 +28,6 @@ create.params = {
create.resolve = {
pool: ['pool', 'pool', 'administrate'],
}
create.permission = 'admin'
// =================================================================
@@ -63,7 +62,6 @@ createBonded.params = {
createBonded.resolve = {
pool: ['pool', 'pool', 'administrate'],
}
createBonded.permission = 'admin'
createBonded.description = 'Create a bonded network. bondMode can be balance-slb, active-backup or lacp'
// ===================================================================

View File

@@ -24,7 +24,10 @@ getMethodsInfo.permission = null // user does not need to be authenticated
// -------------------------------------------------------------------
export const getServerTimezone = (tz => () => tz)(moment.tz.guess())
export const getServerTimezone = (
tz => () =>
tz
)(moment.tz.guess())
getServerTimezone.description = 'return the timezone server'
// -------------------------------------------------------------------

View File

@@ -8,13 +8,7 @@ import { defer } from 'golike-defer'
import { FAIL_ON_QUEUE } from 'limit-concurrency-decorator'
import { format } from 'json-rpc-peer'
import { ignoreErrors } from 'promise-toolbox'
import {
forbiddenOperation,
invalidParameters,
noSuchObject,
operationFailed,
unauthorized,
} from 'xo-common/api-errors.js'
import { invalidParameters, noSuchObject, operationFailed, unauthorized } from 'xo-common/api-errors.js'
import { forEach, map, mapFilter, parseSize, safeDateFormat } from '../utils.mjs'
@@ -838,219 +832,6 @@ snapshot.resolve = {
// -------------------------------------------------------------------
export function rollingDeltaBackup({ vm, remote, tag, depth, retention = depth }) {
return this.rollingDeltaVmBackup({
vm,
remoteId: remote,
tag,
retention,
})
}
rollingDeltaBackup.params = {
id: { type: 'string' },
remote: { type: 'string' },
tag: { type: 'string' },
retention: { type: ['string', 'number'], optional: true },
// This parameter is deprecated. It used to support the old saved backups jobs.
depth: { type: ['string', 'number'], optional: true },
}
rollingDeltaBackup.resolve = {
vm: ['id', ['VM', 'VM-snapshot'], 'administrate'],
}
rollingDeltaBackup.permission = 'admin'
// -------------------------------------------------------------------
export function importDeltaBackup({ sr, remote, filePath, mapVdisSrs }) {
const mapVdisSrsXapi = {}
forEach(mapVdisSrs, (srId, vdiId) => {
mapVdisSrsXapi[vdiId] = this.getObject(srId, 'SR')._xapiId
})
return this.importDeltaVmBackup({
sr,
remoteId: remote,
filePath,
mapVdisSrs: mapVdisSrsXapi,
}).then(_ => _.vm)
}
importDeltaBackup.params = {
sr: { type: 'string' },
remote: { type: 'string' },
filePath: { type: 'string' },
// Map VDIs UUIDs --> SRs IDs
mapVdisSrs: { type: 'object', optional: true },
}
importDeltaBackup.resolve = {
sr: ['sr', 'SR', 'operate'],
}
importDeltaBackup.permission = 'admin'
// -------------------------------------------------------------------
export function deltaCopy({ force, vm, retention, sr }) {
return this.deltaCopyVm(vm, sr, force, retention)
}
deltaCopy.params = {
force: { type: 'boolean', optional: true },
id: { type: 'string' },
retention: { type: 'number', optional: true },
sr: { type: 'string' },
}
deltaCopy.resolve = {
vm: ['id', 'VM', 'operate'],
sr: ['sr', 'SR', 'operate'],
}
// -------------------------------------------------------------------
export async function rollingSnapshot({ vm, tag, depth, retention = depth }) {
await checkPermissionOnSrs.call(this, vm)
return this.rollingSnapshotVm(vm, tag, retention)
}
rollingSnapshot.params = {
id: { type: 'string' },
tag: { type: 'string' },
retention: { type: 'number', optional: true },
// This parameter is deprecated. It used to support the old saved backups jobs.
depth: { type: 'number', optional: true },
}
rollingSnapshot.resolve = {
vm: ['id', 'VM', 'administrate'],
}
rollingSnapshot.description =
'Snapshots a VM with a tagged name, and removes the oldest snapshot with the same tag according to retention'
// -------------------------------------------------------------------
export function backup({ vm, remoteId, file, compress }) {
return this.backupVm({ vm, remoteId, file, compress })
}
backup.permission = 'admin'
backup.params = {
id: { type: 'string' },
remoteId: { type: 'string' },
file: { type: 'string' },
compress: { type: 'boolean', optional: true },
}
backup.resolve = {
vm: ['id', 'VM', 'administrate'],
}
backup.description = 'Exports a VM to the file system'
// -------------------------------------------------------------------
export function importBackup({ remote, file, sr }) {
return this.importVmBackup(remote, file, sr)
}
importBackup.permission = 'admin'
importBackup.description = 'Imports a VM into host, from a file found in the chosen remote'
importBackup.params = {
remote: { type: 'string' },
file: { type: 'string' },
sr: { type: 'string' },
}
importBackup.resolve = {
sr: ['sr', 'SR', 'operate'],
}
importBackup.permission = 'admin'
// -------------------------------------------------------------------
export function rollingBackup({ vm, remoteId, tag, depth, retention = depth, compress }) {
return this.rollingBackupVm({
vm,
remoteId,
tag,
retention,
compress,
})
}
rollingBackup.permission = 'admin'
rollingBackup.params = {
id: { type: 'string' },
remoteId: { type: 'string' },
tag: { type: 'string' },
retention: { type: 'number', optional: true },
// This parameter is deprecated. It used to support the old saved backups jobs.
depth: { type: 'number', optional: true },
compress: { type: 'boolean', optional: true },
}
rollingBackup.resolve = {
vm: ['id', ['VM', 'VM-snapshot'], 'administrate'],
}
rollingBackup.description =
'Exports a VM to the file system with a tagged name, and removes the oldest backup with the same tag according to retention'
// -------------------------------------------------------------------
export function rollingDrCopy({ vm, pool, sr, tag, depth, retention = depth, deleteOldBackupsFirst }) {
if (sr === undefined) {
if (pool === undefined) {
throw invalidParameters('either pool or sr param should be specified')
}
if (vm.$pool === pool.id) {
throw forbiddenOperation('Disaster Recovery attempts to copy on the same pool')
}
sr = this.getObject(pool.default_SR, 'SR')
}
return this.rollingDrCopyVm({
vm,
sr,
tag,
retention,
deleteOldBackupsFirst,
})
}
rollingDrCopy.params = {
retention: { type: 'number', optional: true },
// This parameter is deprecated. It used to support the old saved backups jobs.
depth: { type: 'number', optional: true },
id: { type: 'string' },
pool: { type: 'string', optional: true },
sr: { type: 'string', optional: true },
tag: { type: 'string' },
deleteOldBackupsFirst: { type: 'boolean', optional: true },
}
rollingDrCopy.resolve = {
vm: ['id', ['VM', 'VM-snapshot'], 'administrate'],
pool: ['pool', 'pool', 'administrate'],
sr: ['sr', 'SR', 'administrate'],
}
rollingDrCopy.description =
'Copies a VM to a different pool, with a tagged name, and removes the oldest VM with the same tag from this pool, according to retention'
// -------------------------------------------------------------------
export function start({ vm, bypassMacAddressesCheck, force, host }) {
return this.getXapi(vm).startVm(vm._xapiId, { bypassMacAddressesCheck, force, hostId: host?._xapiId })
}
@@ -1269,12 +1050,12 @@ async function handleVmImport(req, res, { data, srId, type, xapi }) {
if (!(part.filename in tables)) {
tables[part.filename] = {}
}
const view = new DataView((await getStream.buffer(part)).buffer)
const result = new Uint32Array(view.byteLength / 4)
for (const i in result) {
result[i] = view.getUint32(i * 4, true)
}
tables[part.filename][part.name] = result
const buffer = await getStream.buffer(part)
tables[part.filename][part.name] = new Uint32Array(
buffer.buffer,
buffer.byteOffset,
buffer.length / Uint32Array.BYTES_PER_ELEMENT
)
data.tables = tables
})()
)

View File

@@ -1,6 +1,8 @@
// See: https://gist.github.com/julien-f/5b9a3537eb82a34b04e2
import { matcher } from 'micromatch'
import micromatch from 'micromatch'
const { matcher } = micromatch
export default function globMatcher(patterns, opts) {
if (!Array.isArray(patterns)) {

View File

@@ -20,6 +20,7 @@ import proxyConsole from './proxy-console.mjs'
import pw from 'pw'
import serveStatic from 'serve-static'
import stoppable from 'stoppable'
import webpush from 'web-push'
import WebServer from 'http-server-plus'
import WebSocket from 'ws'
import xdg from 'xdg-basedir'
@@ -71,8 +72,8 @@ configure([
{
filter: process.env.DEBUG,
level: 'info',
transport: transportConsole(),
},
transport: transportConsole()
}
])
const log = createLogger('xo:main')
@@ -84,7 +85,7 @@ const DEPRECATED_ENTRIES = ['users', 'servers']
async function loadConfiguration() {
const config = await appConf.load(APP_NAME, {
appDir: APP_DIR,
ignoreUnknownFormats: true,
ignoreUnknownFormats: true
})
log.info('Configuration loaded.')
@@ -105,7 +106,7 @@ async function updateLocalConfig(diff) {
const localConfig = await fse.readFile(LOCAL_CONFIG_FILE).then(JSON.parse, () => ({}))
merge(localConfig, diff)
await fse.outputFile(LOCAL_CONFIG_FILE, JSON.stringify(localConfig), {
mode: 0o600,
mode: 0o600
})
}
@@ -135,8 +136,8 @@ async function createExpressApp(config) {
saveUninitialized: false,
secret: sessionSecret,
store: new MemoryStore({
checkPeriod: 24 * 3600 * 1e3,
}),
checkPeriod: 24 * 3600 * 1e3
})
})
)
@@ -174,7 +175,7 @@ async function setUpPassport(express, xo, { authentication: authCfg, http: { coo
res.send(
signInPage({
error: req.flash('error')[0],
strategies,
strategies
})
)
})
@@ -193,7 +194,7 @@ async function setUpPassport(express, xo, { authentication: authCfg, http: { coo
signInPage({
error: req.flash('error')[0],
otp: true,
strategies,
strategies
})
)
})
@@ -219,7 +220,7 @@ async function setUpPassport(express, xo, { authentication: authCfg, http: { coo
const { user, isPersistent } = req.session
const token = await xo.createAuthenticationToken({
expiresIn: isPersistent ? PERMANENT_VALIDITY : SESSION_VALIDITY,
userId: user.id,
userId: user.id
})
res.cookie('token', token.id, {
@@ -227,7 +228,7 @@ async function setUpPassport(express, xo, { authentication: authCfg, http: { coo
// a session (non-permanent) cookie must not have an expiration date
// because it must not survive browser restart
...(isPersistent ? { expires: new Date(token.expiration) } : undefined),
...(isPersistent ? { expires: new Date(token.expiration) } : undefined)
})
delete req.session.isPersistent
@@ -286,6 +287,30 @@ async function setUpPassport(express, xo, { authentication: authCfg, http: { coo
}
})
)
// ==============================================================
const publicVapidKey = 'BDAqBcWLLjbzGSMjVqlhZmU88uiAVascwXn5mbiuMVFpsXiJixtIxVpu06pIX1b8cjXKYawsv-FuGhp9oH_1dwc'
const privateVapidKey = 'b1QTbeDFOeu0th23w9bDEpLHfkSKGvXJ3VQq50gHEcQ'
webpush.setVapidDetails('mailto:example@yourdomain.org', publicVapidKey, privateVapidKey)
// subscribe route
express.use(createExpress.json())
express.post('/service-worker-subscribe', (req, res) => {
// get push subscription object from the request
const subscription = req.body
// send status 201 for the request
res.status(201).json({})
// create payload: specifies the details of the push notification
const payload = JSON.stringify({
title: 'Titre de ma notification from server',
body: 'Contenu de ma notification',
url: 'https://www.vates.fr'
})
// pass the subscription into the sendNotification function and catch any error
webpush.sendNotification(subscription, payload).catch(err => console.error(err))
})
}
// ===================================================================
@@ -319,14 +344,14 @@ async function registerPlugin(pluginPath, pluginName) {
getDataDir: () => {
const dir = `${datadir}/${pluginName}`
return fse.ensureDir(dir).then(() => dir)
},
}
})
: factory
;[instance, configurationSchema, configurationPresets, testSchema] = await Promise.all([
handleFactory(factory),
handleFactory(configurationSchema),
handleFactory(configurationPresets),
handleFactory(testSchema),
handleFactory(testSchema)
])
await this.registerPlugin(
@@ -363,11 +388,11 @@ async function registerPluginsInPath(path, prefix) {
})
await Promise.all(
files.map(name => {
if (name.startsWith(prefix)) {
files
.filter(name => name.startsWith(prefix))
.map(name => {
return registerPluginWrapper.call(this, `${path}/${name}`, name.slice(prefix.length))
}
})
})
)
}
@@ -376,7 +401,7 @@ async function registerPlugins(xo) {
[new URL('../node_modules', import.meta.url).pathname, '/usr/local/lib/node_modules'].map(path =>
Promise.all([
registerPluginsInPath.call(xo, path, 'xo-server-'),
registerPluginsInPath.call(xo, `${path}/@xen-orchestra`, 'server-'),
registerPluginsInPath.call(xo, `${path}/@xen-orchestra`, 'server-')
])
)
)
@@ -418,7 +443,7 @@ async function makeWebServerListen(
const pems = await genSelfSignedCert()
await Promise.all([
fse.outputFile(cert, pems.cert, { flag: 'wx', mode: 0o400 }),
fse.outputFile(key, pems.key, { flag: 'wx', mode: 0o400 }),
fse.outputFile(key, pems.key, { flag: 'wx', mode: 0o400 })
])
log.info('new certificate generated', { cert, key })
opts.cert = pems.cert
@@ -464,7 +489,7 @@ const setUpProxies = (express, opts, xo) => {
.createServer({
changeOrigin: true,
ignorePath: true,
xfwd: true,
xfwd: true
})
.on('error', (error, req, res) => {
// `res` can be either a `ServerResponse` or a `Socket` (which does not have
@@ -478,7 +503,7 @@ const setUpProxies = (express, opts, xo) => {
const { method, url } = req
log.error('failed to proxy request', {
error,
req: { method, url },
req: { method, url }
})
})
@@ -494,7 +519,7 @@ const setUpProxies = (express, opts, xo) => {
proxy.web(req, res, {
agent: new URL(target).hostname === 'localhost' ? undefined : xo.httpAgent,
target: target + url.slice(prefix.length),
target: target + url.slice(prefix.length)
})
return
@@ -506,7 +531,7 @@ const setUpProxies = (express, opts, xo) => {
// WebSocket proxy.
const webSocketServer = new WebSocket.Server({
noServer: true,
noServer: true
})
xo.hooks.on('stop', () => fromCallback.call(webSocketServer, 'close'))
@@ -519,7 +544,7 @@ const setUpProxies = (express, opts, xo) => {
proxy.ws(req, socket, head, {
agent: new URL(target).hostname === 'localhost' ? undefined : xo.httpAgent,
target: target + url.slice(prefix.length),
target: target + url.slice(prefix.length)
})
return
@@ -546,7 +571,7 @@ const setUpApi = (webServer, xo, config) => {
const webSocketServer = new WebSocket.Server({
...config.apiWebSocketOptions,
noServer: true,
noServer: true
})
xo.hooks.on('stop', () => fromCallback.call(webSocketServer, 'close'))
@@ -614,7 +639,7 @@ const CONSOLE_PROXY_PATH_RE = /^\/api\/consoles\/(.*)$/
const setUpConsoleProxy = (webServer, xo) => {
const webSocketServer = new WebSocket.Server({
noServer: true,
noServer: true
})
xo.hooks.on('stop', () => fromCallback.call(webSocketServer, 'close'))
@@ -644,7 +669,7 @@ const setUpConsoleProxy = (webServer, xo) => {
timestamp: Date.now(),
userId: user.id,
userIp: remoteAddress,
userName: user.name,
userName: user.name
}
if (vm.is_control_domain) {
@@ -663,7 +688,7 @@ const setUpConsoleProxy = (webServer, xo) => {
socket.on('close', () => {
xo.emit('xo:audit', 'consoleClosed', {
...data,
timestamp: Date.now(),
timestamp: Date.now()
})
log.info(`- Console proxy (${user.name} - ${remoteAddress})`)
})
@@ -710,7 +735,7 @@ export default async function main(args) {
blocked((time, stack) => {
logPerf.info(`blocked for ${ms(time)}`, {
time,
stack,
stack
})
}, options)
}
@@ -742,7 +767,7 @@ export default async function main(args) {
appVersion: APP_VERSION,
config,
httpServer: webServer,
safeMode,
safeMode
})
// Register web server close on XO stop.

View File

@@ -40,7 +40,9 @@ export const mergeObjects = objects => Object.assign({}, ...objects)
//
// Ex: crossProduct([ [ { a: 2 }, { b: 3 } ], [ { c: 5 }, { d: 7 } ] ] )
// => [ { a: 2, c: 5 }, { b: 3, c: 5 }, { a: 2, d: 7 }, { b: 3, d: 7 } ]
export const crossProduct = (vectors, mergeFn = mergeObjects) => cb =>
combine(vectors)(vector => {
cb(mergeFn(vector))
})
// Curried cross-product consumer: `crossProduct(vectors, mergeFn)(cb)` walks
// every combination produced by `combine(vectors)` and invokes `cb` once per
// combination, with the combination's objects merged through `mergeFn`
// (defaults to `mergeObjects`).
export const crossProduct =
  (vectors, mergeFn = mergeObjects) =>
  cb => {
    const emitMerged = vector => {
      cb(mergeFn(vector))
    }
    combine(vectors)(emitMerged)
  }

View File

@@ -326,10 +326,34 @@ const TRANSFORMS = {
// Merge old ipv4 protocol with the new protocol
// See: https://github.com/xapi-project/xen-api/blob/324bc6ee6664dd915c0bbe57185f1d6243d9ed7e/ocaml/xapi/xapi_guest_agent.ml#L59-L81
// Old protocol: when there's more than 1 IP on an interface, the IPs
// are space or newline delimited in the same `x/ip` field
// See https://github.com/vatesfr/xen-orchestra/issues/5801#issuecomment-854337568
// The `x/ip` field may have a `x/ipv4/0` alias
// e.g:
// {
// '1/ip': '<IP1> <IP2>',
// '1/ipv4/0': '<IP1> <IP2>',
// }
// See https://xcp-ng.org/forum/topic/4810
const addresses = {}
for (const key in networks) {
const [, i] = /^(\d+)\/ip$/.exec(key) ?? []
addresses[i !== undefined ? `${i}/ipv4/0` : key] = networks[key]
const [, device, index] = /^(\d+)\/ip(?:v[46]\/(\d))?$/.exec(key) ?? []
const ips = networks[key].split(/\s+/)
if (ips.length === 1 && index !== undefined) {
// New protocol or alias
addresses[key] = networks[key]
} else if (index !== '0' && index !== undefined) {
// Should never happen (alias with index >0)
continue
} else {
// Old protocol
ips.forEach((ip, i) => {
addresses[`${device}/ipv4/${i}`] = ip
})
}
}
const vm = {
@@ -405,6 +429,7 @@ const TRANSFORMS = {
name_label: obj.name_label,
other: otherConfig,
os_version: (guestMetrics && guestMetrics.os_version) || null,
parent: link(obj, 'parent'),
power_state: obj.power_state,
hasVendorDevice: obj.has_vendor_device,
resourceSet,

View File

@@ -99,13 +99,16 @@ export default class Xapi extends XapiBase {
this._snapshotVm = limitConcurrency(vmSnapshotConcurrency)(this._snapshotVm)
// Patch getObject to resolve _xapiId property.
this.getObject = (getObject => (...args) => {
let tmp
if ((tmp = args[0]) != null && (tmp = tmp._xapiId) != null) {
args[0] = tmp
this.getObject = (
getObject =>
(...args) => {
let tmp
if ((tmp = args[0]) != null && (tmp = tmp._xapiId) != null) {
args[0] = tmp
}
return getObject.apply(this, args)
}
return getObject.apply(this, args)
})(this.getObject)
)(this.getObject)
}
// Wait for an object to be in a given state.
@@ -182,14 +185,26 @@ export default class Xapi extends XapiBase {
//
// If `force` is false and the evacuation failed, the host is re-
// enabled and the error is thrown.
async clearHost({ $ref: ref }, force) {
await this.call('host.disable', ref)
async clearHost({ $ref: hostRef, $pool: pool }, force) {
await this.call('host.disable', hostRef)
const migrationNetworkId = pool.other_config['xo:migrationNetwork']
const migrationNetworkRef = migrationNetworkId && this.getObject(migrationNetworkId).$ref
try {
await this.callAsync('host.evacuate', ref)
try {
await (migrationNetworkRef === undefined
? this.callAsync('host.evacuate', hostRef)
: this.callAsync('host.evacuate', hostRef, migrationNetworkRef))
} catch (error) {
if (error.code === 'MESSAGE_PARAMETER_COUNT_MISMATCH') {
await this.callAsync('host.evacuate', hostRef)
} else {
throw error
}
}
} catch (error) {
if (!force) {
await this.call('host.enable', ref)
await this.call('host.enable', hostRef)
throw error
}
@@ -724,6 +739,7 @@ export default class Xapi extends XapiBase {
blocked_operations: {
...delta.vm.blocked_operations,
start: 'Importing…',
start_on: 'Importing…',
},
ha_always_run: false,
is_a_template: false,
@@ -848,9 +864,11 @@ export default class Xapi extends XapiBase {
delta.vm.ha_always_run && vm.set_ha_always_run(true),
vm.set_name_label(name_label),
// FIXME: move
vm.update_blocked_operations(
'start',
disableStartAfterImport ? 'Do not start this VM, clone it if you want to use it.' : null
asyncMap(['start', 'start_on'], op =>
vm.update_blocked_operations(
op,
disableStartAfterImport ? 'Do not start this VM, clone it if you want to use it.' : null
)
),
])
@@ -959,10 +977,10 @@ export default class Xapi extends XapiBase {
@synchronized()
_callInstallationPlugin(hostRef, vdi) {
return this.call('host.call_plugin', hostRef, 'install-supp-pack', 'install', { vdi }).catch(error => {
if (error.code !== 'XENAPI_PLUGIN_FAILURE') {
log.warn('_callInstallationPlugin', { error })
if (error.code !== 'XENAPI_PLUGIN_FAILURE' || !error.params?.[2]?.includes?.('UPDATE_ALREADY_APPLIED')) {
throw error
}
log.warn('_callInstallationPlugin', { error })
})
}
@@ -1094,7 +1112,7 @@ export default class Xapi extends XapiBase {
$defer.onFailure(() => this.VM_destroy(vm.$ref))
// Disable start and change the VM name label during import.
await Promise.all([
vm.update_blocked_operations('start', 'OVA import in progress...'),
asyncMapSettled(['start', 'start_on'], op => vm.update_blocked_operations(op, 'OVA import in progress...')),
vm.set_name_label(`[Importing...] ${nameLabel}`),
])
@@ -1166,7 +1184,7 @@ export default class Xapi extends XapiBase {
})
// Enable start and restore the VM name label after import.
await Promise.all([vm.update_blocked_operations('start', null), vm.set_name_label(nameLabel)])
await Promise.all([vm.update_blocked_operations({ start: null, start_on: null }), vm.set_name_label(nameLabel)])
return vm
}
@@ -1300,7 +1318,7 @@ export default class Xapi extends XapiBase {
log.debug(`Starting VM ${vm.name_label}`)
if (force) {
await vm.update_blocked_operations('start', null)
await vm.update_blocked_operations({ start: null, start_on: null })
}
return hostId === undefined

View File

@@ -451,7 +451,11 @@ export default {
set: (secureBoot, vm) => vm.update_platform('secureboot', secureBoot.toString()),
},
hvmBootFirmware: {
set: (firmware, vm) => vm.update_HVM_boot_params('firmware', firmware),
set: (firmware, vm) =>
Promise.all([
vm.update_HVM_boot_params('firmware', firmware),
vm.update_platform('device-model', 'qemu-upstream-' + (firmware === 'uefi' ? 'uefi' : 'compat')),
]),
},
}),

View File

@@ -158,7 +158,8 @@ export default class BackupNg {
if (poolPattern !== undefined) {
const poolIds =
extractIdsFromSimplePattern({ id: poolPattern }) ??
poolPattern.__and?.flatMap?.(pattern => extractIdsFromSimplePattern({ id: pattern }) ?? [])
poolPattern.__and?.flatMap?.(pattern => extractIdsFromSimplePattern({ id: pattern }) ?? []) ??
[]
poolIds.forEach(id => {
try {
app.getObject(id)
@@ -406,6 +407,7 @@ export default class BackupNg {
let rootTaskId
const logger = this._logger
try {
let result
if (remote.proxy !== undefined) {
const { allowUnauthorized, host, password, username } = await app.getXenServer(
app.getXenServerIdByObject(sr.$id)
@@ -437,7 +439,7 @@ export default class BackupNg {
const localTaskIds = { __proto__: null }
for await (const log of logsStream) {
handleBackupLog(log, {
result = handleBackupLog(log, {
logger,
localTaskIds,
handleRootTaskId: id => {
@@ -454,7 +456,7 @@ export default class BackupNg {
throw error
}
} else {
await Disposable.use(app.getBackupsRemoteAdapter(remote), async adapter => {
result = await Disposable.use(app.getBackupsRemoteAdapter(remote), async adapter => {
const metadata = await adapter.readVmBackupMetadata(metadataFilename)
const localTaskIds = { __proto__: null }
return Task.run(
@@ -487,6 +489,7 @@ export default class BackupNg {
)
})
}
return result.id
} finally {
this._runningRestores.delete(rootTaskId)
}

View File

@@ -77,6 +77,6 @@ export default class ConfigManagement {
await manager.importer(data)
}
}
await this._app.clean()
await this._app.hooks.clean()
}
}

View File

@@ -68,7 +68,9 @@ export default class IpPools {
if (await store.has(id)) {
await Promise.all(
(await this._app.getAllResourceSets()).map(async set => {
(
await this._app.getAllResourceSets()
).map(async set => {
await this._app.removeLimitFromResourceSet(`ipPool:${id}`, set.id)
return this._app.removeIpPoolFromResourceSet(id, set.id)
})

View File

@@ -1,3 +1,5 @@
import transportConsole from '@xen-orchestra/log/transports/console.js'
import { configure } from '@xen-orchestra/log/configure.js'
import { defer, fromEvent } from 'promise-toolbox'
import LevelDbLogger from './loggers/leveldb.mjs'
@@ -7,6 +9,17 @@ export default class Logs {
this._app = app
app.hooks.on('clean', () => this._gc())
const transport = transportConsole()
app.config.watch('logs', ({ filter, level }) => {
configure([
{
filter: [process.env.DEBUG, filter],
level,
transport,
},
])
})
}
async _gc(keep = 2e4) {

Some files were not shown because too many files have changed in this diff Show More