Compare commits

...

117 Commits

Author SHA1 Message Date
Julien Fontanet
d32c5b31e7 WiP: feat(mixin): support for lazy mixins 2022-06-23 16:38:13 +02:00
Julien Fontanet
667d0724c3 docs(configuration/custom ca): fix systemd path
Introduced by 03a66e469
2022-06-22 11:32:24 +02:00
Julien Fontanet
a49395553a docs(configuration/custom ca): fix systemd path
Introduced by 03a66e469
2022-06-22 11:30:09 +02:00
Julien Fontanet
cce09bd9cc docs(configuration/custom ca): add note regarding XO Proxy 2022-06-22 10:44:25 +02:00
Julien Fontanet
03a66e4690 docs(configuration/custom ca): use separate systemd file
This is better as it avoids conflicts with existing config and is compatible with the way XO Proxy service is handled.
2022-06-22 10:44:25 +02:00
Florent BEAUCHAMP
fd752fee80 feat(backups,vhd-lib): implement copyless merge (#6271) 2022-06-22 10:36:57 +02:00
Julien Fontanet
8a71f84733 chore(xo-server): remove Model wrapping 2022-06-22 10:10:39 +02:00
Julien Fontanet
9ef2c7da4c chore(complex-matcher): remove build step 2022-06-22 09:55:59 +02:00
Julien Fontanet
8975073416 fix(xapi): add missing file
Introduced by b12c17947

Thanks @Danp2.
2022-06-22 00:07:32 +02:00
Julien Fontanet
d1c1378c9d feat(xo-server-db): minimal CLI to browse the DB 2022-06-21 18:11:44 +02:00
Julien Fontanet
7941284a1d feat(xo-server/collection/Redis): set of all indexes 2022-06-21 17:47:56 +02:00
Julien Fontanet
af2d17b7a5 feat(xo-server/collection/Redis): set of all namespaces 2022-06-21 17:29:19 +02:00
Julien Fontanet
3ca2b01d9a feat(xo-server/collection/Redis): assert namespace doesn't contain _ or : 2022-06-21 17:24:10 +02:00
Julien Fontanet
67193a2ab7 chore(xo-server/collection/Redis): replace prefix by namespace 2022-06-21 17:23:25 +02:00
Julien Fontanet
9757aa36de chore(xo-server/collection/Redis): _id field was never used 2022-06-21 17:23:18 +02:00
Julien Fontanet
29854a9f87 feat(xo-server): new sr.{enable,disable}MaintenanceMode methods 2022-06-21 15:07:09 +02:00
Julien Fontanet
b12c179470 feat(xapi): new SR_{enable,disable}MaintenanceMode methods 2022-06-21 15:07:09 +02:00
Julien Fontanet
bbef15e4e4 feat(xo-server/proxy.get{,All}): return associated URL(s) (#6291) 2022-06-21 11:33:25 +02:00
Florent BEAUCHAMP
c483929a0d fix(ova import): drain disk entry completely (#6284) 2022-06-20 16:09:20 +02:00
Julien Fontanet
1741f395dd chore(xo-server/deleteAuthenticationTokens): optimization
Don't use xo-server/deleteAuthenticationToken to avoid fetching the records twice.
2022-06-19 11:37:42 +02:00
Julien Fontanet
0f29262797 chore(value-matcher): remove build step 2022-06-19 11:28:11 +02:00
Julien Fontanet
31ed477b96 feat(xo-server/token.delete): available for non-admins 2022-06-17 11:59:29 +02:00
Julien Fontanet
9e5de5413d feat(xo-server/Collection#remove): accept a pattern 2022-06-17 11:59:29 +02:00
Florent BEAUCHAMP
0f297a81a4 feat(xo-remote-parser): additional parameters in URL (#6270) 2022-06-16 23:14:34 +02:00
Mathieu
89313def99 fix(xapi/vm): throw forbiddenOperation on blockedOperation (#6290) 2022-06-16 14:39:20 +02:00
Julien Fontanet
8e0be4edaf feat(xo-server/vm.set): blockedOperations now accepts string reasons and null
Related to #6290
2022-06-16 10:16:43 +02:00
Julien Fontanet
a8dfdfb922 fix(event-listeners-manager/add): _listeners is a Map 2022-06-15 14:37:38 +02:00
Julien Fontanet
f096024248 chore(event-listeners-manager): add tests 2022-06-15 14:37:31 +02:00
Julien Fontanet
4f50f90213 feat(xo-server/token.create): minimum duration is now one minute
This change also handles negative or zero invalid durations.
2022-06-15 11:26:32 +02:00
Julien Fontanet
4501902331 feat(xo-server): XO Proxy channel based on current channel (#6277) 2022-06-15 10:42:57 +02:00
Julien Fontanet
df19679dba fix(xo-cli): close connection when finished 2022-06-15 10:25:06 +02:00
Julien Fontanet
9f5a2f67f9 fix(xo-cli): xdg-basedir import
Introduced by 2d5c40632
2022-06-15 10:22:39 +02:00
Julien Fontanet
2d5c406325 chore: update dev deps 2022-06-13 19:33:09 +02:00
Julien Fontanet
151b8a8940 feat(read-chunk): add readChunkStrict 2022-06-13 12:01:02 +02:00
Julien Fontanet
cda027b94a docs(read-chunk): behavior when stream has ended 2022-06-13 11:22:42 +02:00
Julien Fontanet
ee2117abf6 chore(CHANGELOG.unreleased): pkgs list should be ordered
See https://team.vates.fr/vates/pl/1q6or14b9jffjfxk9qyebfg6sh
2022-06-13 11:22:08 +02:00
Thierry Goettelmann
6e7294d49f feat: release 5.71.1 (#6285) 2022-06-13 11:06:36 +02:00
Manon Mercier
062e45f697 docs(backup/troubleshooting): add no XAPI associated error (#6279)
Co-authored-by: Jon Sands <fohdeesha@gmail.com>
2022-06-13 10:07:20 +02:00
Julien Fontanet
d18b39990d feat(xo-server/api): introduce a global async API context (#6274)
This allows access to contextual data deep inside the call stack.

Example use cases:
- current user
- specific permission (e.g. read only token)
- current authentication token
2022-06-13 09:43:39 +02:00
Julien Fontanet
7387ac2411 fix(xo-server/disk.import): fix xapi._getOrWaitObject call
Maybe related to #6282

Introduced by 5063a6982
2022-06-10 17:34:33 +02:00
Thierry Goettelmann
4186592f9f feat: technical release (#6281) 2022-06-10 17:05:04 +02:00
Thierry Goettelmann
6c9d5a72a6 feat(xo-web/backup): show cleanVm logs only in case of warnings (#6280) 2022-06-09 22:07:29 +02:00
Julien Fontanet
83690a4dd4 fix(xo-server/_importOvaVm): fix VM creation
Fixes https://xcp-ng.org/forum/post/49920

Introduced by 2af5328a0f
2022-06-09 18:51:35 +02:00
Florent BEAUCHAMP
c11e03ab26 fix(xo-vmdk-to-vhd/generateVmdkData): don't use VM name as OVF filename
It might break the OVA depending on present characters.
2022-06-09 17:18:30 +02:00
Florent BEAUCHAMP
c7d8709267 fix(xo-vmdk-to-vhd/generateVmdkData): reduce compression level
The max value (9) is very slow and should be avoided.
2022-06-09 17:18:30 +02:00
Florent BEAUCHAMP
6579deffad fix(xo-server): don't create zombie task on OVA export
Introduced by 4b9db257f
2022-06-09 17:18:30 +02:00
Julien Fontanet
e2739e7a4b fix(xo-server): make auth tokens created_at/expiration numbers 2022-06-09 16:15:14 +02:00
Florent BEAUCHAMP
c0d587f541 fix(backups): task warning if beforeBackup or checkBaseVdis steps fail (#6266) 2022-06-09 14:39:25 +02:00
Florent BEAUCHAMP
05a96ffc14 fix(xo-web): handle missing result of broken merge tasks in backup logs (#6275) 2022-06-09 14:14:26 +02:00
Julien Fontanet
32a47444d7 feat(proxy-cli): new --url flag
Which can be used instead of `--host` and `--token`.
2022-06-09 13:38:06 +02:00
Julien Fontanet
9ff5de5f33 feat(xo-server): expose _xapiRef to the API
Fixes zammad#7439

This makes objects searchable by their opaque ref in the UI.
2022-06-09 09:52:17 +02:00
Julien Fontanet
09badf33d0 feat(docs/configuration): use NODE_EXTRA_CA_CERTS instead of --use-openssl-ca (#6226)
Fixes zammad#6310

Easier to use and compatible with more distributions.
2022-06-09 09:08:16 +02:00
Julien Fontanet
1643d3637f chore(xo-server/api): remove unused api from context 2022-06-08 22:52:24 +02:00
Julien Fontanet
b962e9ebe8 fix(xo-server/system.methodSignature): declare expected params 2022-06-08 22:52:03 +02:00
Julien Fontanet
66f3528e10 fix(xapi/VM_snapshot): handle undefined VM.VUSBs
Fixes zammad#7401
2022-06-08 16:29:27 +02:00
Julien Fontanet
a5e9f051a2 docs(REST API): content-type is no longer necessary with -T
Because it is no longer set by default to `application/x-www-form-urlencoded` like it was with `--data-binary`.
2022-06-07 23:46:14 +02:00
Julien Fontanet
63bfb76516 docs(REST API): use -T instead of --data-binary for cURL
Because `--data-binary` loads the whole data in memory which isn't compatible with big data like a VHD file, whereas `-T` streams the data to the server.
2022-06-07 23:38:05 +02:00
tkrafael
f88f7d41aa fix(xen-api/putResource): use agent for both requests (#6261)
Fixes #6260
2022-06-07 19:33:33 +02:00
Julien Fontanet
877383ac85 fix(xo-server/sr.createExt): fix SR_create call
Introduced by 052126613
2022-06-07 18:59:30 +02:00
Julien Fontanet
dd5e11e835 feat(xo-server/api): don't filter errors sent to admin users (#6262)
Previous behavior was hiding all errors not explicitly dedicated to be sent to API users and replacing them with an *unknown error from the peer*.

This was done to avoid leaking sensitive information, but it often hides important info.

Administrators can already see the raw errors in Settings/Logs, therefore it makes sense to not hide them for these users.
2022-06-07 13:34:34 +02:00
Julien Fontanet
3d43550ffe feat(xo-cli): provide authentication token description 2022-06-07 10:57:28 +02:00
Julien Fontanet
115bc8fa0a feat(xo-server): authentication tokens can have a description 2022-06-07 10:57:26 +02:00
Julien Fontanet
15c46e324c feat(xo-server/api): new user.getAuthenticationTokens 2022-06-07 10:04:45 +02:00
Julien Fontanet
df38366066 fix(xo-server/collection/redis#get): correctly filter on properties when id is provided 2022-06-07 10:04:14 +02:00
Julien Fontanet
28b13ccfff fix(xo-server/collection/redis#get): don't mutate properties param 2022-06-07 09:57:25 +02:00
Julien Fontanet
26a433ebbe feat(xo-server/createAuthenticationToken): add created_at field 2022-06-07 09:20:34 +02:00
Julien Fontanet
1902595190 feat(xo-server/getAuthenticationTokensForUser): filter and remove expired tokens 2022-06-07 09:15:30 +02:00
Julien Fontanet
80146cfb58 feat(xo-server/proxies): expose auth tokens
First step to expose them in the UI, to make XO Proxies easier to use as HTTP proxies.
2022-06-07 09:02:46 +02:00
Yannick Achy
03d2d6fc94 docs(backups): explain HTTP timeout error and auto power on behavior (#6263)
Co-authored-by: Jon Sands <fohdeesha@gmail.com>
2022-06-05 12:21:39 +02:00
Julien Fontanet
379e4d7596 chore(xo-server): use @xen-orchestra/xapi/VBD_unplug 2022-06-02 17:08:22 +02:00
Julien Fontanet
9860bd770b chore(xo-server): use @xen-orchestra/xapi/VBD_destroy 2022-06-02 17:07:18 +02:00
Julien Fontanet
2af5328a0f chore(xo-server): use @xen-orchestra/xapi/VM_create 2022-06-02 17:02:10 +02:00
Julien Fontanet
4084a44f83 chore(xo-server): use @xen-orchestra/xapi/VDI_exportContent 2022-06-02 16:57:21 +02:00
Julien Fontanet
ba7c7ddb23 chore(xo-server): use @xen-orchestra/xapi/VDI_importContent 2022-06-02 16:54:23 +02:00
Julien Fontanet
2351e7b98c chore(xo-server): use @xen-orchestra/xapi/VBD_create 2022-06-02 16:37:49 +02:00
Julien Fontanet
d353dc622c fix(xapi/VBD_create): don't fail if the VBD could not be plugged
Otherwise, the creation method would have failed but the VBD would still exist, violating the principle of least surprise.
2022-06-02 16:26:29 +02:00
Julien Fontanet
3ef6adfd02 feat(xapi/VBD_create): returns the new VBD's ref 2022-06-02 16:25:19 +02:00
Julien Fontanet
5063a6982a chore(xo-server): use @xen-orchestra/xapi/VDI_create 2022-06-02 16:10:16 +02:00
Julien Fontanet
0008f2845c feat(xapi/VDI_create): move sm_config in second param
Similarly to other creation methods, properties that must be made explicit are passed in the second param.
2022-06-02 14:45:57 +02:00
Julien Fontanet
a0994bc428 fix(scripts/gen-deps-list.js): add missing await
Introduced by a0836ebdd
2022-06-01 16:51:31 +02:00
Julien Fontanet
8fe0d97aec fix(scripts/gen-deps-list.js): fix packages order (#6259)
`deptree` nodes should be added only once with the full list of their dependencies.

For better display, packages are sorted by names before resolving the graph for nicer display.
2022-06-01 16:07:36 +02:00
Julien Fontanet
a8b3c02780 chore(CHANGELOG): integrate released changes 2022-06-01 15:56:01 +02:00
Julien Fontanet
f3489fb57c feat(xo-web): 5.97.1 2022-06-01 15:51:16 +02:00
Julien Fontanet
434b5b375d feat(xo-server): 5.95.0 2022-06-01 15:51:16 +02:00
Julien Fontanet
445120f9f5 feat(@xen-orchestra/proxy): 0.23.1 2022-06-01 15:51:16 +02:00
Julien Fontanet
71b11f0d9c feat(@xen-orchestra/xapi): 1.1.0 2022-06-01 15:51:16 +02:00
Julien Fontanet
8297a9e0e7 feat(@xen-orchestra/fs): 1.0.3 2022-06-01 15:51:16 +02:00
Florent BEAUCHAMP
4999672f2d fix(xo-web/backups): scheduled health check is available to enterprise (#6257)
Introduced by cae3555ca
2022-06-01 15:36:36 +02:00
Thierry Goettelmann
70608ed7e9 fix(scripts/gen-deps-lists.js): various fixes 2022-06-01 14:04:41 +02:00
Julien Fontanet
a0836ebdd7 feat(scripts/gen-deps-list.js): test mode (#6258) 2022-06-01 13:53:56 +02:00
Florent BEAUCHAMP
2b1edd1d4c feat: always log and display full remote errors (#6216)
Co-authored-by: Julien Fontanet <julien.fontanet@isonoe.net>
2022-05-31 17:30:27 +02:00
Thierry Goettelmann
42bb7cc973 feat: release 5.71.0 (#6256) 2022-05-31 16:20:41 +02:00
Julien Fontanet
8299c37bb7 fix(xo-server/pool.rollingUpdate): duplicate poolId declaration
Introduced by 7a2005c20
2022-05-31 14:32:13 +02:00
Mathieu
7a2005c20c feat(xo-server/pool): disable scheduled job when starting RPU (#6244)
See zammad#5377, zammad#5333
2022-05-31 11:59:52 +02:00
Pierre Donias
ae0eb9e66e fix(xo-web/health): make "Too many snapshots" table sortable by number of snapshots (#6255)
See zammad#6439
2022-05-31 11:45:11 +02:00
Julien Fontanet
052126613a feat(xapi,xo-server): create SRs with other_config.auto-scan=true (#6246)
Fixes https://team.vates.fr/vates/pl/nf18hnr51f8f3f3brcbra57uar
2022-05-31 11:24:15 +02:00
l-austenfeld
7959657bd6 fix(xo-server/xapi): missing shutdownHost default parameter (#6253)
Add a default empty object parameter to enable calls to shutdownHost with only one parameter.
This implicitly fixes the density load-balancer, since it calls shutdownHost with only one parameter.
2022-05-31 10:01:47 +02:00
Thierry Goettelmann
9f8bb376ea feat: technical release (#6254) 2022-05-30 17:45:59 +02:00
Julien Fontanet
ee8e2fa906 docs(REST API): use | cat trick in VDI import example 2022-05-30 16:51:35 +02:00
Julien Fontanet
33a380b173 docs(REST API): add name_label param in VDI import example 2022-05-30 16:50:36 +02:00
Julien Fontanet
6e5b6996fa docs(REST API): add required content-type in VM import 2022-05-30 16:48:01 +02:00
Julien Fontanet
6409dc276c docs(REST API): don't use --progress-bar in VDI import example
This is not necessary and more in line with other examples.
2022-05-30 16:46:54 +02:00
Julien Fontanet
98f7ce43e3 feat(xo-server/RestApi): VDI import now returns the new VDI's UUID 2022-05-30 16:45:41 +02:00
Julien Fontanet
aa076e1d2d chore(xo-server/rest-api): use xen-api shorthand syntax 2022-05-30 16:23:39 +02:00
Julien Fontanet
7a096d1b5c chore(xo-server/rest-api): remove unnecessary awaits 2022-05-30 16:00:43 +02:00
Julien Fontanet
93b17ccddd chore(xo-server/api/vm): format with Prettier
Introduced by d7d81431e
2022-05-30 16:00:43 +02:00
Julien Fontanet
68c118c3e5 fix(xo-server/api/vm): add missing quote
Introduced by d7d81431e
2022-05-30 16:00:43 +02:00
Thierry Goettelmann
c0b0ba433f feat(backups,xo-web): add cleanVm warnings to task (#6225) 2022-05-30 15:39:54 +02:00
Thierry Goettelmann
d7d81431ef feat(xo-server/vm.migrate): call VM.assert_can_migrate before (#6245)
Fixes #5301
2022-05-30 15:04:12 +02:00
Pierre Donias
7451f45885 fix(xo-web/home): don't make VM's resource set name clickable for non-admins (#6252)
See https://xcp-ng.org/forum/topic/5902/permissions-for-users-to-be-able-to-snapshot/5?_=1653902135402

Non-admin users aren't allowed to view the Self Service page so it doesn't make
sense to have a link to that page
2022-05-30 15:02:03 +02:00
Florent BEAUCHAMP
c9882001a9 fix(xo-web,xo-server): store health check settings in job instead of schedule (#6251)
Introduced by cae3555ca7
2022-05-30 14:56:28 +02:00
Mathieu
837b06ef2b feat(xo-server/xo-web/pool): avoid RPU/ host reboot, shutdown / host agent reboot during backup (#6232)
See zammad#5377
2022-05-30 11:13:13 +02:00
Julien Fontanet
0e49150b8e feat(xo-server/RestApi): add VDI import
Related to zammad#7036
2022-05-29 20:48:59 +02:00
Julien Fontanet
0ec5f4bf68 chore(proxy,xo-server): update to http-server-plus@0.11.1
This new version fixes, among others, the support of the `Expect: 100-Continue` HTTP header, which is notably used by cURL during `POST`.
2022-05-29 20:44:00 +02:00
Julien Fontanet
601730d737 feat(xapi): new SR_importVdi()
Creates a new VDI on an SR from a VHD.
2022-05-29 20:44:00 +02:00
Julien Fontanet
28eb4b21bd fix(xo-server/disk.import): VHD import
Introduced by 0706e6f4ff
2022-05-29 14:09:08 +02:00
Julien Fontanet
a5afe0bca1 feat(vhd-lib/peekFooterFromStream): check checksum and content 2022-05-29 14:07:48 +02:00
150 changed files with 3538 additions and 2628 deletions

View File

@@ -9,7 +9,7 @@ exports.EventListenersManager = class EventListenersManager {
}
add(type, listener) {
let listeners = this._listeners[type]
let listeners = this._listeners.get(type)
if (listeners === undefined) {
listeners = new Set()
this._listeners.set(type, listeners)

View File

@@ -0,0 +1,67 @@
// Tap-based unit tests for EventListenersManager (see ./index.js).
'use strict'
const t = require('tap')
const { EventEmitter } = require('events')
const { EventListenersManager } = require('./')
// shared no-op listener reused across test cases
const noop = Function.prototype
// function spy (impl = Function.prototype) {
// function spy() {
// spy.calls.push([Array.from(arguments), this])
// }
// spy.calls = []
// return spy
// }
// asserts that exactly `listeners` are attached to `event` on the managed emitter
function assertListeners(t, event, listeners) {
t.strictSame(t.context.ee.listeners(event), listeners)
}
// fresh emitter + manager per test to avoid cross-test state leakage
t.beforeEach(function (t) {
t.context.ee = new EventEmitter()
t.context.em = new EventListenersManager(t.context.ee)
})
t.test('.add adds a listener', function (t) {
t.context.em.add('foo', noop)
assertListeners(t, 'foo', [noop])
t.end()
})
t.test('.add does not add a duplicate listener', function (t) {
t.context.em.add('foo', noop).add('foo', noop)
assertListeners(t, 'foo', [noop])
t.end()
})
t.test('.remove removes a listener', function (t) {
t.context.em.add('foo', noop).remove('foo', noop)
assertListeners(t, 'foo', [])
t.end()
})
t.test('.removeAll removes all listeners of a given type', function (t) {
t.context.em.add('foo', noop).add('bar', noop).removeAll('foo')
assertListeners(t, 'foo', [])
assertListeners(t, 'bar', [noop])
t.end()
})
t.test('.removeAll removes all listeners', function (t) {
t.context.em.add('foo', noop).add('bar', noop).removeAll()
assertListeners(t, 'foo', [])
assertListeners(t, 'bar', [])
t.end()
})

View File

@@ -37,6 +37,10 @@
"license": "ISC",
"version": "1.0.0",
"scripts": {
"postversion": "npm publish --access public"
"postversion": "npm publish --access public",
"test": "tap --branches=72"
},
"devDependencies": {
"tap": "^16.2.0"
}
}

View File

@@ -1,6 +1,9 @@
### `readChunk(stream, [size])`
- returns the next available chunk of data
- like `stream.read()`, a number of bytes can be specified
- returns `null` if the stream has ended
- returns with less data than expected if stream has ended
- returns `null` if the stream has ended and no data has been read
```js
import { readChunk } from '@vates/read-chunk'
@@ -11,3 +14,13 @@ import { readChunk } from '@vates/read-chunk'
}
})()
```
### `readChunkStrict(stream, [size])`
Similar behavior to `readChunk` but throws if the stream ended before the requested data could be read.
```js
import { readChunkStrict } from '@vates/read-chunk'
const chunk = await readChunkStrict(stream, 1024)
```

View File

@@ -16,9 +16,12 @@ Installation of the [npm package](https://npmjs.org/package/@vates/read-chunk):
## Usage
### `readChunk(stream, [size])`
- returns the next available chunk of data
- like `stream.read()`, a number of bytes can be specified
- returns `null` if the stream has ended
- returns with less data than expected if stream has ended
- returns `null` if the stream has ended and no data has been read
```js
import { readChunk } from '@vates/read-chunk'
@@ -30,6 +33,16 @@ import { readChunk } from '@vates/read-chunk'
})()
```
### `readChunkStrict(stream, [size])`
Similar behavior to `readChunk` but throws if the stream ended before the requested data could be read.
```js
import { readChunkStrict } from '@vates/read-chunk'
const chunk = await readChunkStrict(stream, 1024)
```
## Contributions
Contributions are _very_ welcomed, either on the documentation or on

View File

@@ -30,3 +30,22 @@ const readChunk = (stream, size) =>
onReadable()
})
exports.readChunk = readChunk
exports.readChunkStrict = async function readChunkStrict(stream, size) {
const chunk = await readChunk(stream, size)
if (chunk === null) {
throw new Error('stream has ended without data')
}
if (size !== undefined && chunk.length !== size) {
const error = new Error('stream has ended with not enough data')
Object.defineProperties(error, {
chunk: {
value: chunk,
},
})
throw error
}
return chunk
}

View File

@@ -4,7 +4,7 @@
const { Readable } = require('stream')
const { readChunk } = require('./')
const { readChunk, readChunkStrict } = require('./')
const makeStream = it => Readable.from(it, { objectMode: false })
makeStream.obj = Readable.from
@@ -43,3 +43,27 @@ describe('readChunk', () => {
})
})
})
// Inverts a promise's outcome: resolves with the rejection reason when the
// promise rejects, and rejects with the fulfilled value when it fulfills.
// Handy for asserting on expected rejections in tests.
const rejectionOf = promise =>
  promise.then(
    fulfilledValue => Promise.reject(fulfilledValue),
    rejectionReason => rejectionReason
  )
describe('readChunkStrict', function () {
it('throws if stream is empty', async () => {
const error = await rejectionOf(readChunkStrict(makeStream([])))
expect(error).toBeInstanceOf(Error)
expect(error.message).toBe('stream has ended without data')
expect(error.chunk).toEqual(undefined)
})
it('throws if stream ends with not enough data', async () => {
const error = await rejectionOf(readChunkStrict(makeStream(['foo', 'bar']), 10))
expect(error).toBeInstanceOf(Error)
expect(error.message).toBe('stream has ended with not enough data')
expect(error.chunk).toEqual(Buffer.from('foobar'))
})
})

View File

@@ -7,8 +7,8 @@
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"dependencies": {
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/backups": "^0.23.0",
"@xen-orchestra/fs": "^1.0.1",
"@xen-orchestra/backups": "^0.25.0",
"@xen-orchestra/fs": "^1.0.3",
"filenamify": "^4.1.0",
"getopts": "^2.2.5",
"lodash": "^4.17.15",
@@ -27,7 +27,7 @@
"scripts": {
"postversion": "npm publish --access public"
},
"version": "0.7.1",
"version": "0.7.3",
"license": "AGPL-3.0-or-later",
"author": {
"name": "Vates SAS",

View File

@@ -6,7 +6,7 @@ const ignoreErrors = require('promise-toolbox/ignoreErrors')
const { compileTemplate } = require('@xen-orchestra/template')
const { limitConcurrency } = require('limit-concurrency-decorator')
const { extractIdsFromSimplePattern } = require('./_extractIdsFromSimplePattern.js')
const { extractIdsFromSimplePattern } = require('./extractIdsFromSimplePattern.js')
const { PoolMetadataBackup } = require('./_PoolMetadataBackup.js')
const { Task } = require('./Task.js')
const { VmBackup } = require('./_VmBackup.js')

View File

@@ -153,6 +153,13 @@ class VmBackup {
errors.push(error)
this.delete(writer)
warn(warnMessage, { error, writer: writer.constructor.name })
// these two steps are the only one that are not already in their own sub tasks
if (warnMessage === 'writer.checkBaseVdis()' || warnMessage === 'writer.beforeBackup()') {
Task.warning(
`the writer ${writer.constructor.name} has failed the step ${warnMessage} with error ${error.message}. It won't be used anymore in this job execution.`
)
}
}
})
if (writers.size === 0) {

View File

@@ -47,42 +47,32 @@ const computeVhdsSize = (handler, vhdPaths) =>
// | |
// \___________rename_____________/
async function mergeVhdChain(chain, { handler, onLog, remove, merge }) {
async function mergeVhdChain(chain, { handler, logInfo, remove, merge }) {
assert(chain.length >= 2)
const chainCopy = [...chain]
const parent = chainCopy.pop()
const children = chainCopy
if (merge) {
onLog(`merging ${children.length} children into ${parent}`)
logInfo(`merging children into parent`, { childrenCount: children.length, parent })
let done, total
const handle = setInterval(() => {
if (done !== undefined) {
onLog(`merging ${children.join(',')} into ${parent}: ${done}/${total}`)
logInfo(`merging children in progress`, { children, parent, doneCount: done, totalCount: total })
}
}, 10e3)
const mergedSize = await mergeVhd(handler, parent, handler, children, {
logInfo,
onProgress({ done: d, total: t }) {
done = d
total = t
},
remove,
})
clearInterval(handle)
const mergeTargetChild = children.shift()
await Promise.all([
VhdAbstract.rename(handler, parent, mergeTargetChild),
asyncMap(children, child => {
onLog(`the VHD ${child} is already merged`)
if (remove) {
onLog(`deleting merged VHD ${child}`)
return VhdAbstract.unlink(handler, child)
}
}),
])
return mergedSize
}
}
@@ -125,14 +115,19 @@ const listVhds = async (handler, vmDir) => {
return { vhds, interruptedVhds, aliases }
}
async function checkAliases(aliasPaths, targetDataRepository, { handler, onLog = noop, remove = false }) {
async function checkAliases(
aliasPaths,
targetDataRepository,
{ handler, logInfo = noop, logWarn = console.warn, remove = false }
) {
const aliasFound = []
for (const path of aliasPaths) {
const target = await resolveVhdAlias(handler, path)
if (!isVhdFile(target)) {
onLog(`Alias ${path} references a non vhd target: ${target}`)
logWarn('alias references non VHD target', { path, target })
if (remove) {
logInfo('removing alias and non VHD target', { path, target })
await handler.unlink(target)
await handler.unlink(path)
}
@@ -147,13 +142,13 @@ async function checkAliases(aliasPaths, targetDataRepository, { handler, onLog =
// error during dispose should not trigger a deletion
}
} catch (error) {
onLog(`target ${target} of alias ${path} is missing or broken`, { error })
logWarn('missing or broken alias target', { target, path, error })
if (remove) {
try {
await VhdAbstract.unlink(handler, path)
} catch (e) {
if (e.code !== 'ENOENT') {
onLog(`Error while deleting target ${target} of alias ${path}`, { error: e })
} catch (error) {
if (error.code !== 'ENOENT') {
logWarn('error deleting alias target', { target, path, error })
}
}
}
@@ -170,20 +165,22 @@ async function checkAliases(aliasPaths, targetDataRepository, { handler, onLog =
entries.forEach(async entry => {
if (!aliasFound.includes(entry)) {
onLog(`the Vhd ${entry} is not referenced by a an alias`)
logWarn('no alias references VHD', { entry })
if (remove) {
logInfo('deleting unaliased VHD')
await VhdAbstract.unlink(handler, entry)
}
}
})
}
exports.checkAliases = checkAliases
const defaultMergeLimiter = limitConcurrency(1)
exports.cleanVm = async function cleanVm(
vmDir,
{ fixMetadata, remove, merge, mergeLimiter = defaultMergeLimiter, onLog = noop }
{ fixMetadata, remove, merge, mergeLimiter = defaultMergeLimiter, logInfo = noop, logWarn = console.warn }
) {
const limitedMergeVhdChain = mergeLimiter(mergeVhdChain)
@@ -214,9 +211,9 @@ exports.cleanVm = async function cleanVm(
})
} catch (error) {
vhds.delete(path)
onLog(`error while checking the VHD with path ${path}`, { error })
logWarn('VHD check error', { path, error })
if (error?.code === 'ERR_ASSERTION' && remove) {
onLog(`deleting broken ${path}`)
logInfo('deleting broken path', { path })
return VhdAbstract.unlink(handler, path)
}
}
@@ -228,12 +225,12 @@ exports.cleanVm = async function cleanVm(
const statePath = interruptedVhds.get(interruptedVhd)
interruptedVhds.delete(interruptedVhd)
onLog('orphan merge state', {
logWarn('orphan merge state', {
mergeStatePath: statePath,
missingVhdPath: interruptedVhd,
})
if (remove) {
onLog(`deleting orphan merge state ${statePath}`)
logInfo('deleting orphan merge state', { statePath })
await handler.unlink(statePath)
}
}
@@ -242,7 +239,7 @@ exports.cleanVm = async function cleanVm(
// check if alias are correct
// check if all vhd in data subfolder have a corresponding alias
await asyncMap(Object.keys(aliases), async dir => {
await checkAliases(aliases[dir], `${dir}/data`, { handler, onLog, remove })
await checkAliases(aliases[dir], `${dir}/data`, { handler, logInfo, logWarn, remove })
})
// remove VHDs with missing ancestors
@@ -264,9 +261,9 @@ exports.cleanVm = async function cleanVm(
if (!vhds.has(parent)) {
vhds.delete(vhdPath)
onLog(`the parent ${parent} of the VHD ${vhdPath} is missing`)
logWarn('parent VHD is missing', { parent, vhdPath })
if (remove) {
onLog(`deleting orphan VHD ${vhdPath}`)
logInfo('deleting orphan VHD', { vhdPath })
deletions.push(VhdAbstract.unlink(handler, vhdPath))
}
}
@@ -303,7 +300,7 @@ exports.cleanVm = async function cleanVm(
// check is not good enough to delete the file, the best we can do is report
// it
if (!(await this.isValidXva(path))) {
onLog(`the XVA with path ${path} is potentially broken`)
logWarn('XVA might be broken', { path })
}
})
@@ -317,7 +314,7 @@ exports.cleanVm = async function cleanVm(
try {
metadata = JSON.parse(await handler.readFile(json))
} catch (error) {
onLog(`failed to read metadata file ${json}`, { error })
logWarn('failed to read metadata file', { json, error })
jsons.delete(json)
return
}
@@ -328,9 +325,9 @@ exports.cleanVm = async function cleanVm(
if (xvas.has(linkedXva)) {
unusedXvas.delete(linkedXva)
} else {
onLog(`the XVA linked to the metadata ${json} is missing`)
logWarn('metadata XVA is missing', { json })
if (remove) {
onLog(`deleting incomplete backup ${json}`)
logInfo('deleting incomplete backup', { json })
jsons.delete(json)
await handler.unlink(json)
}
@@ -351,9 +348,9 @@ exports.cleanVm = async function cleanVm(
vhdsToJSons[path] = json
})
} else {
onLog(`Some VHDs linked to the metadata ${json} are missing`, { missingVhds })
logWarn('some metadata VHDs are missing', { json, missingVhds })
if (remove) {
onLog(`deleting incomplete backup ${json}`)
logInfo('deleting incomplete backup', { json })
jsons.delete(json)
await handler.unlink(json)
}
@@ -394,9 +391,9 @@ exports.cleanVm = async function cleanVm(
}
}
onLog(`the VHD ${vhd} is unused`)
logWarn('unused VHD', { vhd })
if (remove) {
onLog(`deleting unused VHD ${vhd}`)
logInfo('deleting unused VHD', { vhd })
unusedVhdsDeletion.push(VhdAbstract.unlink(handler, vhd))
}
}
@@ -420,7 +417,7 @@ exports.cleanVm = async function cleanVm(
const metadataWithMergedVhd = {}
const doMerge = async () => {
await asyncMap(toMerge, async chain => {
const merged = await limitedMergeVhdChain(chain, { handler, onLog, remove, merge })
const merged = await limitedMergeVhdChain(chain, { handler, logInfo, logWarn, remove, merge })
if (merged !== undefined) {
const metadataPath = vhdsToJSons[chain[0]] // all the chain should have the same metada file
metadataWithMergedVhd[metadataPath] = true
@@ -432,18 +429,18 @@ exports.cleanVm = async function cleanVm(
...unusedVhdsDeletion,
toMerge.length !== 0 && (merge ? Task.run({ name: 'merge' }, doMerge) : doMerge()),
asyncMap(unusedXvas, path => {
onLog(`the XVA ${path} is unused`)
logWarn('unused XVA', { path })
if (remove) {
onLog(`deleting unused XVA ${path}`)
logInfo('deleting unused XVA', { path })
return handler.unlink(path)
}
}),
asyncMap(xvaSums, path => {
// no need to handle checksums for XVAs deleted by the script, they will be handled by `unlink()`
if (!xvas.has(path.slice(0, -'.checksum'.length))) {
onLog(`the XVA checksum ${path} is unused`)
logInfo('unused XVA checksum', { path })
if (remove) {
onLog(`deleting unused XVA checksum ${path}`)
logInfo('deleting unused XVA checksum', { path })
return handler.unlink(path)
}
}
@@ -477,11 +474,11 @@ exports.cleanVm = async function cleanVm(
// don't warn if the size has changed after a merge
if (!merged && fileSystemSize !== size) {
onLog(`incorrect size in metadata: ${size ?? 'none'} instead of ${fileSystemSize}`)
logWarn('incorrect size in metadata', { size: size ?? 'none', fileSystemSize })
}
}
} catch (error) {
onLog(`failed to get size of ${metadataPath}`, { error })
logWarn('failed to get metadata size', { metadataPath, error })
return
}
@@ -491,7 +488,7 @@ exports.cleanVm = async function cleanVm(
try {
await handler.writeFile(metadataPath, JSON.stringify(metadata), { flags: 'w' })
} catch (error) {
onLog(`failed to update size in backup metadata ${metadataPath} after merge`, { error })
logWarn('metadata size update failed', { metadataPath, error })
}
}
})

View File

@@ -69,6 +69,8 @@ job.start(data: { mode: Mode, reportWhen: ReportWhen })
├─ task.warning(message: string)
├─ task.start(data: { type: 'VM', id: string })
│ ├─ task.warning(message: string)
| ├─ task.start(message: 'clean-vm')
│ │ └─ task.end
│ ├─ task.start(message: 'snapshot')
│ │ └─ task.end
│ ├─ task.start(message: 'export', data: { type: 'SR' | 'remote', id: string, isFull: boolean })
@@ -89,12 +91,8 @@ job.start(data: { mode: Mode, reportWhen: ReportWhen })
│ │ ├─ task.start(message: 'clean')
│ │ │ ├─ task.warning(message: string)
│ │ │ └─ task.end
│ │
│ │ │ // in case of delta backup
│ │ ├─ task.start(message: 'merge')
│ │ │ ├─ task.warning(message: string)
│ │ │ └─ task.end(result: { size: number })
│ │ │
│ │ └─ task.end
| ├─ task.start(message: 'clean-vm')
│ │ └─ task.end
│ └─ task.end
└─ job.end

View File

@@ -8,7 +8,7 @@
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"version": "0.23.0",
"version": "0.25.0",
"engines": {
"node": ">=14.6"
},
@@ -22,7 +22,7 @@
"@vates/disposable": "^0.1.1",
"@vates/parse-duration": "^0.1.1",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/fs": "^1.0.1",
"@xen-orchestra/fs": "^1.0.3",
"@xen-orchestra/log": "^0.3.0",
"@xen-orchestra/template": "^0.1.0",
"compare-versions": "^4.0.1",
@@ -38,7 +38,7 @@
"promise-toolbox": "^0.21.0",
"proper-lockfile": "^4.1.2",
"uuid": "^8.3.2",
"vhd-lib": "^3.1.0",
"vhd-lib": "^3.2.0",
"yazl": "^2.5.1"
},
"devDependencies": {
@@ -46,7 +46,7 @@
"tmp": "^0.2.1"
},
"peerDependencies": {
"@xen-orchestra/xapi": "^1.0.0"
"@xen-orchestra/xapi": "^1.2.0"
},
"license": "AGPL-3.0-or-later",
"author": {

View File

@@ -6,8 +6,9 @@ const { join } = require('path')
const { getVmBackupDir } = require('../_getVmBackupDir.js')
const MergeWorker = require('../merge-worker/index.js')
const { formatFilenameDate } = require('../_filenameDate.js')
const { Task } = require('../Task.js')
const { warn } = createLogger('xo:backups:MixinBackupWriter')
const { info, warn } = createLogger('xo:backups:MixinBackupWriter')
exports.MixinBackupWriter = (BaseClass = Object) =>
class MixinBackupWriter extends BaseClass {
@@ -25,11 +26,17 @@ exports.MixinBackupWriter = (BaseClass = Object) =>
async _cleanVm(options) {
try {
return await this._adapter.cleanVm(this.#vmBackupDir, {
...options,
fixMetadata: true,
onLog: warn,
lock: false,
return await Task.run({ name: 'clean-vm' }, () => {
return this._adapter.cleanVm(this.#vmBackupDir, {
...options,
fixMetadata: true,
logInfo: info,
logWarn: (message, data) => {
warn(message, data)
Task.warning(message, data)
},
lock: false,
})
})
} catch (error) {
warn(error)

View File

@@ -18,7 +18,7 @@
"preferGlobal": true,
"dependencies": {
"golike-defer": "^0.5.1",
"xen-api": "^1.2.0"
"xen-api": "^1.2.1"
},
"scripts": {
"postversion": "npm publish"

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "@xen-orchestra/emit-async",
"version": "0.1.0",
"version": "1.0.0",
"license": "ISC",
"description": "Emit an event for async listeners to settle",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/emit-async",

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "@xen-orchestra/fs",
"version": "1.0.1",
"version": "1.0.3",
"license": "AGPL-3.0-or-later",
"description": "The File System for Xen Orchestra backups.",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/fs",

View File

@@ -1,6 +1,7 @@
import asyncMapSettled from '@xen-orchestra/async-map/legacy'
import getStream from 'get-stream'
import { coalesceCalls } from '@vates/coalesce-calls'
import { createLogger } from '@xen-orchestra/log'
import { fromCallback, fromEvent, ignoreErrors, timeout } from 'promise-toolbox'
import { limitConcurrency } from 'limit-concurrency-decorator'
import { parse } from 'xo-remote-parser'
@@ -11,6 +12,8 @@ import { synchronized } from 'decorator-synchronized'
import { basename, dirname, normalize as normalizePath } from './_path'
import { createChecksumStream, validChecksumOfReadStream } from './checksum'
const { warn } = createLogger('@xen-orchestra:fs')
const checksumFile = file => file + '.checksum'
const computeRate = (hrtime, size) => {
const seconds = hrtime[0] + hrtime[1] / 1e9
@@ -357,11 +360,12 @@ export default class RemoteHandlerAbstract {
readRate: computeRate(readDuration, SIZE),
}
} catch (error) {
warn(`error while testing the remote at step ${step}`, { error })
return {
success: false,
step,
file: testFileName,
error: error.message || String(error),
error,
}
} finally {
ignoreErrors.call(this._unlink(testFileName))

View File

@@ -2,7 +2,12 @@
const camelCase = require('lodash/camelCase')
const { defineProperties, defineProperty, keys } = Object
const {
defineProperties,
defineProperty,
hasOwn = Function.prototype.call.bind(Object.prototype.hasOwnProperty),
keys,
} = Object
const noop = Function.prototype
const MIXIN_CYCLIC_DESCRIPTOR = {
@@ -13,23 +18,49 @@ const MIXIN_CYCLIC_DESCRIPTOR = {
}
module.exports = function mixin(object, mixins, args) {
const importing = { __proto__: null }
const importers = { __proto__: null }
function instantiateMixin(name, Mixin) {
defineProperty(object, name, MIXIN_CYCLIC_DESCRIPTOR)
const instance = new Mixin(object, ...args)
defineProperty(object, name, {
value: instance,
})
return instance
}
// add lazy property for each of the mixin, this allows mixins to depend on
// one another without any special ordering
const descriptors = {}
const descriptors = {
loadMixin(name) {
if (hasOwn(this, name)) {
return Promise.resolve(this[name])
}
let promise = importing[name]
if (promise === undefined) {
const clean = () => {
delete importing[name]
}
promise = importers[name]().then(Mixin => instantiateMixin(name, Mixin))
promise.then(clean, clean)
importing[name] = promise
}
return promise
},
}
keys(mixins).forEach(name => {
const Mixin = mixins[name]
name = camelCase(name)
descriptors[name] = {
configurable: true,
get: () => {
defineProperty(object, name, MIXIN_CYCLIC_DESCRIPTOR)
const instance = new Mixin(object, ...args)
defineProperty(object, name, {
value: instance,
})
return instance
},
if (Mixin.prototype === undefined) {
importers[name] = Mixin(name)
} else {
descriptors[name] = {
configurable: true,
get: () => instantiateMixin(name, Mixin),
}
}
})
defineProperties(object, descriptors)

View File

@@ -16,7 +16,7 @@
},
"preferGlobal": false,
"engines": {
"node": ">=6"
"node": ">=7.6"
},
"dependencies": {
"bind-property-descriptor": "^2.0.0",

View File

@@ -14,14 +14,14 @@
"url": "https://vates.fr"
},
"license": "AGPL-3.0-or-later",
"version": "0.4.0",
"version": "0.5.0",
"engines": {
"node": ">=12"
},
"dependencies": {
"@vates/event-listeners-manager": "^1.0.0",
"@vates/parse-duration": "^0.1.1",
"@xen-orchestra/emit-async": "^0.1.0",
"@xen-orchestra/emit-async": "^1.0.0",
"@xen-orchestra/log": "^0.3.0",
"app-conf": "^2.1.0",
"lodash": "^4.17.21",

View File

@@ -33,26 +33,19 @@ async function main(argv) {
ignoreUnknownFormats: true,
})
const { hostname = 'localhost', port } = config?.http?.listen?.https ?? {}
const {
_: args,
file,
help,
host,
raw,
token,
} = getopts(argv, {
const opts = getopts(argv, {
alias: { file: 'f', help: 'h' },
boolean: ['help', 'raw'],
default: {
token: config.authenticationToken,
},
stopEarly: true,
string: ['file', 'host', 'token'],
string: ['file', 'host', 'token', 'url'],
})
if (help || (file === '' && args.length === 0)) {
const { _: args, file } = opts
if (opts.help || (file === '' && args.length === 0)) {
return console.log(
'%s',
`Usage:
@@ -77,18 +70,29 @@ ${pkg.name} v${pkg.version}`
const baseRequest = {
headers: {
'content-type': 'application/json',
cookie: `authenticationToken=${token}`,
},
pathname: '/api/v1',
protocol: 'https:',
rejectUnauthorized: false,
}
if (host !== '') {
baseRequest.host = host
let { token } = opts
if (opts.url !== '') {
const { protocol, host, username } = new URL(opts.url)
Object.assign(baseRequest, { protocol, host })
if (username !== '') {
token = username
}
} else {
baseRequest.hostname = hostname
baseRequest.port = port
baseRequest.protocol = 'https:'
if (opts.host !== '') {
baseRequest.host = opts.host
} else {
const { hostname = 'localhost', port } = config?.http?.listen?.https ?? {}
baseRequest.hostname = hostname
baseRequest.port = port
}
}
baseRequest.headers.cookie = `authenticationToken=${token}`
const call = async ({ method, params }) => {
if (callPath.length !== 0) {
process.stderr.write(`\n${colors.bold(`--- call #${callPath.join('.')}`)} ---\n\n`)
@@ -127,7 +131,7 @@ ${pkg.name} v${pkg.version}`
stdout.write(inspect(JSON.parse(line), { colors: true, depth: null }))
stdout.write('\n')
}
} else if (raw && typeof result === 'string') {
} else if (opts.raw && typeof result === 'string') {
stdout.write(result)
} else {
stdout.write(inspect(result, { colors: true, depth: null }))

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "@xen-orchestra/proxy-cli",
"version": "0.2.0",
"version": "0.3.0",
"license": "AGPL-3.0-or-later",
"description": "CLI for @xen-orchestra/proxy",
"keywords": [

View File

@@ -1,7 +1,7 @@
{
"private": true,
"name": "@xen-orchestra/proxy",
"version": "0.22.1",
"version": "0.23.2",
"license": "AGPL-3.0-or-later",
"description": "XO Proxy used to remotely execute backup jobs",
"keywords": [
@@ -32,13 +32,13 @@
"@vates/decorate-with": "^2.0.0",
"@vates/disposable": "^0.1.1",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/backups": "^0.23.0",
"@xen-orchestra/fs": "^1.0.1",
"@xen-orchestra/backups": "^0.25.0",
"@xen-orchestra/fs": "^1.0.3",
"@xen-orchestra/log": "^0.3.0",
"@xen-orchestra/mixin": "^0.1.0",
"@xen-orchestra/mixins": "^0.4.0",
"@xen-orchestra/self-signed": "^0.1.0",
"@xen-orchestra/xapi": "^1.0.0",
"@xen-orchestra/mixins": "^0.5.0",
"@xen-orchestra/self-signed": "^0.1.3",
"@xen-orchestra/xapi": "^1.2.0",
"ajv": "^8.0.3",
"app-conf": "^2.1.0",
"async-iterator-to-stream": "^1.1.0",
@@ -46,7 +46,7 @@
"get-stream": "^6.0.0",
"getopts": "^2.2.3",
"golike-defer": "^0.5.1",
"http-server-plus": "^0.11.0",
"http-server-plus": "^0.11.1",
"http2-proxy": "^5.0.53",
"json-rpc-protocol": "^0.13.1",
"jsonrpc-websocket-client": "^0.7.2",
@@ -60,7 +60,7 @@
"source-map-support": "^0.5.16",
"stoppable": "^1.0.6",
"xdg-basedir": "^5.1.0",
"xen-api": "^1.2.0",
"xen-api": "^1.2.1",
"xo-common": "^0.8.0"
},
"devDependencies": {

View File

@@ -9,7 +9,7 @@
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"version": "0.1.0",
"version": "0.1.3",
"engines": {
"node": ">=8.10"
},

View File

@@ -1,6 +1,6 @@
{
"name": "@xen-orchestra/upload-ova",
"version": "0.1.4",
"version": "0.1.5",
"license": "AGPL-3.0-or-later",
"description": "Basic CLI to upload ova files to Xen-Orchestra",
"keywords": [
@@ -43,7 +43,7 @@
"pw": "^0.0.4",
"xdg-basedir": "^4.0.0",
"xo-lib": "^0.11.1",
"xo-vmdk-to-vhd": "^2.3.0"
"xo-vmdk-to-vhd": "^2.4.1"
},
"devDependencies": {
"@babel/cli": "^7.0.0",

View File

@@ -0,0 +1,9 @@
'use strict'
// TODO: remove when Node >=15.0
module.exports = class AggregateError extends Error {
constructor(errors, message) {
super(message)
this.errors = errors
}
}

View File

@@ -230,8 +230,9 @@ function mixin(mixins) {
defineProperties(xapiProto, descriptors)
}
mixin({
task: require('./task.js'),
host: require('./host.js'),
SR: require('./sr.js'),
task: require('./task.js'),
VBD: require('./vbd.js'),
VDI: require('./vdi.js'),
VIF: require('./vif.js'),

View File

@@ -1,6 +1,6 @@
{
"name": "@xen-orchestra/xapi",
"version": "1.0.0",
"version": "1.2.0",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/xapi",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
@@ -15,7 +15,7 @@
"node": ">=14"
},
"peerDependencies": {
"xen-api": "^1.2.0"
"xen-api": "^1.2.1"
},
"scripts": {
"postversion": "npm publish --access public"
@@ -28,6 +28,7 @@
"golike-defer": "^0.5.1",
"lodash": "^4.17.15",
"promise-toolbox": "^0.21.0",
"vhd-lib": "^3.2.0",
"xo-common": "^0.8.0"
},
"private": false,

164
@xen-orchestra/xapi/sr.js Normal file
View File

@@ -0,0 +1,164 @@
'use strict'

const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
const { decorateClass } = require('@vates/decorate-with')
const { defer } = require('golike-defer')
const { incorrectState } = require('xo-common/api-errors')
const { VDI_FORMAT_RAW } = require('./index.js')
const peekFooterFromStream = require('vhd-lib/peekFooterFromVhdStream')
const AggregateError = require('./_AggregateError.js')

const { warn } = require('@xen-orchestra/log').createLogger('xo:xapi:sr')

// `SR.other_config` key under which the maintenance state (timestamp, shut
// down VMs, unplugged PBDs) is persisted while an SR is in maintenance mode
const OC_MAINTENANCE = 'xo:maintenanceState'

// SR-related methods mixed into the XAPI client: `this` is assumed to be the
// Xapi instance providing `call`, `callAsync`, `getField`, `setFieldEntry`,
// `VDI_create` and `VDI_importContent` — TODO confirm against ./index.js
class Sr {
  // Creates an SR and returns its opaque reference.
  //
  // Parameter names mirror the XAPI `SR.create` fields on purpose
  // (snake_case), so callers can pass XAPI records through directly.
  async create({
    content_type = 'user', // recommended by Citrix
    device_config,
    host,
    name_description = '',
    name_label,
    physical_size = 0,
    shared,
    sm_config = {},
    type,
  }) {
    const ref = await this.call(
      'SR.create',
      host,
      device_config,
      physical_size,
      name_label,
      name_description,
      type,
      content_type,
      shared,
      sm_config
    )

    // Enable auto-scan like XenCenter does; best effort only, a failure here
    // must not fail the SR creation, hence the fire-and-forget + warn
    //
    // https://developer-docs.citrix.com/projects/citrix-hypervisor-sdk/en/latest/xc-api-extensions/#sr
    this.setFieldEntry('SR', ref, 'other_config', 'auto-scan', 'true').catch(warn)

    return ref
  }

  // Switch the SR to maintenance mode:
  // - shutdown all running VMs with a VDI on this SR
  //   - their UUID is saved into SR.other_config[OC_MAINTENANCE].shutdownVms
  //   - clean shutdown is attempted, and falls back to a hard shutdown
  // - unplug all connected hosts from this SR
  //
  // `$defer` is injected by the `defer` decorator (see decorateClass at the
  // bottom): every `$defer.onFailure(...)` registers a rollback step that runs
  // only if this method throws, restoring the SR to its previous state.
  //
  // `vmsToShutdown` is the list of VM UUIDs the caller explicitly allows to be
  // shut down; any other running VM on the SR aborts the operation.
  async enableMaintenanceMode($defer, ref, { vmsToShutdown = [] } = {}) {
    const state = { timestamp: Date.now() }

    // will throw if already in maintenance mode
    await this.call('SR.add_to_other_config', ref, OC_MAINTENANCE, JSON.stringify(state))
    await $defer.onFailure.call(this, 'call', 'SR.remove_from_other_config', ref, OC_MAINTENANCE)

    // VM ref → whether the VM is paused (true) or running (false)
    const runningVms = new Map()
    const handleVbd = async ref => {
      const vmRef = await this.getField('VBD', ref, 'VM')
      if (!runningVms.has(vmRef)) {
        const power_state = await this.getField('VM', vmRef, 'power_state')
        const isPaused = power_state === 'Paused'
        if (isPaused || power_state === 'Running') {
          runningVms.set(vmRef, isPaused)
        }
      }
    }
    // walk SR → VDIs → VBDs to find every VM using this SR
    // (inner `ref` params deliberately shadow the SR ref)
    await asyncMap(await this.getField('SR', ref, 'VDIs'), async ref => {
      await asyncMap(await this.getField('VDI', ref, 'VBDs'), handleVbd)
    })

    {
      // refuse to proceed if any running VM was not explicitly listed by the
      // caller in `vmsToShutdown`
      const runningVmUuids = await asyncMap(runningVms.keys(), ref => this.getField('VM', ref, 'uuid'))
      const set = new Set(vmsToShutdown)
      for (const vmUuid of runningVmUuids) {
        if (!set.has(vmUuid)) {
          throw incorrectState({
            actual: vmsToShutdown,
            expected: runningVmUuids,
            property: 'vmsToShutdown',
          })
        }
      }
    }

    // shut the VMs down, remembering their paused state so that
    // disableMaintenanceMode can restart them the way they were
    state.shutdownVms = {}
    await asyncMapSettled(runningVms, async ([ref, isPaused]) => {
      state.shutdownVms[await this.getField('VM', ref, 'uuid')] = isPaused

      try {
        await this.callAsync('VM.clean_shutdown', ref)
      } catch (error) {
        // clean shutdown can fail (e.g. no guest tools); fall back to hard
        warn('SR_enableMaintenanceMode, VM clean shutdown', { error })
        await this.callAsync('VM.hard_shutdown', ref)
      }
      // on failure, restart the VM (start_paused = isPaused, force = true)
      $defer.onFailure.call(this, 'callAsync', 'VM.start', ref, isPaused, true)
    })

    // unplug every attached PBD, remembering which ones were attached so they
    // can be re-plugged later
    state.unpluggedPbds = []
    await asyncMapSettled(await this.getField('SR', ref, 'PBDs'), async ref => {
      if (await this.getField('PBD', ref, 'currently_attached')) {
        state.unpluggedPbds.push(await this.getField('PBD', ref, 'uuid'))

        await this.callAsync('PBD.unplug', ref)
        $defer.onFailure.call(this, 'callAsync', 'PBD.plug', ref)
      }
    })

    // persist the full state (shutdownVms + unpluggedPbds) on the SR
    await this.setFieldEntry('SR', ref, 'other_config', OC_MAINTENANCE, JSON.stringify(state))
  }

  // this method is best effort and will not stop on first error:
  // it re-plugs the PBDs and restarts the VMs recorded by
  // enableMaintenanceMode, collecting failures into a single AggregateError
  async disableMaintenanceMode(ref) {
    const state = JSON.parse((await this.getField('SR', ref, 'other_config'))[OC_MAINTENANCE])

    // will throw if not in maintenance mode
    await this.call('SR.remove_from_other_config', ref, OC_MAINTENANCE)

    const errors = []

    await asyncMap(state.unpluggedPbds, async uuid => {
      try {
        await this.callAsync('PBD.plug', await this.call('PBD.get_by_uuid', uuid))
      } catch (error) {
        errors.push(error)
      }
    })

    await asyncMap(Object.entries(state.shutdownVms), async ([uuid, isPaused]) => {
      try {
        // start_paused = isPaused (restore previous state), force = true
        await this.callAsync('VM.start', await this.call('VM.get_by_uuid', uuid), isPaused, true)
      } catch (error) {
        errors.push(error)
      }
    })

    if (errors.length !== 0) {
      throw new AggregateError(errors)
    }
  }

  // Imports a VHD stream as a new VDI on this SR and returns its ref.
  //
  // The VDI is sized from the `currentSize` field of the VHD footer peeked
  // from the stream; on failure the partially imported VDI is destroyed
  // (`$defer` injected by the `defer` decorator).
  async importVdi(
    $defer,
    ref,
    stream,
    { name_label = '[XO] Imported disk - ' + new Date().toISOString(), ...vdiCreateOpts } = {}
  ) {
    const footer = await peekFooterFromStream(stream)
    const vdiRef = await this.VDI_create({ ...vdiCreateOpts, name_label, SR: ref, virtual_size: footer.currentSize })
    $defer.onFailure.call(this, 'callAsync', 'VDI.destroy', vdiRef)
    await this.VDI_importContent(vdiRef, stream, { format: VDI_FORMAT_RAW })
    return vdiRef
  }
}
module.exports = Sr

// wrap the methods taking a `$defer` first parameter with golike-defer
decorateClass(Sr, { enableMaintenanceMode: defer, importVdi: defer })

View File

@@ -6,6 +6,8 @@ const { Ref } = require('xen-api')
const isVmRunning = require('./_isVmRunning.js')
const { warn } = require('@xen-orchestra/log').createLogger('xo:xapi:vbd')
const noop = Function.prototype
module.exports = class Vbd {
@@ -66,8 +68,10 @@ module.exports = class Vbd {
})
if (isVmRunning(powerState)) {
await this.callAsync('VBD.plug', vbdRef)
this.callAsync('VBD.plug', vbdRef).catch(warn)
}
return vbdRef
}
async unplug(ref) {

View File

@@ -30,8 +30,7 @@ class Vdi {
other_config = {},
read_only = false,
sharable = false,
sm_config,
SR,
SR = this.pool.default_SR,
tags,
type = 'user',
virtual_size,
@@ -39,10 +38,10 @@ class Vdi {
},
{
// blindly copying `sm_config` from another VDI can create problems,
// therefore it is ignored by default by this method
// therefore it should be passed explicitly
//
// see https://github.com/vatesfr/xen-orchestra/issues/4482
setSmConfig = false,
sm_config,
} = {}
) {
return this.call('VDI.create', {
@@ -51,7 +50,7 @@ class Vdi {
other_config,
read_only,
sharable,
sm_config: setSmConfig ? sm_config : undefined,
sm_config,
SR,
tags,
type,

View File

@@ -11,7 +11,7 @@ const { asyncMap } = require('@xen-orchestra/async-map')
const { createLogger } = require('@xen-orchestra/log')
const { decorateClass } = require('@vates/decorate-with')
const { defer } = require('golike-defer')
const { incorrectState } = require('xo-common/api-errors.js')
const { incorrectState, forbiddenOperation } = require('xo-common/api-errors.js')
const { Ref } = require('xen-api')
const extractOpaqueRef = require('./_extractOpaqueRef.js')
@@ -343,7 +343,13 @@ class Vm {
const vm = await this.getRecord('VM', vmRef)
if (!bypassBlockedOperation && 'destroy' in vm.blocked_operations) {
throw new Error('destroy is blocked')
throw forbiddenOperation(
`destroy is blocked: ${
vm.blocked_operations.destroy === 'true'
? 'protected from accidental deletion'
: vm.blocked_operations.destroy
}`
)
}
if (!forceDeleteDefaultTemplate && isDefaultTemplate(vm)) {
@@ -525,11 +531,15 @@ class Vm {
// requires the VM to be halted because it's not possible to re-plug VUSB on a live VM
if (unplugVusbs && isHalted) {
await asyncMap(vm.VUSBs, async ref => {
const vusb = await this.getRecord('VUSB', ref)
await vusb.$call('destroy')
$defer.call(this, 'call', 'VUSB.create', vusb.VM, vusb.USB_group, vusb.other_config)
})
// vm.VUSBs can be undefined (e.g. on XS 7.0.0)
const vusbs = vm.VUSBs
if (vusbs !== undefined) {
await asyncMap(vusbs, async ref => {
const vusb = await this.getRecord('VUSB', ref)
await vusb.$call('destroy')
$defer.call(this, 'call', 'VUSB.create', vusb.VM, vusb.USB_group, vusb.other_config)
})
}
}
let destroyNobakVdis = false

View File

@@ -1,5 +1,84 @@
# ChangeLog
## **5.71.1 (2022-06-13)**
<img id="latest" src="https://badgen.net/badge/channel/latest/yellow" alt="Channel: latest" />
### Enhancements
- Show raw errors to administrators instead of _unknown error from the peer_ (PR [#6260](https://github.com/vatesfr/xen-orchestra/pull/6260))
### Bug fixes
- [New SR] Fix `method.startsWith is not a function` when creating an _ext_ SR
- Import VDI content now works when there is a HTTP proxy between XO and the host (PR [#6261](https://github.com/vatesfr/xen-orchestra/pull/6261))
- [Backup] Fix `undefined is not iterable (cannot read property Symbol(Symbol.iterator))` on XS 7.0.0
- [Backup] Ensure a warning is shown if a target preparation step fails (PR [#6266](https://github.com/vatesfr/xen-orchestra/pull/6266))
- [OVA Export] Avoid creating a zombie task (PR [#6267](https://github.com/vatesfr/xen-orchestra/pull/6267))
- [OVA Export] Increase speed by lowering compression to acceptable level (PR [#6267](https://github.com/vatesfr/xen-orchestra/pull/6267))
- [OVA Export] Fix broken OVAs due to special characters in VM name (PR [#6267](https://github.com/vatesfr/xen-orchestra/pull/6267))
### Released packages
- @xen-orchestra/backups 0.25.0
- @xen-orchestra/backups-cli 0.7.3
- xen-api 1.2.1
- @xen-orchestra/xapi 1.2.0
- @xen-orchestra/proxy 0.23.2
- @xen-orchestra/proxy-cli 0.3.0
- xo-cli 0.14.0
- xo-vmdk-to-vhd 2.4.1
- xo-server 5.96.0
- xo-web 5.97.2
## **5.71.0 (2022-05-31)**
### Highlights
- [Backup] _Restore Health Check_ can now be configured to be run automatically during a backup schedule (PRs [#6227](https://github.com/vatesfr/xen-orchestra/pull/6227), [#6228](https://github.com/vatesfr/xen-orchestra/pull/6228), [#6238](https://github.com/vatesfr/xen-orchestra/pull/6238) & [#6242](https://github.com/vatesfr/xen-orchestra/pull/6242))
- [Backup] VMs with USB Pass-through devices are now supported! The advanced _Offline Snapshot Mode_ setting must be enabled. For Full Backup or Disaster Recovery jobs, Rolling Snapshot needs to be enabled as well. (PR [#6239](https://github.com/vatesfr/xen-orchestra/pull/6239))
- [Backup] Implement file cache for listing the backups of a VM (PR [#6220](https://github.com/vatesfr/xen-orchestra/pull/6220))
- [RPU/Host] If some backup jobs are running on the pool, ask for confirmation before starting an RPU, shutdown/rebooting a host or restarting a host's toolstack (PR [6232](https://github.com/vatesfr/xen-orchestra/pull/6232))
- [XO Web] Add ability to configure a default filter for Storage [#6236](https://github.com/vatesfr/xen-orchestra/issues/6236) (PR [#6237](https://github.com/vatesfr/xen-orchestra/pull/6237))
- [REST API] Support VDI creation via VHD import
### Enhancements
- [Backup] Merge multiple VHDs at once which will speed up the merging phase after reducing the retention of a backup job (PR [#6184](https://github.com/vatesfr/xen-orchestra/pull/6184))
- [Backup] Add setting `backups.metadata.defaultSettings.unconditionalSnapshot` in `xo-server`'s configuration file to force a snapshot even when not required by the backup, this is useful to avoid locking the VM halted during the backup (PR [#6221](https://github.com/vatesfr/xen-orchestra/pull/6221))
- [VM migration] Ensure the VM can be migrated before performing the migration to avoid issues [#5301](https://github.com/vatesfr/xen-orchestra/issues/5301) (PR [#6245](https://github.com/vatesfr/xen-orchestra/pull/6245))
- [Backup] Show any detected errors on existing backups instead of fixing them silently (PR [#6207](https://github.com/vatesfr/xen-orchestra/pull/6225))
- Created SRs will now have auto-scan enabled similarly to what XenCenter does (PR [#6246](https://github.com/vatesfr/xen-orchestra/pull/6246))
- [RPU] Disable scheduled backup jobs during RPU (PR [#6244](https://github.com/vatesfr/xen-orchestra/pull/6244))
### Bug fixes
- [S3] Fix S3 remote with empty directory not showing anything to restore (PR [#6218](https://github.com/vatesfr/xen-orchestra/pull/6218))
- [S3] Remote form did not save the `https` and `allow unauthorized` settings during remote creation (PR [#6219](https://github.com/vatesfr/xen-orchestra/pull/6219))
- [VM/advanced] Fix various errors when adding ACLs [#6213](https://github.com/vatesfr/xen-orchestra/issues/6213) (PR [#6230](https://github.com/vatesfr/xen-orchestra/pull/6230))
- [Home/Self] Don't make VM's resource set name clickable for non admin users as they aren't allowed to view the Self Service page (PR [#6252](https://github.com/vatesfr/xen-orchestra/pull/6252))
- [load-balancer] Fix density mode failing to shutdown hosts (PR [#6253](https://github.com/vatesfr/xen-orchestra/pull/6253))
- [Health] Make "Too many snapshots" table sortable by number of snapshots (PR [#6255](https://github.com/vatesfr/xen-orchestra/pull/6255))
- [Remote] Show complete errors instead of only a potentially missing message (PR [#6216](https://github.com/vatesfr/xen-orchestra/pull/6216))
### Released packages
- @xen-orchestra/self-signed 0.1.3
- vhd-lib 3.2.0
- @xen-orchestra/fs 1.0.3
- vhd-cli 0.7.2
- xo-vmdk-to-vhd 2.4.0
- @xen-orchestra/upload-ova 0.1.5
- @xen-orchestra/xapi 1.1.0
- @xen-orchestra/backups 0.24.0
- @xen-orchestra/backups-cli 0.7.2
- @xen-orchestra/emit-async 1.0.0
- @xen-orchestra/mixins 0.5.0
- @xen-orchestra/proxy 0.23.1
- xo-server 5.95.0
- xo-web 5.97.1
- xo-server-backup-reports 0.17.0
## 5.70.2 (2022-05-16)
### Bug fixes
@@ -35,7 +114,7 @@
## 5.70.0 (2022-04-29)
<img id="latest" src="https://badgen.net/badge/channel/latest/yellow" alt="Channel: latest" />
<img id="stable" src="https://badgen.net/badge/channel/stable/green" alt="Channel: stable" />
### Highlights
@@ -73,8 +152,6 @@
## **5.69.2** (2022-04-13)
<img id="stable" src="https://badgen.net/badge/channel/stable/green" alt="Channel: stable" />
### Enhancements
- [Rolling Pool Update] New algorithm for XCP-ng updates (PR [#6188](https://github.com/vatesfr/xen-orchestra/pull/6188))

View File

@@ -7,19 +7,15 @@
> Users must be able to say: “Nice enhancement, I'm eager to test it”
- [Backup] Merge multiple VHDs at once which will speed up the merging phase after reducing the retention of a backup job (PR [#6184](https://github.com/vatesfr/xen-orchestra/pull/6184))
- [Backup] Implement file cache for listing the backups of a VM (PR [#6220](https://github.com/vatesfr/xen-orchestra/pull/6220))
- [Backup] Add setting `backups.metadata.defaultSettings.unconditionalSnapshot` in `xo-server`'s configuration file to force a snapshot even when not required by the backup, this is useful to avoid locking the VM halted during the backup (PR [#6221](https://github.com/vatesfr/xen-orchestra/pull/6221))
- [XO Web] Add ability to configure a default filter for Storage [#6236](https://github.com/vatesfr/xen-orchestra/issues/6236) (PR [#6237](https://github.com/vatesfr/xen-orchestra/pull/6237))
- [Backup] VMs with USB Pass-through devices are now supported! The advanced _Offline Snapshot Mode_ setting must be enabled. For Full Backup or Disaster Recovery jobs, Rolling Snapshot needs to be enabled as well. (PR [#6239](https://github.com/vatesfr/xen-orchestra/pull/6239))
- [Backup] Merge delta backups without copying data when using VHD directories on NFS/SMB/local remote (PR [#6271](https://github.com/vatesfr/xen-orchestra/pull/6271))
### Bug fixes
> Users must be able to say: “I had this issue, happy to know it's fixed”
- [S3] Fix S3 remote with empty directory not showing anything to restore (PR [#6218](https://github.com/vatesfr/xen-orchestra/pull/6218))
- [S3] Remote form did not save the `https` and `allow unauthorized` settings during remote creation (PR [#6219](https://github.com/vatesfr/xen-orchestra/pull/6219))
- [VM/advanced] Fix various errors when adding ACLs [#6213](https://github.com/vatesfr/xen-orchestra/issues/6213) (PR [#6230](https://github.com/vatesfr/xen-orchestra/pull/6230))
- [VDI Import] Fix `this._getOrWaitObject is not a function`
- [VM] Attempting to delete a protected VM should display a modal with the error and the ability to bypass it (PR [#6290](https://github.com/vatesfr/xen-orchestra/pull/6290))
- [OVA Import] Fix import stuck after first disk
### Packages to release
@@ -32,22 +28,18 @@
> - patch: if the change is a bug fix or a simple code improvement
> - minor: if the change is a new feature
> - major: if the change breaks compatibility
>
> Keep this list alphabetically ordered to avoid merge conflicts
<!--packages-start-->
- @xen-orchestra/self-signed patch
- vhd-lib patch
- @xen-orchestra/fs patch
- vhd-cli patch
- xo-vmdk-to-vhd minor
- @xen-orchestra/upload-ova patch
- @vates/event-listeners-manager patch
- @vates/read-chunk major
- @xen-orchestra/backups minor
- @xen-orchestra/backups-cli patch
- @xen-orchestra/emit-async major
- @xen-orchestra/mixins minor
- @xen-orchestra/proxy minor
- @xen-orchestra/xapi minor
- vhd-lib minor
- xo-remote-parser minor
- xo-server minor
- xo-web minor
- xo-server-backup-reports minor
- xo-vmdk-to-vhd patch
<!--packages-end-->

View File

@@ -99,3 +99,38 @@ To solve this issue, we recommend that you:
- wait until the other backup job is completed/the merge process is done
- make sure your remote storage is not being overworked
## Error: HTTP connection has timed out
This error occurs when XO tries to fetch data from a host, via the HTTP GET method. This error essentially means that the host (dom0 specifically) isn't responding anymore, after we asked it to expose the disk to be exported. This could be a symptom of having an overloaded dom0 that couldn't respond fast enough. It can also be caused by dom0 having trouble attaching the disk in question to expose it for fetching via HTTP, or just not having enough resources to answer our GET request.
::: warning
As a temporary workaround you can increase the timeout higher than the default value, to allow the host more time to respond. But you will need to eventually diagnose the root cause of the slow host response or else you risk the issue returning.
:::
Create the following file:
```
/etc/xo-server/config.httpInactivityTimeout.toml
```
Add the following lines:
```
# XOA Support - Work-around HTTP timeout issue during backups
[xapiOptions]
httpInactivityTimeout = 1800000 # 30 mins
```
## Error: Expected values to be strictly equal
This error occurs at the end of the transfer. XO checks the exported VM disk integrity, to ensure it's a valid VHD file (we check the VHD header as well as the footer of the received file). This error means the header and footer did not match, so the file is incomplete (likely the export from dom0 failed at some point and we only received a partial VHD/VM disk).
## Error: the job is already running
This means the same job is still running, typically from the last scheduled run. This happens when you have a backup job scheduled too often. It can also occur if you have a long timeout configured for the job, and a slow VM export or slow transfer to your remote. In either case, you need to adjust your backup schedule to allow time for the job to finish or timeout before the next scheduled run. We consider this an error to ensure you'll be notified that the planned schedule won't run this time because the previous one isn't finished.
## Error: VDI_IO_ERROR
This error comes directly from your host/dom0, and not XO. Essentially, XO asked the host to expose a VM disk to export via HTTP (as usual), XO managed to make the HTTP GET connection, and even start the transfer. But then at some point the host couldn't read the VM disk any further, causing this error on the host side. This might happen if the VDI is corrupted on the storage, or if there's a race condition during snapshots. More rarely, this can also occur if your SR is just too slow to keep up with the export as well as live VM traffic.
## Error: no XAPI associated to <UUID>
This message means that XO had a UUID of a VM to backup, but when the job ran it couldn't find any object matching it. This could be caused by the pool where this VM lived no longer being connected to XO. Double-check that the pool hosting the VM is currently connected under Settings > Servers. You can also search for the VM UUID in the Home > VMs search bar. If you can see it, run the backup job again and it will work. If you cannot, either the VM was removed or the pool is not connected.

View File

@@ -66,12 +66,13 @@ You shouldn't have to change this. It's the path where `xo-web` files are served
## Custom certificate authority
If you use certificates signed by an in-house CA for your XenServer hosts, and want to have Xen Orchestra connect to them without rejection, you need to add the `--use-openssl-ca` option in Node, but also add the CA to your trust store (`/etc/ssl/certs` via `update-ca-certificates` in your XOA).
If you use certificates signed by an in-house CA for your XCP-ng or XenServer hosts, and want to have Xen Orchestra connect to them without rejection, you can use the [`NODE_EXTRA_CA_CERTS`](https://nodejs.org/api/cli.html#cli_node_extra_ca_certs_file) environment variable.
To enable this option in your XOA, edit the `/etc/systemd/system/xo-server.service` file and add this:
To enable this option in your XOA, create `/etc/systemd/system/xo-server.service.d/ca.conf` with the following content:
```
Environment=NODE_OPTIONS=--use-openssl-ca
[Service]
Environment=NODE_EXTRA_CA_CERTS=/usr/local/share/ca-certificates/my-cert.crt
```
Don't forget to reload `systemd` conf and restart `xo-server`:
@@ -81,9 +82,7 @@ Don't forget to reload `systemd` conf and restart `xo-server`:
# systemctl restart xo-server.service
```
:::tip
The `--use-openssl-ca` option is ignored by Node if Xen-Orchestra is run with Linux capabilities. Capabilities are commonly used to bind applications to privileged ports (<1024) (i.e. `CAP_NET_BIND_SERVICE`). Local NAT rules (`iptables`) or a reverse proxy would be required to use privileged ports and a custom certificate authority.
:::
> For XO Proxy, the process is almost the same except the file to create is `/etc/systemd/system/xo-proxy.service.d/ca.conf` and the service to restart is `xo-proxy.service`.
## Redis server

View File

@@ -18,6 +18,8 @@ If you lose your main pool, you can start the copy on the other side, with very
:::warning
It is normal that you can't boot the copied VM directly: we protect it. The normal workflow is to make a clone and then work on it.
This also affects VMs with "Auto Power On" enabled, because of our protections you can ensure these won't start on your CR destination if you happen to reboot it.
:::
## Configure it

View File

@@ -35,3 +35,7 @@ A higher retention number will lead to huge space occupation on your SR.
If you boot a copy of your production VM, be careful: if they share the same static IP, you'll have troubles.
A good way to avoid this kind of problem is to remove the network interface on the DR VM and check if the export is correctly done.
:::warning
For each DR replicated VM, we add "start" as a blocked operation, meaning even VMs with "Auto power on" enabled will not be started on your DR destination if it reboots.
:::

View File

@@ -141,6 +141,28 @@ curl \
> myDisk.vhd
```
## VDI Import
A VHD can be imported on an SR to create a VDI at `/rest/v0/srs/<sr uuid>/vdis`.
```bash
curl \
-X POST \
-b authenticationToken=KQxQdm2vMiv7jBIK0hgkmgxKzemd8wSJ7ugFGKFkTbs \
-T myDisk.vhd \
'https://xo.example.org/rest/v0/srs/357bd56c-71f9-4b2a-83b8-3451dec04b8f/vdis?name_label=my_imported_VDI' \
| cat
```
> Note: the final `| cat` ensures cURL's standard output is not a TTY, which is necessary for upload stats to be displayed.
This request returns the UUID of the created VDI.
The following query parameters are supported to customize the created VDI:
- `name_label`
- `name_description`
## The future
We are adding features and improving the REST API step by step. If you have interesting use cases or feedback, please ask directly at <https://xcp-ng.org/forum/category/12/xen-orchestra>

View File

@@ -60,6 +60,7 @@
"testEnvironment": "node",
"testPathIgnorePatterns": [
"/@vates/decorate-with/",
"/@vates/event-listeners-manager/",
"/@vates/predicates/",
"/@xen-orchestra/audit-core/",
"/dist/",

View File

@@ -1,3 +0,0 @@
'use strict'
module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))

View File

@@ -1 +0,0 @@
../../scripts/babel-eslintrc.js

View File

@@ -0,0 +1,14 @@
'use strict'
const { parse } = require('./')
const { ast, pattern } = require('./index.fixtures')
module.exports = ({ benchmark }) => {
benchmark('parse', () => {
parse(pattern)
})
benchmark('toString', () => {
ast.toString()
})
}

View File

@@ -1,8 +1,10 @@
import * as CM from './'
'use strict'
export const pattern = 'foo !"\\\\ \\"" name:|(wonderwoman batman) hasCape? age:32 chi*go /^foo\\/bar\\./i'
const CM = require('./')
export const ast = new CM.And([
exports.pattern = 'foo !"\\\\ \\"" name:|(wonderwoman batman) hasCape? age:32 chi*go /^foo\\/bar\\./i'
exports.ast = new CM.And([
new CM.String('foo'),
new CM.Not(new CM.String('\\ "')),
new CM.Property('name', new CM.Or([new CM.String('wonderwoman'), new CM.String('batman')])),

View File

@@ -1,4 +1,6 @@
import { escapeRegExp, isPlainObject, some } from 'lodash'
'use strict'
const { escapeRegExp, isPlainObject, some } = require('lodash')
// ===================================================================
@@ -23,7 +25,7 @@ class Node {
}
}
export class Null extends Node {
class Null extends Node {
match() {
return true
}
@@ -32,10 +34,11 @@ export class Null extends Node {
return ''
}
}
exports.Null = Null
const formatTerms = terms => terms.map(term => term.toString(true)).join(' ')
export class And extends Node {
class And extends Node {
constructor(children) {
super()
@@ -54,8 +57,9 @@ export class And extends Node {
return isNested ? `(${terms})` : terms
}
}
exports.And = And
export class Comparison extends Node {
class Comparison extends Node {
constructor(operator, value) {
super()
this._comparator = Comparison.comparators[operator]
@@ -71,6 +75,7 @@ export class Comparison extends Node {
return this._operator + String(this._value)
}
}
exports.Comparison = Comparison
Comparison.comparators = {
'>': (a, b) => a > b,
'>=': (a, b) => a >= b,
@@ -78,7 +83,7 @@ Comparison.comparators = {
'<=': (a, b) => a <= b,
}
export class Or extends Node {
class Or extends Node {
constructor(children) {
super()
@@ -96,8 +101,9 @@ export class Or extends Node {
return `|(${formatTerms(this.children)})`
}
}
exports.Or = Or
export class Not extends Node {
class Not extends Node {
constructor(child) {
super()
@@ -112,8 +118,9 @@ export class Not extends Node {
return '!' + this.child.toString(true)
}
}
exports.Not = Not
export class NumberNode extends Node {
exports.Number = exports.NumberNode = class NumberNode extends Node {
constructor(value) {
super()
@@ -133,9 +140,8 @@ export class NumberNode extends Node {
return String(this.value)
}
}
export { NumberNode as Number }
export class NumberOrStringNode extends Node {
class NumberOrStringNode extends Node {
constructor(value) {
super()
@@ -160,9 +166,9 @@ export class NumberOrStringNode extends Node {
return this.value
}
}
export { NumberOrStringNode as NumberOrString }
exports.NumberOrString = exports.NumberOrStringNode = NumberOrStringNode
export class Property extends Node {
class Property extends Node {
constructor(name, child) {
super()
@@ -178,12 +184,13 @@ export class Property extends Node {
return `${formatString(this.name)}:${this.child.toString(true)}`
}
}
exports.Property = Property
const escapeChar = char => '\\' + char
const formatString = value =>
Number.isNaN(+value) ? (isRawString(value) ? value : `"${value.replace(/\\|"/g, escapeChar)}"`) : `"${value}"`
export class GlobPattern extends Node {
class GlobPattern extends Node {
constructor(value) {
// fallback to string node if no wildcard
if (value.indexOf('*') === -1) {
@@ -216,8 +223,9 @@ export class GlobPattern extends Node {
return this.value
}
}
exports.GlobPattern = GlobPattern
export class RegExpNode extends Node {
class RegExpNode extends Node {
constructor(pattern, flags) {
super()
@@ -245,9 +253,9 @@ export class RegExpNode extends Node {
return this.re.toString()
}
}
export { RegExpNode as RegExp }
exports.RegExp = RegExpNode
export class StringNode extends Node {
class StringNode extends Node {
constructor(value) {
super()
@@ -275,9 +283,9 @@ export class StringNode extends Node {
return formatString(this.value)
}
}
export { StringNode as String }
exports.String = exports.StringNode = StringNode
export class TruthyProperty extends Node {
class TruthyProperty extends Node {
constructor(name) {
super()
@@ -292,6 +300,7 @@ export class TruthyProperty extends Node {
return formatString(this.name) + '?'
}
}
exports.TruthyProperty = TruthyProperty
// -------------------------------------------------------------------
@@ -531,7 +540,7 @@ const parser = P.grammar({
),
ws: P.regex(/\s*/),
}).default
export const parse = parser.parse.bind(parser)
exports.parse = parser.parse.bind(parser)
// -------------------------------------------------------------------
@@ -573,7 +582,7 @@ const _getPropertyClauseStrings = ({ child }) => {
}
// Find possible values for property clauses in a and clause.
export const getPropertyClausesStrings = node => {
exports.getPropertyClausesStrings = function getPropertyClausesStrings(node) {
if (!node) {
return {}
}
@@ -605,7 +614,7 @@ export const getPropertyClausesStrings = node => {
// -------------------------------------------------------------------
export const setPropertyClause = (node, name, child) => {
exports.setPropertyClause = function setPropertyClause(node, name, child) {
const property = child && new Property(name, typeof child === 'string' ? new StringNode(child) : child)
if (node === undefined) {

View File

@@ -1,7 +1,9 @@
/* eslint-env jest */
import { ast, pattern } from './index.fixtures'
import {
'use strict'
const { ast, pattern } = require('./index.fixtures')
const {
getPropertyClausesStrings,
GlobPattern,
Null,
@@ -11,7 +13,7 @@ import {
Property,
setPropertyClause,
StringNode,
} from './'
} = require('./')
it('getPropertyClausesStrings', () => {
const tmp = getPropertyClausesStrings(parse('foo bar:baz baz:|(foo bar /^boo$/ /^far$/) foo:/^bar$/'))

View File

@@ -16,7 +16,6 @@
"url": "https://vates.fr"
},
"preferGlobal": false,
"main": "dist/",
"browserslist": [
">2%"
],
@@ -26,21 +25,7 @@
"dependencies": {
"lodash": "^4.17.4"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^7.0.2",
"rimraf": "^3.0.0"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
"clean": "rimraf dist/",
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
}
}

View File

@@ -1,12 +0,0 @@
import { parse } from './'
import { ast, pattern } from './index.fixtures'
export default ({ benchmark }) => {
benchmark('parse', () => {
parse(pattern)
})
benchmark('toString', () => {
ast.toString()
})
}

View File

@@ -1,3 +0,0 @@
'use strict'
module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))

View File

@@ -1 +0,0 @@
../../scripts/babel-eslintrc.js

View File

@@ -1,3 +1,5 @@
'use strict'
const match = (pattern, value) => {
if (Array.isArray(pattern)) {
return (
@@ -43,4 +45,6 @@ const match = (pattern, value) => {
return pattern === value
}
export const createPredicate = pattern => value => match(pattern, value)
exports.createPredicate = function createPredicate(pattern) {
return value => match(pattern, value)
}

View File

@@ -16,27 +16,13 @@
"url": "https://vates.fr"
},
"preferGlobal": false,
"main": "dist/",
"browserslist": [
">2%"
],
"engines": {
"node": ">=6"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"cross-env": "^7.0.2",
"rimraf": "^3.0.0"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
"clean": "rimraf dist/",
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
}
}

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "vhd-cli",
"version": "0.7.1",
"version": "0.7.2",
"license": "ISC",
"description": "Tools to read/create and merge VHD files",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/vhd-cli",
@@ -24,14 +24,14 @@
"node": ">=8.10"
},
"dependencies": {
"@xen-orchestra/fs": "^1.0.1",
"@xen-orchestra/fs": "^1.0.3",
"cli-progress": "^3.1.0",
"exec-promise": "^0.7.0",
"getopts": "^2.2.3",
"human-format": "^1.0.0",
"lodash": "^4.17.21",
"uuid": "^8.3.2",
"vhd-lib": "^3.1.0"
"vhd-lib": "^3.2.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",

View File

@@ -104,7 +104,7 @@ exports.VhdAbstract = class VhdAbstract {
*
* @returns {number} the merged data size
*/
async coalesceBlock(child, blockId) {
async mergeBlock(child, blockId) {
const block = await child.readBlock(blockId)
await this.writeEntireBlock(block)
return block.data.length

View File

@@ -53,19 +53,25 @@ test('Can coalesce block', async () => {
const childDirectoryVhd = yield openVhd(handler, childDirectoryName)
await childDirectoryVhd.readBlockAllocationTable()
await parentVhd.coalesceBlock(childFileVhd, 0)
let childBlockData = (await childDirectoryVhd.readBlock(0)).data
await parentVhd.mergeBlock(childDirectoryVhd, 0)
await parentVhd.writeFooter()
await parentVhd.writeBlockAllocationTable()
let parentBlockData = (await parentVhd.readBlock(0)).data
let childBlockData = (await childFileVhd.readBlock(0)).data
// block should be present in parent
expect(parentBlockData.equals(childBlockData)).toEqual(true)
// block should not be in child since it's a rename for vhd directory
await expect(childDirectoryVhd.readBlock(0)).rejects.toThrowError()
await parentVhd.coalesceBlock(childDirectoryVhd, 0)
childBlockData = (await childFileVhd.readBlock(1)).data
await parentVhd.mergeBlock(childFileVhd, 1)
await parentVhd.writeFooter()
await parentVhd.writeBlockAllocationTable()
parentBlockData = (await parentVhd.readBlock(0)).data
childBlockData = (await childDirectoryVhd.readBlock(0)).data
expect(parentBlockData).toEqual(childBlockData)
parentBlockData = (await parentVhd.readBlock(1)).data
// block should be present in parent in case of mixed vhdfile/vhddirectory
expect(parentBlockData.equals(childBlockData)).toEqual(true)
// block should still be child
await childFileVhd.readBlock(1)
})
})

View File

@@ -142,13 +142,13 @@ exports.VhdDirectory = class VhdDirectory extends VhdAbstract {
return test(this.#blockTable, blockId)
}
_getChunkPath(partName) {
#getChunkPath(partName) {
return this._path + '/' + partName
}
async _readChunk(partName) {
// here we can implement compression and / or crypto
const buffer = await this._handler.readFile(this._getChunkPath(partName))
const buffer = await this._handler.readFile(this.#getChunkPath(partName))
const uncompressed = await this.#compressor.decompress(buffer)
return {
@@ -164,16 +164,20 @@ exports.VhdDirectory = class VhdDirectory extends VhdAbstract {
)
const compressed = await this.#compressor.compress(buffer)
return this._handler.outputFile(this._getChunkPath(partName), compressed, this._opts)
return this._handler.outputFile(this.#getChunkPath(partName), compressed, this._opts)
}
// put block in subdirectories to limit impact when doing directory listing
_getBlockPath(blockId) {
#getBlockPath(blockId) {
const blockPrefix = Math.floor(blockId / 1e3)
const blockSuffix = blockId - blockPrefix * 1e3
return `blocks/${blockPrefix}/${blockSuffix}`
}
_getFullBlockPath(blockId) {
return this.#getChunkPath(this.#getBlockPath(blockId))
}
async readHeaderAndFooter() {
await this.#readChunkFilters()
@@ -200,7 +204,7 @@ exports.VhdDirectory = class VhdDirectory extends VhdAbstract {
if (onlyBitmap) {
throw new Error(`reading 'bitmap of block' ${blockId} in a VhdDirectory is not implemented`)
}
const { buffer } = await this._readChunk(this._getBlockPath(blockId))
const { buffer } = await this._readChunk(this.#getBlockPath(blockId))
return {
id: blockId,
bitmap: buffer.slice(0, this.bitmapSize),
@@ -240,25 +244,39 @@ exports.VhdDirectory = class VhdDirectory extends VhdAbstract {
}
// only works if data are in the same handler
// and if the full block is modified in child ( which is the case whit xcp)
// and if the full block is modified in child ( which is the case with xcp)
// and if the compression type is same on both sides
async coalesceBlock(child, blockId) {
async mergeBlock(child, blockId, isResumingMerge = false) {
const childBlockPath = child._getFullBlockPath?.(blockId)
if (
!(child instanceof VhdDirectory) ||
childBlockPath !== undefined ||
this._handler !== child._handler ||
child.compressionType !== this.compressionType
child.compressionType !== this.compressionType ||
child.compressionType === 'MIXED'
) {
return super.coalesceBlock(child, blockId)
return super.mergeBlock(child, blockId)
}
await this._handler.copy(
child._getChunkPath(child._getBlockPath(blockId)),
this._getChunkPath(this._getBlockPath(blockId))
)
try {
await this._handler.rename(childBlockPath, this._getFullBlockPath(blockId))
} catch (error) {
if (error.code === 'ENOENT' && isResumingMerge === true) {
// when resuming, the blocks moved since the last merge state write are
// not in the child anymore but it should be ok
// it will throw an error if block is missing in parent
// won't detect if the block was already in parent and is broken/missing in child
const { data } = await this.readBlock(blockId)
assert.strictEqual(data.length, this.header.blockSize)
} else {
throw error
}
}
setBitmap(this.#blockTable, blockId)
return sectorsToBytes(this.sectorsPerBlock)
}
async writeEntireBlock(block) {
await this._writeChunk(this._getBlockPath(block.id), block.buffer)
await this._writeChunk(this.#getBlockPath(block.id), block.buffer)
setBitmap(this.#blockTable, block.id)
}

View File

@@ -222,14 +222,14 @@ test('Can coalesce block', async () => {
const childDirectoryVhd = yield openVhd(handler, childDirectoryName)
await childDirectoryVhd.readBlockAllocationTable()
await parentVhd.coalesceBlock(childFileVhd, 0)
await parentVhd.mergeBlock(childFileVhd, 0)
await parentVhd.writeFooter()
await parentVhd.writeBlockAllocationTable()
let parentBlockData = (await parentVhd.readBlock(0)).data
let childBlockData = (await childFileVhd.readBlock(0)).data
expect(parentBlockData).toEqual(childBlockData)
await parentVhd.coalesceBlock(childDirectoryVhd, 0)
await parentVhd.mergeBlock(childDirectoryVhd, 0)
await parentVhd.writeFooter()
await parentVhd.writeBlockAllocationTable()
parentBlockData = (await parentVhd.readBlock(0)).data

View File

@@ -43,6 +43,16 @@ const VhdSynthetic = class VhdSynthetic extends VhdAbstract {
}
}
get compressionType() {
const compressionType = this.vhds[0].compressionType
for (let i = 0; i < this.vhds.length; i++) {
if (compressionType !== this.vhds[i].compressionType) {
return 'MIXED'
}
}
return compressionType
}
/**
* @param {Array<VhdAbstract>} vhds the chain of Vhds used to compute this Vhd, from the deepest child (in position 0), to the root (in the last position)
* only the last one can have any type. Other must have type DISK_TYPES.DIFFERENCING (delta)
@@ -74,17 +84,28 @@ const VhdSynthetic = class VhdSynthetic extends VhdAbstract {
}
}
async readBlock(blockId, onlyBitmap = false) {
#getVhdWithBlock(blockId) {
const index = this.#vhds.findIndex(vhd => vhd.containsBlock(blockId))
assert(index !== -1, `no such block ${blockId}`)
return this.#vhds[index]
}
async readBlock(blockId, onlyBitmap = false) {
// only read the content of the first vhd containing this block
return await this.#vhds[index].readBlock(blockId, onlyBitmap)
return await this.#getVhdWithBlock(blockId).readBlock(blockId, onlyBitmap)
}
async mergeBlock(child, blockId) {
throw new Error(`can't coalesce block into a vhd synthetic`)
}
_readParentLocatorData(id) {
return this.#vhds[this.#vhds.length - 1]._readParentLocatorData(id)
}
_getFullBlockPath(blockId) {
const vhd = this.#getVhdWithBlock(blockId)
return vhd?._getFullBlockPath(blockId)
}
}
// add decorated static method

View File

@@ -6,7 +6,8 @@ exports.checkVhdChain = require('./checkChain')
exports.createReadableSparseStream = require('./createReadableSparseStream')
exports.createVhdStreamWithLength = require('./createVhdStreamWithLength')
exports.createVhdDirectoryFromStream = require('./createVhdDirectoryFromStream').createVhdDirectoryFromStream
exports.mergeVhd = require('./merge')
const { mergeVhd } = require('./merge')
exports.mergeVhd = mergeVhd
exports.peekFooterFromVhdStream = require('./peekFooterFromVhdStream')
exports.openVhd = require('./openVhd').openVhd
exports.VhdAbstract = require('./Vhd/VhdAbstract').VhdAbstract

View File

@@ -9,6 +9,7 @@ const { getHandler } = require('@xen-orchestra/fs')
const { pFromCallback } = require('promise-toolbox')
const { VhdFile, chainVhd, mergeVhd } = require('./index')
const { _cleanupVhds: cleanupVhds } = require('./merge')
const { checkFile, createRandomFile, convertFromRawToVhd } = require('./tests/utils')
@@ -38,14 +39,15 @@ test('merge works in normal cases', async () => {
await createRandomFile(`${tempDir}/${childRandomFileName}`, mbOfChildren)
await convertFromRawToVhd(`${tempDir}/${childRandomFileName}`, `${tempDir}/${child1FileName}`)
await chainVhd(handler, parentFileName, handler, child1FileName, true)
await checkFile(`${tempDir}/${parentFileName}`)
// merge
await mergeVhd(handler, parentFileName, handler, child1FileName)
// check that vhd is still valid
await checkFile(`${tempDir}/${parentFileName}`)
// check that the merged vhd is still valid
await checkFile(`${tempDir}/${child1FileName}`)
const parentVhd = new VhdFile(handler, parentFileName)
const parentVhd = new VhdFile(handler, child1FileName)
await parentVhd.readHeaderAndFooter()
await parentVhd.readBlockAllocationTable()
@@ -138,11 +140,11 @@ test('it can resume a merge ', async () => {
await mergeVhd(handler, 'parent.vhd', handler, 'child1.vhd')
// reload header footer and block allocation table , they should succed
await parentVhd.readHeaderAndFooter()
await parentVhd.readBlockAllocationTable()
await childVhd.readHeaderAndFooter()
await childVhd.readBlockAllocationTable()
let offset = 0
// check that the data are the same as source
for await (const block of parentVhd.blocks()) {
for await (const block of childVhd.blocks()) {
const blockContent = block.data
// first block is marked as already merged, should not be modified
// second block should come from children
@@ -153,7 +155,7 @@ test('it can resume a merge ', async () => {
await fs.read(fd, buffer, 0, buffer.length, offset)
expect(buffer.equals(blockContent)).toEqual(true)
offset += parentVhd.header.blockSize
offset += childVhd.header.blockSize
}
})
@@ -183,9 +185,9 @@ test('it merge multiple child in one pass ', async () => {
await mergeVhd(handler, parentFileName, handler, [grandChildFileName, childFileName])
// check that vhd is still valid
await checkFile(parentFileName)
await checkFile(grandChildFileName)
const parentVhd = new VhdFile(handler, parentFileName)
const parentVhd = new VhdFile(handler, grandChildFileName)
await parentVhd.readHeaderAndFooter()
await parentVhd.readBlockAllocationTable()
@@ -206,3 +208,21 @@ test('it merge multiple child in one pass ', async () => {
offset += parentVhd.header.blockSize
}
})
test('it cleans vhd mergedfiles', async () => {
const handler = getHandler({ url: `file://${tempDir}` })
await handler.writeFile('parent', 'parentData')
await handler.writeFile('child1', 'child1Data')
await handler.writeFile('child2', 'child2Data')
await handler.writeFile('child3', 'child3Data')
// childPath is from the grand children to the children
await cleanupVhds(handler, 'parent', ['child3', 'child2', 'child1'], { remove: true })
// only child3 should stay, with the data of parent
const [child3, ...other] = await handler.list('.')
expect(other.length).toEqual(0)
expect(child3).toEqual('child3')
expect((await handler.readFile('child3')).toString('utf8')).toEqual('parentData')
})

View File

@@ -12,11 +12,35 @@ const { basename, dirname } = require('path')
const { DISK_TYPES } = require('./_constants')
const { Disposable } = require('promise-toolbox')
const { asyncEach } = require('@vates/async-each')
const { VhdAbstract } = require('./Vhd/VhdAbstract')
const { VhdDirectory } = require('./Vhd/VhdDirectory')
const { VhdSynthetic } = require('./Vhd/VhdSynthetic')
const { asyncMap } = require('@xen-orchestra/async-map')
const { warn } = createLogger('vhd-lib:merge')
// the chain we want to merge is [ ancestor, child1, ..., childn]
// this chain can have grand children or grand parent
//
// 1. Create a VhdSynthetic from all children if more than 1 child are merged
// 2. Merge the resulting vhd into the ancestor
// 2.a if at least one is a file : copy file part from child to parent
// 2.b if they are all vhd directory : move blocks from children to the ancestor
// 3. update the size, uuid and timestamp of the ancestor with those of child n
// 4. Delete all (now) unused VHDs
// 5. Rename the ancestor to child n
//
// VhdSynthetic
// |
// /‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾\
// [ ancestor, child1, ...,child n-1, child n ]
// | \___________________/ ^
// | | |
// | unused VHDs |
// | |
// \___________rename_____________/
// write the merge progress file at most every `delay` seconds
function makeThrottledWriter(handler, path, delay) {
let lastWrite = Date.now()
return async json => {
@@ -28,21 +52,45 @@ function makeThrottledWriter(handler, path, delay) {
}
}
// make the rename / delete part of the merge process
// will fail if parent and children are in different remote
function cleanupVhds(handler, parent, children, { logInfo = noop, remove = false } = {}) {
if (!Array.isArray(children)) {
children = [children]
}
const mergeTargetChild = children.shift()
return Promise.all([
VhdAbstract.rename(handler, parent, mergeTargetChild),
asyncMap(children, child => {
logInfo(`the VHD child is already merged`, { child })
if (remove) {
logInfo(`deleting merged VHD child`, { child })
return VhdAbstract.unlink(handler, child)
}
}),
])
}
module.exports._cleanupVhds = cleanupVhds
// Merge one or multiple vhd child into vhd parent.
// childPath can be array to create a synthetic VHD from multiple VHDs
// childPath is from the grand children to the children
//
// TODO: rename the VHD file during the merge
module.exports = limitConcurrency(2)(async function merge(
module.exports.mergeVhd = limitConcurrency(2)(async function merge(
parentHandler,
parentPath,
childHandler,
childPath,
{ onProgress = noop } = {}
{ onProgress = noop, logInfo = noop, remove } = {}
) {
const mergeStatePath = dirname(parentPath) + '/' + '.' + basename(parentPath) + '.merge.json'
return await Disposable.use(async function* () {
let mergeState
let isResuming = false
try {
const mergeStateContent = await parentHandler.readFile(mergeStatePath)
mergeState = JSON.parse(mergeStateContent)
@@ -75,6 +123,7 @@ module.exports = limitConcurrency(2)(async function merge(
assert.strictEqual(childVhd.footer.diskType, DISK_TYPES.DIFFERENCING)
assert.strictEqual(childVhd.header.blockSize, parentVhd.header.blockSize)
} else {
isResuming = true
// vhd should not have changed to resume
assert.strictEqual(parentVhd.header.checksum, mergeState.parent.header)
assert.strictEqual(childVhd.header.checksum, mergeState.child.header)
@@ -115,12 +164,12 @@ module.exports = limitConcurrency(2)(async function merge(
let counter = 0
const mergeStateWriter = makeThrottledWriter(parentHandler, mergeStatePath, 10e3)
await asyncEach(
toMerge,
async blockId => {
merging.add(blockId)
mergeState.mergedDataSize += await parentVhd.coalesceBlock(childVhd, blockId)
mergeState.mergedDataSize += await parentVhd.mergeBlock(childVhd, blockId, isResuming)
merging.delete(blockId)
onProgress({
@@ -155,6 +204,8 @@ module.exports = limitConcurrency(2)(async function merge(
// should be a disposable
parentHandler.unlink(mergeStatePath).catch(warn)
await cleanupVhds(parentHandler, parentPath, childPath, { logInfo, remove })
return mergeState.mergedDataSize
})
})

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "vhd-lib",
"version": "3.1.0",
"version": "3.2.0",
"license": "AGPL-3.0-or-later",
"description": "Primitives for VHD file handling",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/vhd-lib",
@@ -29,7 +29,7 @@
"uuid": "^8.3.1"
},
"devDependencies": {
"@xen-orchestra/fs": "^1.0.1",
"@xen-orchestra/fs": "^1.0.3",
"execa": "^5.0.0",
"get-stream": "^6.0.0",
"rimraf": "^3.0.2",

View File

@@ -3,11 +3,10 @@
const { readChunk } = require('@vates/read-chunk')
const { FOOTER_SIZE } = require('./_constants')
const { fuFooter } = require('./_structs')
const { unpackFooter } = require('./Vhd/_utils.js')
module.exports = async function peekFooterFromStream(stream) {
const footerBuffer = await readChunk(stream, FOOTER_SIZE)
const footer = fuFooter.unpack(footerBuffer)
stream.unshift(footerBuffer)
return footer
const buffer = await readChunk(stream, FOOTER_SIZE)
stream.unshift(buffer)
return unpackFooter(buffer)
}

View File

@@ -85,10 +85,9 @@ async function convertToVhdDirectory(rawFileName, vhdFileName, path) {
await fs.mkdir(path + '/blocks/0/')
const stats = await fs.stat(rawFileName)
const sizeMB = stats.size / 1024 / 1024
for (let i = 0, offset = 0; i < sizeMB; i++, offset += blockDataSize) {
for (let i = 0, offset = 0; offset < stats.size; i++, offset += blockDataSize) {
const blockData = Buffer.alloc(blockDataSize)
await fs.read(srcRaw, blockData, offset)
await fs.read(srcRaw, blockData, 0, blockData.length, offset)
await fs.writeFile(path + '/blocks/0/' + i, Buffer.concat([bitmap, blockData]))
}
await fs.close(srcRaw)

View File

@@ -40,7 +40,7 @@
"human-format": "^1.0.0",
"lodash": "^4.17.4",
"pw": "^0.0.4",
"xen-api": "^1.2.0"
"xen-api": "^1.2.1"
},
"devDependencies": {
"@babel/cli": "^7.1.5",

View File

@@ -8,6 +8,6 @@
"promise-toolbox": "^0.19.2",
"readable-stream": "^3.1.1",
"throttle": "^1.0.3",
"vhd-lib": "^3.1.0"
"vhd-lib": "^3.2.0"
}
}

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "xen-api",
"version": "1.2.0",
"version": "1.2.1",
"license": "ISC",
"description": "Connector to the Xen API",
"keywords": [

View File

@@ -465,6 +465,8 @@ export class Xapi extends EventEmitter {
await this._setHostAddressInUrl(url, host)
const doRequest = httpRequest.put.bind(undefined, $cancelToken, {
agent: this.httpAgent,
body,
headers,
rejectUnauthorized: !this._allowUnauthorized,
@@ -486,7 +488,6 @@ export class Xapi extends EventEmitter {
query: 'task_id' in query ? omit(query, 'task_id') : query,
maxRedirects: 0,
agent: this.httpAgent,
}).then(
response => {
response.cancel()

View File

@@ -1,12 +1,12 @@
import { ensureDir as mkdirp } from 'fs-extra'
import { readFile, writeFile } from 'fs/promises'
import { xdgConfig } from 'xdg-basedir'
import lodashGet from 'lodash/get.js'
import lodashUnset from 'lodash/unset.js'
import xdgBasedir from 'xdg-basedir'
// ===================================================================
const configPath = xdgBasedir.config + '/xo-cli'
const configPath = xdgConfig + '/xo-cli'
const configFile = configPath + '/config.json'
// ===================================================================

View File

@@ -75,13 +75,23 @@ async function parseRegisterArgs(args) {
}
}
async function _createToken({ allowUnauthorized, email, expiresIn, password, url }) {
async function _createToken({ allowUnauthorized, description, email, expiresIn, password, url }) {
const xo = new Xo({ rejectUnauthorized: !allowUnauthorized, url })
await xo.open()
await xo.signIn({ email, password })
console.warn('Successfully logged with', xo.user.email)
try {
await xo.signIn({ email, password })
console.warn('Successfully logged with', xo.user.email)
return await xo.call('token.create', { expiresIn })
return await xo.call('token.create', { description, expiresIn }).catch(error => {
// if invalid parameter error, retry without description for backward compatibility
if (error.code === 10) {
return xo.call('token.create', { expiresIn })
}
throw error
})
} finally {
await xo.close()
}
}
function createOutputStream(path) {
@@ -272,7 +282,10 @@ function main(args) {
COMMANDS.help = help
async function createToken(args) {
const token = await _createToken(await parseRegisterArgs(args))
const opts = await parseRegisterArgs(args)
opts.description = 'xo-cli --createToken'
const token = await _createToken(opts)
console.warn('Authentication token created')
console.warn()
console.log(token)
@@ -281,6 +294,7 @@ COMMANDS.createToken = createToken
async function register(args) {
const opts = await parseRegisterArgs(args)
opts.description = 'xo-cli --register'
await config.set({
allowUnauthorized: opts.allowUnauthorized,
@@ -397,64 +411,67 @@ async function call(args) {
delete params['@']
const xo = await connect()
// FIXME: do not use private properties.
const baseUrl = xo._url.replace(/^ws/, 'http')
const httpOptions = {
rejectUnauthorized: !(await config.load()).allowUnauthorized,
}
const result = await xo.call(method, params)
let keys, key, url
if (isObject(result) && (keys = getKeys(result)).length === 1) {
key = keys[0]
if (key === '$getFrom') {
ensurePathParam(method, file)
url = new URL(result[key], baseUrl)
const output = createOutputStream(file)
const response = await hrp(url, httpOptions)
const progress = progressStream(
{
length: response.headers['content-length'],
time: 1e3,
},
printProgress
)
return fromCallback(pipeline, response, progress, output)
try {
// FIXME: do not use private properties.
const baseUrl = xo._url.replace(/^ws/, 'http')
const httpOptions = {
rejectUnauthorized: !(await config.load()).allowUnauthorized,
}
if (key === '$sendTo') {
ensurePathParam(method, file)
url = new URL(result[key], baseUrl)
const result = await xo.call(method, params)
let keys, key, url
if (isObject(result) && (keys = getKeys(result)).length === 1) {
key = keys[0]
const { size: length } = await stat(file)
const input = pipeline(
createReadStream(file),
progressStream(
if (key === '$getFrom') {
ensurePathParam(method, file)
url = new URL(result[key], baseUrl)
const output = createOutputStream(file)
const response = await hrp(url, httpOptions)
const progress = progressStream(
{
length,
length: response.headers['content-length'],
time: 1e3,
},
printProgress
),
noop
)
)
return hrp
.post(url, httpOptions, {
body: input,
headers: {
'content-length': length,
},
})
.readAll('utf-8')
return fromCallback(pipeline, response, progress, output)
}
if (key === '$sendTo') {
ensurePathParam(method, file)
url = new URL(result[key], baseUrl)
const { size: length } = await stat(file)
const input = pipeline(
createReadStream(file),
progressStream(
{
length,
time: 1e3,
},
printProgress
),
noop
)
return hrp
.post(url, httpOptions, {
body: input,
headers: {
'content-length': length,
},
})
.readAll('utf-8')
}
}
}
return result
return result
} finally {
await xo.close()
}
}
COMMANDS.call = call

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "xo-cli",
"version": "0.13.0",
"version": "0.14.0",
"license": "AGPL-3.0-or-later",
"description": "Basic CLI for Xen-Orchestra",
"keywords": [
@@ -29,7 +29,7 @@
"node": ">=14.13"
},
"dependencies": {
"chalk": "^4.1.0",
"chalk": "^5.0.1",
"exec-promise": "^0.7.0",
"fs-extra": "^10.0.0",
"getopts": "^2.3.0",
@@ -37,11 +37,11 @@
"human-format": "^1.0.0",
"lodash": "^4.17.4",
"micromatch": "^4.0.2",
"pretty-ms": "^7.0.0",
"pretty-ms": "^8.0.0",
"progress-stream": "^2.0.0",
"promise-toolbox": "^0.21.0",
"pw": "^0.0.4",
"xdg-basedir": "^4.0.0",
"xdg-basedir": "^5.1.0",
"xo-lib": "^0.11.1"
},
"scripts": {

View File

@@ -2,26 +2,51 @@ import filter from 'lodash/filter'
import map from 'lodash/map'
import trim from 'lodash/trim'
import trimStart from 'lodash/trimStart'
import queryString from 'querystring'
import urlParser from 'url-parse'
const NFS_RE = /^([^:]+):(?:(\d+):)?([^:]+)$/
const SMB_RE = /^([^:]+):(.+)@([^@]+)\\\\([^\0]+)(?:\0(.*))?$/
const NFS_RE = /^([^:]+):(?:(\d+):)?([^:?]+)(\?[^?]*)?$/
const SMB_RE = /^([^:]+):(.+)@([^@]+)\\\\([^\0?]+)(?:\0([^?]*))?(\?[^?]*)?$/
const sanitizePath = (...paths) => filter(map(paths, s => s && filter(map(s.split('/'), trim)).join('/'))).join('/')
export const parse = string => {
const object = {}
const parseOptionList = (optionList = '') => {
if (optionList.startsWith('?')) {
optionList = optionList.substring(1)
}
const parsed = queryString.parse(optionList)
Object.keys(parsed).forEach(key => {
const val = parsed[key]
parsed[key] = JSON.parse(val)
})
return parsed
}
const [type, rest] = string.split('://')
const makeOptionList = options => {
const encoded = {}
Object.keys(options).forEach(key => {
const val = options[key]
encoded[key] = JSON.stringify(val)
})
return queryString.stringify(encoded)
}
export const parse = string => {
let object = {}
let [type, rest] = string.split('://')
if (type === 'file') {
object.type = 'file'
let optionList
;[rest, optionList] = rest.split('?')
object.path = `/${trimStart(rest, '/')}` // the leading slash has been forgotten on client side first implementation
object = { ...parseOptionList(optionList), ...object }
} else if (type === 'nfs') {
object.type = 'nfs'
let host, port, path
let host, port, path, optionList
// Some users have a remote with a colon in the URL, which breaks the parsing since this commit: https://github.com/vatesfr/xen-orchestra/commit/fb1bf6a1e748b457f2d2b89ba02fa104554c03df
try {
;[, host, port, path] = NFS_RE.exec(rest)
;[, host, port, path, optionList] = NFS_RE.exec(rest)
} catch (err) {
;[host, path] = rest.split(':')
object.invalidUrl = true
@@ -29,16 +54,18 @@ export const parse = string => {
object.host = host
object.port = port
object.path = `/${trimStart(path, '/')}` // takes care of a missing leading slash coming from previous version format
object = { ...parseOptionList(optionList), ...object }
} else if (type === 'smb') {
object.type = 'smb'
const [, username, password, domain, host, path = ''] = SMB_RE.exec(rest)
const [, username, password, domain, host, path = '', optionList] = SMB_RE.exec(rest)
object.host = host
object.path = path
object.domain = domain
object.username = username
object.password = password
object = { ...parseOptionList(optionList), ...object }
} else if (type === 's3' || type === 's3+http') {
const parsed = urlParser(string, true)
const parsed = urlParser(string, false)
object.protocol = parsed.protocol === 's3:' ? 'https' : 'http'
object.type = 's3'
object.region = parsed.hash.length === 0 ? undefined : parsed.hash.slice(1) // remove '#'
@@ -46,24 +73,12 @@ export const parse = string => {
object.path = parsed.pathname
object.username = parsed.username
object.password = decodeURIComponent(parsed.password)
const qs = parsed.query
object.allowUnauthorized = qs.allowUnauthorized === 'true'
object = { ...parseOptionList(parsed.query), ...object }
}
return object
}
export const format = ({
type,
host,
path,
port,
username,
password,
domain,
protocol = type,
region,
allowUnauthorized = false,
}) => {
export const format = ({ type, host, path, port, username, password, domain, protocol = type, region, ...options }) => {
type === 'local' && (type = 'file')
let string = `${type}://`
if (type === 'nfs') {
@@ -85,8 +100,10 @@ export const format = ({
}
string += path
if (type === 's3' && allowUnauthorized === true) {
string += `?allowUnauthorized=true`
const optionsList = makeOptionList(options)
if (optionsList !== '') {
string += '?' + optionsList
}
if (type === 's3' && region !== undefined) {
string += `#${region}`

View File

@@ -15,6 +15,14 @@ const data = deepFreeze({
path: '/var/lib/xoa/backup',
},
},
'file with use vhd directory': {
string: 'file:///var/lib/xoa/backup?useVhdDirectory=true',
object: {
type: 'file',
path: '/var/lib/xoa/backup',
useVhdDirectory: true,
},
},
SMB: {
string: 'smb://Administrator:pas:sw@ord@toto\\\\192.168.100.225\\smb\0',
object: {
@@ -26,6 +34,18 @@ const data = deepFreeze({
password: 'pas:sw@ord',
},
},
'smb with directory': {
string: 'smb://Administrator:pas:sw@ord@toto\\\\192.168.100.225\\smb\0?useVhdDirectory=true',
object: {
type: 'smb',
host: '192.168.100.225\\smb',
path: '',
domain: 'toto',
username: 'Administrator',
password: 'pas:sw@ord',
useVhdDirectory: true,
},
},
NFS: {
string: 'nfs://192.168.100.225:/media/nfs',
object: {
@@ -44,8 +64,18 @@ const data = deepFreeze({
path: '/media/nfs',
},
},
'nfs with vhdDirectory': {
string: 'nfs://192.168.100.225:20:/media/nfs?useVhdDirectory=true',
object: {
type: 'nfs',
host: '192.168.100.225',
port: '20',
path: '/media/nfs',
useVhdDirectory: true,
},
},
S3: {
string: 's3://AKIAS:XSuBupZ0mJlu%2B@s3-us-west-2.amazonaws.com/test-bucket/dir',
string: 's3://AKIAS:XSuBupZ0mJlu%2B@s3-us-west-2.amazonaws.com/test-bucket/dir?allowUnauthorized=false',
object: {
type: 's3',
protocol: 'https',
@@ -70,6 +100,21 @@ const data = deepFreeze({
allowUnauthorized: true,
},
},
'S3 with brotli': {
string:
's3+http://Administrator:password@192.168.100.225/bucket/dir?compressionType=%22brotli%22&compressionOptions=%7B%22level%22%3A1%7D#reg1',
object: {
type: 's3',
host: '192.168.100.225',
protocol: 'http',
path: '/bucket/dir',
region: 'reg1',
username: 'Administrator',
password: 'password',
compressionType: 'brotli',
compressionOptions: { level: 1 },
},
},
})
const parseData = deepFreeze({
@@ -111,7 +156,6 @@ const parseData = deepFreeze({
region: 'reg1',
username: 'Administrator',
password: 'password',
allowUnauthorized: false,
},
},
'S3 accepting self signed certificate': {
@@ -126,19 +170,6 @@ const parseData = deepFreeze({
password: 'password',
allowUnauthorized: true,
},
'S3 with broken allowUnauthorized': {
string: 's3+http://Administrator:password@192.168.100.225/bucket/dir?allowUnauthorized=notTrue#reg1',
object: {
type: 's3',
host: '192.168.100.225',
protocol: 'http',
path: '/bucket/dir',
region: 'reg1',
username: 'Administrator',
password: 'password',
allowUnauthorized: false,
},
},
},
})
@@ -152,19 +183,6 @@ const formatData = deepFreeze({
path: '/var/lib/xoa/backup',
},
},
'S3 with broken allowUnauthorized': {
string: 's3+http://Administrator:password@192.168.100.225/bucket/dir#reg1',
object: {
type: 's3',
host: '192.168.100.225',
protocol: 'http',
path: '/bucket/dir',
region: 'reg1',
username: 'Administrator',
password: 'password',
allowUnauthorized: 'notTrue',
},
},
})
// -------------------------------------------------------------------

View File

@@ -1,6 +1,6 @@
{
"name": "xo-server-backup-reports",
"version": "0.16.10",
"version": "0.17.0",
"license": "AGPL-3.0-or-later",
"description": "Backup reports plugin for XO-Server",
"keywords": [

View File

@@ -154,7 +154,7 @@ poolMarkingPrefix = 'xo:clientInfo:'
[xo-proxy]
callTimeout = '1 min'
channel = 'xo-proxy-appliance'
channel = 'xo-proxy-appliance-{xoChannel}'
namespace = 'xoProxyAppliance'

View File

@@ -4,6 +4,7 @@
- [Collections](#collections)
- [VM Export](#vm-export)
- [VDI Export](#vdi-export)
- [VDI Import](#vdi-import)
- [The future](#the-future)
> This [REST](https://en.wikipedia.org/wiki/Representational_state_transfer)-oriented API is experimental. Non-backward compatible changes or removal may occur in any future release. Use of the feature is not recommended in production environments.
@@ -137,6 +138,28 @@ curl \
> myDisk.vhd
```
## VDI Import
A VHD can be imported on an SR to create a VDI at `/rest/v0/srs/<sr uuid>/vdis`.
```bash
curl \
-X POST \
-b authenticationToken=KQxQdm2vMiv7jBIK0hgkmgxKzemd8wSJ7ugFGKFkTbs \
-T myDisk.vhd \
'https://xo.company.lan/rest/v0/srs/357bd56c-71f9-4b2a-83b8-3451dec04b8f/vdis?name_label=my_imported_VDI' \
| cat
```
> Note: the final `| cat` ensures cURL's standard output is not a TTY, which is necessary for upload stats to be dislayed.
This request returns the UUID of the created VDI.
The following query parameters are supported to customize the created VDI:
- `name_label`
- `name_description`
## The future
We are adding features and improving the REST API step by step. If you have interesting use cases or feedback, please ask directly at <https://xcp-ng.org/forum/category/12/xen-orchestra>

View File

@@ -1,7 +1,7 @@
{
"private": true,
"name": "xo-server",
"version": "5.93.1",
"version": "5.96.0",
"license": "AGPL-3.0-or-later",
"description": "Server part of Xen-Orchestra",
"keywords": [
@@ -20,6 +20,7 @@
"preferGlobal": true,
"bin": {
"xo-server": "dist/cli.mjs",
"xo-server-db": "dist/db-cli.mjs",
"xo-server-logs": "dist/logs-cli.mjs",
"xo-server-recover-account": "dist/recover-account-cli.mjs"
},
@@ -39,22 +40,21 @@
"@vates/predicates": "^1.0.0",
"@vates/read-chunk": "^0.1.2",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/backups": "^0.23.0",
"@xen-orchestra/backups": "^0.25.0",
"@xen-orchestra/cron": "^1.0.6",
"@xen-orchestra/defined": "^0.0.1",
"@xen-orchestra/emit-async": "^0.1.0",
"@xen-orchestra/fs": "^1.0.1",
"@xen-orchestra/emit-async": "^1.0.0",
"@xen-orchestra/fs": "^1.0.3",
"@xen-orchestra/log": "^0.3.0",
"@xen-orchestra/mixin": "^0.1.0",
"@xen-orchestra/mixins": "^0.4.0",
"@xen-orchestra/self-signed": "^0.1.0",
"@xen-orchestra/mixins": "^0.5.0",
"@xen-orchestra/self-signed": "^0.1.3",
"@xen-orchestra/template": "^0.1.0",
"@xen-orchestra/xapi": "^1.0.0",
"@xen-orchestra/xapi": "^1.2.0",
"ajv": "^8.0.3",
"app-conf": "^2.1.0",
"async-iterator-to-stream": "^1.0.1",
"base64url": "^3.0.0",
"bind-property-descriptor": "^2.0.0",
"blocked-at": "^1.2.0",
"bluebird": "^3.5.1",
"body-parser": "^1.18.2",
@@ -81,7 +81,7 @@
"highland": "^2.11.1",
"http-proxy": "^1.16.2",
"http-request-plus": "^0.14.0",
"http-server-plus": "^0.11.0",
"http-server-plus": "^0.11.1",
"human-format": "^1.0.0",
"iterable-backoff": "^0.1.0",
"js-yaml": "^4.1.0",
@@ -126,15 +126,15 @@
"unzipper": "^0.10.5",
"uuid": "^8.3.1",
"value-matcher": "^0.2.0",
"vhd-lib": "^3.1.0",
"vhd-lib": "^3.2.0",
"ws": "^8.2.3",
"xdg-basedir": "^5.1.0",
"xen-api": "^1.2.0",
"xen-api": "^1.2.1",
"xo-acl-resolver": "^0.4.1",
"xo-collection": "^0.5.0",
"xo-common": "^0.8.0",
"xo-remote-parser": "^0.8.0",
"xo-vmdk-to-vhd": "^2.3.0"
"xo-vmdk-to-vhd": "^2.4.1"
},
"devDependencies": {
"@babel/cli": "^7.0.0",

View File

@@ -0,0 +1,31 @@
import { createPredicate } from 'value-matcher'
import { extractIdsFromSimplePattern } from '@xen-orchestra/backups/extractIdsFromSimplePattern.js'
import { forbiddenOperation } from 'xo-common/api-errors.js'
export default async function backupGuard(poolId) {
const jobs = await this.getAllJobs('backup')
const guard = id => {
if (this.getObject(id).$poolId === poolId) {
throw forbiddenOperation('Backup is running', `A backup is running on the pool: ${poolId}`)
}
}
jobs.forEach(({ runId, vms }) => {
// If runId is undefined, the job is not currently running.
if (runId !== undefined) {
if (vms.id !== undefined) {
extractIdsFromSimplePattern(vms).forEach(guard)
} else {
// smartmode
// For the smartmode we take a simplified approach :
// if the smartmode is explicitly 'resident' or 'not resident' on pools : we check if it concern this pool
// if not, the job may concern this pool and we show the warning without looking through all the impacted VM
const isPoolSafe = vms.$pool === undefined ? false : !createPredicate(vms.$pool)(poolId)
if (!isPoolSafe) {
throw forbiddenOperation('May have running backup', `A backup may run on the pool: ${poolId}`)
}
}
}
})
}

View File

@@ -9,7 +9,7 @@ get.description = 'get existing ACLs'
// -------------------------------------------------------------------
export async function getCurrentPermissions() {
return /* await */ this.getPermissionsForUser(this.connection.get('user_id'))
return /* await */ this.getPermissionsForUser(this.apiContext.user.id)
}
getCurrentPermissions.description = 'get (explicit) permissions by object for the current user'

View File

@@ -8,8 +8,9 @@ getConnections.description = 'Get a list of all current connections to this API'
getConnections.permission = 'admin'
export function closeAllConnections() {
const currentConnection = this.apiContext.connection
for (const connection of this.apiConnections) {
if (connection !== this.connection) {
if (connection !== currentConnection) {
connection.close()
}
}

View File

@@ -7,7 +7,7 @@ import { REMOVE_CACHE_ENTRY } from '../_pDebounceWithKey.mjs'
import { safeDateFormat } from '../utils.mjs'
export function createJob({ schedules, ...job }) {
job.userId = this.user.id
job.userId = this.apiContext.user.id
return this.createBackupNgJob(job, schedules).then(({ id }) => id)
}

View File

@@ -6,10 +6,11 @@ import { defer } from 'golike-defer'
import { format, JsonRpcError } from 'json-rpc-peer'
import { noSuchObject } from 'xo-common/api-errors.js'
import { pipeline } from 'stream'
import { checkFooter, peekFooterFromVhdStream } from 'vhd-lib'
import { peekFooterFromVhdStream } from 'vhd-lib'
import { vmdkToVhd } from 'xo-vmdk-to-vhd'
import { VDI_FORMAT_VHD, VDI_FORMAT_RAW } from '../xapi/index.mjs'
import { parseSize } from '../utils.mjs'
const log = createLogger('xo:disk')
@@ -22,7 +23,7 @@ export const create = defer(async function ($defer, { name, size, sr, vm, bootab
let resourceSet
if (attach && (resourceSet = vm.resourceSet) != null) {
try {
await this.checkResourceSetConstraints(resourceSet, this.user.id, [sr.id])
await this.checkResourceSetConstraints(resourceSet, this.apiContext.user.id, [sr.id])
await this.allocateLimitsInResourceSet({ disk: size }, resourceSet)
$defer.onFailure(() => this.releaseLimitsInResourceSet({ disk: size }, resourceSet))
@@ -36,24 +37,26 @@ export const create = defer(async function ($defer, { name, size, sr, vm, bootab
// the resource set does not exist, falls back to normal check
}
await this.checkPermissions(this.user.id, [[sr.id, 'administrate']])
await this.checkPermissions([[sr.id, 'administrate']])
} while (false)
const xapi = this.getXapi(sr)
const vdi = await xapi.createVdi({
name_label: name,
size,
sr: sr._xapiId,
})
const vdi = await xapi._getOrWaitObject(
await xapi.VDI_create({
name_label: name,
SR: sr._xapiRef,
virtual_size: parseSize(size),
})
)
$defer.onFailure(() => vdi.$destroy())
if (attach) {
await xapi.createVbd({
await xapi.VBD_create({
bootable,
mode,
userdevice: position,
vdi: vdi.$id,
vm: vm._xapiId,
VDI: vdi.$ref,
VM: vm._xapiRef,
})
}
@@ -82,8 +85,11 @@ create.resolve = {
const VHD = 'vhd'
const VMDK = 'vmdk'
async function handleExportContent(req, res, { xapi, id, filename, format }) {
const stream = format === VMDK ? await xapi.exportVdiAsVmdk(id, filename) : await xapi.exportVdiContent(id)
async function handleExportContent(req, res, { filename, format, vdi }) {
const stream =
format === VMDK
? await vdi.$xapi.exportVdiAsVmdk(vdi.$id, filename)
: await vdi.$exportContent({ format: VDI_FORMAT_VHD })
req.on('close', () => stream.destroy())
// Remove the filename as it is already part of the URL.
@@ -103,8 +109,7 @@ export async function exportContent({ vdi, format = VHD }) {
$getFrom: await this.registerHttpRequest(
handleExportContent,
{
id: vdi._xapiId,
xapi: this.getXapi(vdi),
vdi: this.getXapiObject(vdi),
filename,
format,
},
@@ -126,20 +131,19 @@ exportContent.resolve = {
// -------------------------------------------------------------------
async function handleImportContent(req, res, { xapi, id }) {
async function handleImportContent(req, res, { vdi }) {
// Timeout seems to be broken in Node 4.
// See https://github.com/nodejs/node/issues/3319
req.setTimeout(43200000) // 12 hours
req.length = +req.headers['content-length']
await xapi.importVdiContent(id, req)
await vdi.$importContent(req, { format: VDI_FORMAT_VHD })
res.end(format.response(0, true))
}
export async function importContent({ vdi }) {
return {
$sendTo: await this.registerHttpRequest(handleImportContent, {
id: vdi._xapiId,
xapi: this.getXapi(vdi),
vdi: this.getXapiObject(vdi),
}),
}
}
@@ -192,14 +196,12 @@ async function handleImport(req, res, { type, name, description, vmdkData, srId,
break
case 'vhd':
{
const footer = await peekFooterFromVhdStream(vhdStream)
try {
checkFooter(footer)
} catch (e) {
const footer = await peekFooterFromVhdStream(part).catch(e => {
if (e instanceof assert.AssertionError) {
throw new JsonRpcError(`Vhd file had an invalid header ${e}`)
}
}
throw e
})
vhdStream = part
size = footer.currentSize
}
@@ -213,14 +215,16 @@ async function handleImport(req, res, { type, name, description, vmdkData, srId,
throw new JsonRpcError(`Unknown disk type, expected "iso", "vhd" or "vmdk", got ${type}`)
}
const vdi = await xapi.createVdi({
name_description: description,
name_label: name,
size,
sr: srId,
})
const vdi = await xapi._getOrWaitObject(
await xapi.VDI_create({
name_description: description,
name_label: name,
SR: xapi.getObject(srId, 'SR').$ref,
virtual_size: parseSize(size),
})
)
try {
await xapi.importVdiContent(vdi, vhdStream, { format: diskFormat })
await vdi.$importContent(vhdStream, { format: diskFormat })
res.end(format.response(0, vdi.$id))
} catch (e) {
await vdi.$destroy()

View File

@@ -1,6 +1,11 @@
import { createLogger } from '@xen-orchestra/log'
import assert from 'assert'
import { format } from 'json-rpc-peer'
import backupGuard from './_backupGuard.mjs'
const log = createLogger('xo:api:host')
// ===================================================================
export function setMaintenanceMode({ host, maintenance }) {
@@ -113,13 +118,22 @@ set.resolve = {
// FIXME: set force to false per default when correctly implemented in
// UI.
export function restart({ host, force = true }) {
export async function restart({ bypassBackupCheck = false, host, force = true }) {
if (bypassBackupCheck) {
log.warn('host.restart with argument "bypassBackupCheck" set to true', { hostId: host.id })
} else {
await backupGuard.call(this, host.$poolId)
}
return this.getXapi(host).rebootHost(host._xapiId, force)
}
restart.description = 'restart the host'
restart.params = {
bypassBackupCheck: {
type: 'boolean',
optional: true,
},
id: { type: 'string' },
force: {
type: 'boolean',
@@ -133,13 +147,22 @@ restart.resolve = {
// -------------------------------------------------------------------
export function restartAgent({ host }) {
export async function restartAgent({ bypassBackupCheck = false, host }) {
if (bypassBackupCheck) {
log.warn('host.restartAgent with argument "bypassBackupCheck" set to true', { hostId: host.id })
} else {
await backupGuard.call(this, host.$poolId)
}
return this.getXapiObject(host).$restartAgent()
}
restartAgent.description = 'restart the Xen agent on the host'
restartAgent.params = {
bypassBackupCheck: {
type: 'boolean',
optional: true,
},
id: { type: 'string' },
}
@@ -183,13 +206,22 @@ start.resolve = {
// -------------------------------------------------------------------
export function stop({ host, bypassEvacuate }) {
export async function stop({ bypassBackupCheck = false, host, bypassEvacuate }) {
if (bypassBackupCheck) {
log.warn('host.stop with argument "bypassBackupCheck" set to true', { hostId: host.id })
} else {
await backupGuard.call(this, host.$poolId)
}
return this.getXapi(host).shutdownHost(host._xapiId, { bypassEvacuate })
}
stop.description = 'stop the host'
stop.params = {
bypassBackupCheck: {
type: 'boolean',
optional: true,
},
id: { type: 'string' },
bypassEvacuate: { type: 'boolean', optional: true },
}

View File

@@ -18,12 +18,15 @@ delete_.description = 'Delete an ipPool'
// -------------------------------------------------------------------
export function getAll(params) {
const { user } = this
const { apiContext } = this
return this.getAllIpPools(user.permission === 'admin' ? params && params.userId : user.id)
return this.getAllIpPools(apiContext.permission === 'admin' ? params.userId : apiContext.user.id)
}
getAll.description = 'List all ipPools'
getAll.params = {
userId: { type: 'string', optional: true },
}
// -------------------------------------------------------------------

View File

@@ -26,7 +26,7 @@ get.params = {
export async function create({ job }) {
if (!job.userId) {
job.userId = this.connection.get('user_id')
job.userId = this.apiContext.user.id
}
return (await this.createJob(job)).id

View File

@@ -1,5 +1,5 @@
export function createJob({ schedules, ...job }) {
job.userId = this.user.id
job.userId = this.apiContext.user.id
return this.createMetadataBackupJob(job, schedules)
}

View File

@@ -1,11 +1,18 @@
import { asyncMap } from '@xen-orchestra/async-map'
import { createLogger } from '@xen-orchestra/log'
import { createPredicate } from 'value-matcher'
import { defer as deferrable } from 'golike-defer'
import { extractIdsFromSimplePattern } from '@xen-orchestra/backups/extractIdsFromSimplePattern.js'
import { format } from 'json-rpc-peer'
import { Ref } from 'xen-api'
import { incorrectState } from 'xo-common/api-errors.js'
import backupGuard from './_backupGuard.mjs'
import { moveFirst } from '../_moveFirst.mjs'
const log = createLogger('xo:api:pool')
// ===================================================================
export async function set({
@@ -62,7 +69,7 @@ set.resolve = {
// -------------------------------------------------------------------
export async function setDefaultSr({ sr }) {
await this.hasPermissions(this.user.id, [[sr.$pool, 'administrate']])
await this.hasPermissions(this.apiContext.user.id, [[sr.$pool, 'administrate']])
await this.getXapi(sr).setDefaultSr(sr._xapiId)
}
@@ -80,7 +87,7 @@ setDefaultSr.resolve = {
// -------------------------------------------------------------------
export async function setPoolMaster({ host }) {
await this.hasPermissions(this.user.id, [[host.$pool, 'administrate']])
await this.hasPermissions(this.apiContext.user.id, [[host.$pool, 'administrate']])
await this.getXapi(host).setPoolMaster(host._xapiId)
}
@@ -162,7 +169,51 @@ installPatches.description = 'Install patches on hosts'
// -------------------------------------------------------------------
export const rollingUpdate = deferrable(async function ($defer, { pool }) {
export const rollingUpdate = deferrable(async function ($defer, { bypassBackupCheck = false, pool }) {
const poolId = pool.id
if (bypassBackupCheck) {
log.warn('pool.rollingUpdate update with argument "bypassBackupCheck" set to true', { poolId })
} else {
await backupGuard.call(this, poolId)
}
const [schedules, jobs] = await Promise.all([this.getAllSchedules(), this.getAllJobs('backup')])
const jobsOfthePool = []
jobs.forEach(({ id: jobId, vms }) => {
if (vms.id !== undefined) {
for (const vmId of extractIdsFromSimplePattern(vms)) {
// try/catch to avoid `no such object`
try {
if (this.getObject(vmId).$poolId === poolId) {
jobsOfthePool.push(jobId)
break
}
} catch {}
}
} else {
// Smart mode
// For smart mode, we take a simplified approach:
// - if smart mode is explicitly 'resident' or 'not resident' on pools, we
// check if it concerns this pool
// - if not, the job may concern this pool so we add it to `jobsOfThePool`
if (vms.$pool === undefined || createPredicate(vms.$pool)(poolId)) {
jobsOfthePool.push(jobId)
}
}
})
// Disable schedules
await Promise.all(
schedules
.filter(schedule => jobsOfthePool.includes(schedule.jobId) && schedule.enabled)
.map(async schedule => {
await this.updateSchedule({ ...schedule, enabled: false })
$defer(() => this.updateSchedule({ ...schedule, enabled: true }))
})
)
// Disable load balancer
if ((await this.getOptionalPlugin('load-balancer'))?.loaded) {
await this.unloadPlugin('load-balancer')
$defer(() => this.loadPlugin('load-balancer'))
@@ -172,6 +223,10 @@ export const rollingUpdate = deferrable(async function ($defer, { pool }) {
})
rollingUpdate.params = {
bypassBackupCheck: {
optional: true,
type: 'boolean',
},
pool: { type: 'string' },
}
@@ -230,10 +285,7 @@ getPatchesDifference.resolve = {
// -------------------------------------------------------------------
export async function mergeInto({ source, sources = [source], target, force }) {
await this.checkPermissions(
this.user.id,
sources.map(source => [source, 'administrate'])
)
await this.checkPermissions(sources.map(source => [source, 'administrate']))
return this.mergeInto({
force,
sources,

View File

@@ -107,7 +107,7 @@ get.params = {
// -------------------------------------------------------------------
export async function getAll() {
return this.getAllResourceSets(this.user.id)
return this.getAllResourceSets(this.apiContext.user.id)
}
getAll.description = 'Get the list of all existing resource set'

View File

@@ -17,16 +17,14 @@ get.params = {
id: { type: 'string' },
}
export function create({ cron, enabled, healthCheckSr, healthCheckVmsWithTags, jobId, name, timezone }) {
export function create({ cron, enabled, jobId, name, timezone }) {
return this.createSchedule({
cron,
enabled,
healthCheckSr,
healthCheckVmsWithTags,
jobId,
name,
timezone,
userId: this.connection.get('user_id'),
userId: this.apiContext.user.id,
})
}
@@ -35,15 +33,13 @@ create.description = 'Creates a new schedule'
create.params = {
cron: { type: 'string' },
enabled: { type: 'boolean', optional: true },
healthCheckSr: { type: 'string', optional: true },
healthCheckVmsWithTags: { type: 'array', items: { type: 'string' }, optional: true },
jobId: { type: 'string' },
name: { type: 'string', optional: true },
timezone: { type: 'string', optional: true },
}
export async function set({ cron, enabled, healthCheckSr, healthCheckVmsWithTags, id, jobId, name, timezone }) {
await this.updateSchedule({ cron, enabled, healthCheckSr, healthCheckVmsWithTags, id, jobId, name, timezone })
export async function set({ cron, enabled, id, jobId, name, timezone }) {
await this.updateSchedule({ cron, enabled, id, jobId, name, timezone })
}
set.permission = 'admin'
@@ -51,8 +47,6 @@ set.description = 'Modifies an existing schedule'
set.params = {
cron: { type: 'string', optional: true },
enabled: { type: 'boolean', optional: true },
healthCheckSr: { type: 'string', optional: true },
healthCheckVmsWithTags: { type: 'array', items: { type: 'string' }, optional: true },
id: { type: 'string' },
jobId: { type: 'string', optional: true },
name: { type: ['string', 'null'], optional: true },

View File

@@ -5,7 +5,7 @@ import { getUserPublicProperties } from '../utils.mjs'
// ===================================================================
export async function signIn(credentials) {
const { connection } = this
const { connection } = this.apiContext
const { user, expiration } = await this.authenticateUser(credentials, {
ip: connection.get('user_ip', undefined),
@@ -47,7 +47,7 @@ signInWithToken.permission = null // user does not need to be authenticated
// -------------------------------------------------------------------
export function signOut() {
this.connection.unset('user_id')
this.apiContext.connection.unset('user_id')
}
signOut.description = 'sign out the user from the current session'
@@ -55,7 +55,7 @@ signOut.description = 'sign out the user from the current session'
// -------------------------------------------------------------------
export async function getUser() {
const userId = this.connection.get('user_id')
const userId = this.apiContext.user.id
return userId === undefined ? null : getUserPublicProperties(await this.getUser(userId))
}

View File

@@ -170,18 +170,15 @@ export async function createIso({
})
}
const srRef = await xapi.call(
'SR.create',
host._xapiRef,
deviceConfig,
'0', // SR size 0 because ISO
nameLabel,
nameDescription,
'iso', // SR type ISO
'iso', // SR content type ISO
type !== 'local',
{}
)
const srRef = await xapi.SR_create({
content_type: 'iso',
device_config: deviceConfig,
host: host._xapiRef,
name_description: nameDescription,
name_label: nameLabel,
shared: type !== 'local',
type: 'iso',
})
const sr = await xapi.call('SR.get_record', srRef)
return sr.uuid
@@ -247,18 +244,14 @@ export async function createNfs({
})
}
const srRef = await xapi.call(
'SR.create',
host._xapiRef,
deviceConfig,
'0',
nameLabel,
nameDescription,
'nfs', // SR LVM over iSCSI
'user', // recommended by Citrix
true,
{}
)
const srRef = await xapi.SR_create({
device_config: deviceConfig,
host: host._xapiRef,
name_description: nameDescription,
name_label: nameLabel,
shared: true,
type: 'nfs', // SR LVM over iSCSI
})
const sr = await xapi.call('SR.get_record', srRef)
return sr.uuid
@@ -302,18 +295,14 @@ export async function createHba({ host, nameLabel, nameDescription, scsiId, srUu
})
}
const srRef = await xapi.call(
'SR.create',
host._xapiRef,
deviceConfig,
'0',
nameLabel,
nameDescription,
'lvmohba', // SR LVM over HBA
'user', // recommended by Citrix
true,
{}
)
const srRef = await xapi.SR_create({
device_config: deviceConfig,
host: host._xapiRef,
name_description: nameDescription,
name_label: nameLabel,
shared: true,
type: 'lvmohba', // SR LVM over HBA
})
const sr = await xapi.call('SR.get_record', srRef)
return sr.uuid
@@ -343,18 +332,14 @@ export async function createLvm({ host, nameLabel, nameDescription, device }) {
device,
}
const srRef = await xapi.call(
'SR.create',
host._xapiRef,
deviceConfig,
'0',
nameLabel,
nameDescription,
'lvm', // SR LVM
'user', // recommended by Citrix
false,
{}
)
const srRef = await xapi.SR_create({
device_config: deviceConfig,
host: host._xapiRef,
name_description: nameDescription,
name_label: nameLabel,
shared: false,
type: 'lvm', // SR LVM
})
const sr = await xapi.call('SR.get_record', srRef)
return sr.uuid
@@ -383,18 +368,14 @@ export async function createExt({ host, nameLabel, nameDescription, device }) {
device,
}
const srRef = await xapi.call(
'SR.create',
host._xapiRef,
deviceConfig,
'0',
nameLabel,
nameDescription,
'ext', // SR ext
'user', // recommended by Citrix
false,
{}
)
const srRef = await xapi.SR_create({
device_config: deviceConfig,
host: host._xapiRef,
name_description: nameDescription,
name_label: nameLabel,
shared: false,
type: 'ext', // SR ext
})
const sr = await xapi.call('SR.get_record', srRef)
return sr.uuid
@@ -458,13 +439,18 @@ export async function createZfs({ host, nameLabel, nameDescription, location })
const xapi = this.getXapi(host)
// only XCP-ng >=8.2 support the ZFS SR
const types = await xapi.call('SR.get_supported_types')
return xapi.createSr({
hostRef: host._xapiRef,
name_label: nameLabel,
name_description: nameDescription,
type: types.includes('zfs') ? 'zfs' : 'file',
device_config: { location },
})
return await xapi.getField(
'SR',
await xapi.SR_create({
device_config: { location },
host: host._xapiRef,
name_description: nameDescription,
name_label: nameLabel,
shared: false,
type: types.includes('zfs') ? 'zfs' : 'file',
}),
'uuid'
)
}
createZfs.params = {
@@ -614,18 +600,14 @@ export async function createIscsi({
})
}
const srRef = await xapi.call(
'SR.create',
host._xapiRef,
deviceConfig,
'0',
nameLabel,
nameDescription,
'lvmoiscsi', // SR LVM over iSCSI
'user', // recommended by Citrix
true,
{}
)
const srRef = await xapi.SR_create({
device_config: deviceConfig,
host: host._xapiRef,
name_description: nameDescription,
name_label: nameLabel,
shared: true,
type: 'lvmoiscsi', // SR LVM over iSCSI
})
const sr = await xapi.call('SR.get_record', srRef)
return sr.uuid
@@ -931,3 +913,38 @@ stats.params = {
stats.resolve = {
sr: ['id', 'SR', 'view'],
}
// -------------------------------------------------------------------
export function enableMaintenanceMode({ sr, vmsToShutdown }) {
return this.getXapiObject(sr).$enableMaintenanceMode({ vmsToShutdown })
}
enableMaintenanceMode.description = 'switch the SR into maintenance mode'
enableMaintenanceMode.params = {
id: { type: 'string' },
vmsToShutdown: { type: 'array', items: { type: 'string' }, optional: true },
}
enableMaintenanceMode.permission = 'admin'
enableMaintenanceMode.resolve = {
sr: ['id', 'SR', 'operate'],
}
export function disableMaintenanceMode({ sr }) {
return this.getXapiObject(sr).$disableMaintenanceMode()
}
disableMaintenanceMode.description = 'disable the maintenance of the SR'
disableMaintenanceMode.params = {
id: { type: 'string' },
}
disableMaintenanceMode.permission = 'admin'
disableMaintenanceMode.resolve = {
sr: ['id', 'SR', 'operate'],
}

View File

@@ -73,4 +73,7 @@ export function methodSignature({ method: name }) {
]
}
methodSignature.description = 'returns the signature of an API method'
methodSignature.params = {
method: { type: 'string' },
}
methodSignature.permission = null // user does not need to be authenticated

View File

@@ -1,10 +1,11 @@
// TODO: Prevent token connections from creating tokens.
// TODO: Token permission.
export async function create({ expiresIn }) {
export async function create({ description, expiresIn }) {
return (
await this.createAuthenticationToken({
description,
expiresIn,
userId: this.connection.get('user_id'),
userId: this.apiContext.user.id,
})
).id
}
@@ -12,6 +13,10 @@ export async function create({ expiresIn }) {
create.description = 'create a new authentication token'
create.params = {
description: {
optional: true,
type: 'string',
},
expiresIn: {
optional: true,
type: ['number', 'string'],
@@ -20,7 +25,6 @@ create.params = {
// -------------------------------------------------------------------
// TODO: an user should be able to delete its own tokens.
async function delete_({ token: id }) {
await this.deleteAuthenticationToken(id)
}
@@ -29,8 +33,6 @@ export { delete_ as delete }
delete_.description = 'delete an existing authentication token'
delete_.permission = 'admin'
delete_.params = {
token: { type: 'string' },
}
@@ -40,7 +42,7 @@ delete_.params = {
export async function deleteAll({ except }) {
await this.deleteAuthenticationTokens({
filter: {
user_id: this.connection.get('user_id'),
user_id: this.apiContext.user.id,
id: {
__not: except,
},
@@ -53,3 +55,16 @@ deleteAll.description = 'delete all tokens of the current user except the curren
deleteAll.params = {
except: { type: 'string', optional: true },
}
// -------------------------------------------------------------------
export async function set({ id, ...props }) {
await this.updateAuthenticationToken({ id, user_id: this.apiContext.user.id }, props)
}
set.description = 'changes the properties of an existing token'
set.params = {
description: { type: ['null', 'string'], optional: true },
id: { type: 'string' },
}

View File

@@ -22,7 +22,7 @@ create.params = {
// Deletes an existing user.
async function delete_({ id }) {
if (id === this.connection.get('user_id')) {
if (id === this.apiContext.user.id) {
throw invalidParameters('a user cannot delete itself')
}
@@ -58,10 +58,18 @@ getAll.permission = 'admin'
// -------------------------------------------------------------------
export function getAuthenticationTokens() {
return this.getAuthenticationTokensForUser(this.apiContext.user.id)
}
getAuthenticationTokens.description = 'returns authentication tokens of the current user'
// -------------------------------------------------------------------
export async function set({ id, email, password, permission, preferences }) {
const isAdmin = this.user && this.user.permission === 'admin'
const isAdmin = this.apiContext.permission === 'admin'
if (isAdmin) {
if (permission && id === this.connection.get('user_id')) {
if (permission && id === this.apiContext.user.id) {
throw invalidParameters('a user cannot change its own permission')
}
} else if (email || password || permission) {
@@ -89,7 +97,7 @@ set.params = {
// -------------------------------------------------------------------
export async function changePassword({ oldPassword, newPassword }) {
const { user } = this
const { user } = this.apiContext
if (!isEmpty(user.authProviders)) {
throw forbiddenOperation('change password', 'synchronized users cannot change their passwords')

View File

@@ -1,7 +1,7 @@
// FIXME: too low level, should be removed.
async function delete_({ vbd }) {
await this.getXapi(vbd).deleteVbd(vbd)
await this.getXapiObject(vbd).$destroy()
}
delete_.params = {
@@ -17,8 +17,7 @@ export { delete_ as delete }
// -------------------------------------------------------------------
export async function disconnect({ vbd }) {
const xapi = this.getXapi(vbd)
await xapi.disconnectVbd(vbd._xapiRef)
await this.getXapiObject(vbd).$unplug()
}
disconnect.params = {

View File

@@ -54,14 +54,14 @@ export const set = defer(async function ($defer, params) {
vbds.length === 1 &&
(resourceSetId = xapi.xo.getData(this.getObject(vbds[0], 'VBD').VM, 'resourceSet')) !== undefined
) {
if (this.user.permission !== 'admin') {
await this.checkResourceSetConstraints(resourceSetId, this.user.id)
if (this.apiContext.permission !== 'admin') {
await this.checkResourceSetConstraints(resourceSetId, this.apiContext.user.id)
}
await this.allocateLimitsInResourceSet({ disk: size - vdi.size }, resourceSetId)
$defer.onFailure(() => this.releaseLimitsInResourceSet({ disk: size - vdi.size }, resourceSetId))
} else {
await this.checkPermissions(this.user.id, [[vdi.$SR, 'operate']])
await this.checkPermissions([[vdi.$SR, 'operate']])
}
await xapi.resizeVdi(ref, size)
@@ -105,11 +105,11 @@ set.resolve = {
export async function migrate({ vdi, sr, resourceSet }) {
const xapi = this.getXapi(vdi)
if (this.user.permission !== 'admin') {
if (this.apiContext.permission !== 'admin') {
if (resourceSet !== undefined) {
await this.checkResourceSetConstraints(resourceSet, this.user.id, [sr.id])
await this.checkResourceSetConstraints(resourceSet, this.apiContext.user.id, [sr.id])
} else {
await this.checkPermissions(this.user.id, [[sr.id, 'administrate']])
await this.checkPermissions([[sr.id, 'administrate']])
}
}

View File

@@ -84,19 +84,19 @@ export async function set({
const newIpAddresses = newIpv4Addresses.concat(newIpv6Addresses)
if (lockingMode !== undefined) {
await this.checkPermissions(this.user.id, [[network?.id ?? vif.$network, 'operate']])
await this.checkPermissions([[network?.id ?? vif.$network, 'operate']])
}
if (isNetworkChanged || mac) {
const networkId = network?.id
if (mac !== undefined && this.user.permission !== 'admin') {
await this.checkPermissions(this.user.id, [[networkId ?? vif.$network, 'administrate']])
if (mac !== undefined && this.apiContext.permission !== 'admin') {
await this.checkPermissions([[networkId ?? vif.$network, 'administrate']])
}
if (networkId !== undefined && this.user.permission !== 'admin') {
if (networkId !== undefined && this.apiContext.permission !== 'admin') {
if (resourceSet !== undefined) {
await this.checkResourceSetConstraints(resourceSet, this.user.id, [networkId])
await this.checkResourceSetConstraints(resourceSet, this.apiContext.user.id, [networkId])
} else {
await this.checkPermissions(this.user.id, [[networkId, 'operate']])
await this.checkPermissions([[networkId, 'operate']])
}
}

View File

@@ -34,7 +34,7 @@ function checkPermissionOnSrs(vm, permission = 'operate') {
return permissions.push([this.getObject(vdiId, ['VDI', 'VDI-snapshot']).$SR, permission])
})
return this.checkPermissions(this.connection.get('user_id'), permissions)
return this.checkPermissions(permissions)
}
// ===================================================================
@@ -47,11 +47,11 @@ const extract = (obj, prop) => {
// TODO: Implement ACLs
export const create = defer(async function ($defer, params) {
const { user } = this
const { user } = this.apiContext
const resourceSet = extract(params, 'resourceSet')
const template = extract(params, 'template')
if (resourceSet === undefined) {
await this.checkPermissions(this.user.id, [[template.$pool, 'administrate']])
await this.checkPermissions([[template.$pool, 'administrate']])
}
params.template = template._xapiId
@@ -444,7 +444,16 @@ insertCd.resolve = {
// -------------------------------------------------------------------
export async function migrate({ vm, host, sr, mapVdisSrs, mapVifsNetworks, migrationNetwork, force }) {
export async function migrate({
bypassAssert = false,
force,
host,
mapVdisSrs,
mapVifsNetworks,
migrationNetwork,
sr,
vm,
}) {
let mapVdisSrsXapi, mapVifsNetworksXapi
const permissions = []
@@ -466,7 +475,7 @@ export async function migrate({ vm, host, sr, mapVdisSrs, mapVifsNetworks, migra
})
}
await this.checkPermissions(this.user.id, permissions)
await this.checkPermissions(permissions)
await this.getXapi(vm)
.migrateVm(vm._xapiId, this.getXapi(host), host._xapiId, {
@@ -475,6 +484,7 @@ export async function migrate({ vm, host, sr, mapVdisSrs, mapVifsNetworks, migra
mapVifsNetworks: mapVifsNetworksXapi,
mapVdisSrs: mapVdisSrsXapi,
force,
bypassAssert,
})
.catch(error => {
if (error?.code !== undefined) {
@@ -507,6 +517,12 @@ migrate.params = {
// Identifier of the Network use for the migration
migrationNetwork: { type: 'string', optional: true },
bypassAssert: {
description: 'Bypass the verification asserting whether a VM can be migrated to the specified destination',
optional: true,
type: 'boolean',
},
}
migrate.resolve = {
@@ -524,7 +540,7 @@ export const set = defer(async function ($defer, params) {
const resourceSetId = extract(params, 'resourceSet')
if (resourceSetId !== undefined) {
if (this.user.permission !== 'admin') {
if (this.apiContext.permission !== 'admin') {
throw unauthorized()
}
@@ -557,7 +573,7 @@ export const set = defer(async function ($defer, params) {
}
}
if (limits.cpuWeight && this.user.permission !== 'admin') {
if (limits.cpuWeight && this.apiContext.permission !== 'admin') {
throw unauthorized()
}
})
@@ -638,7 +654,7 @@ set.params = {
virtualizationMode: { type: 'string', optional: true },
blockedOperations: { type: 'object', optional: true },
blockedOperations: { type: 'object', optional: true, properties: { '*': { type: ['boolean', 'null', 'string'] } } },
suspendSr: { type: ['string', 'null'], optional: true },
}
@@ -681,9 +697,9 @@ export const clone = defer(async function ($defer, { vm, name, full_copy: fullCo
await newVm.set_is_a_template(false)
}
const isAdmin = this.user.permission === 'admin'
const isAdmin = this.apiContext.permission === 'admin'
if (!isAdmin) {
await this.addAcl(this.user.id, newVm.$id, 'admin')
await this.addAcl(this.apiContext.user.id, newVm.$id, 'admin')
}
if (vm.resourceSet !== undefined) {
@@ -757,7 +773,7 @@ copy.resolve = {
export async function convertToTemplate({ vm }) {
// Convert to a template requires pool admin permission.
await this.checkPermissions(this.user.id, [[vm.$pool, 'administrate']])
await this.checkPermissions([[vm.$pool, 'administrate']])
await this.getXapiObject(vm).set_is_a_template(true)
}
@@ -801,7 +817,7 @@ export const snapshot = defer(async function (
$defer,
{ vm, name = `${vm.name_label}_${new Date().toISOString()}`, saveMemory = false, description }
) {
const { user } = this
const { user } = this.apiContext
let resourceSet
try {
if (vm.resourceSet !== undefined) {
@@ -836,7 +852,7 @@ export const snapshot = defer(async function (
await xapi.editVm(snapshotId, { name_description: description })
}
if (user.permission !== 'admin') {
if (this.apiContext.permission !== 'admin') {
await this.addAcl(user.id, snapshotId, 'admin')
}
return snapshotId
@@ -952,7 +968,7 @@ resume.resolve = {
// -------------------------------------------------------------------
export const revert = defer(async function ($defer, { snapshot }) {
await this.checkPermissions(this.user.id, [[snapshot.$snapshot_of, 'operate']])
await this.checkPermissions([[snapshot.$snapshot_of, 'operate']])
const vm = this.getObject(snapshot.$snapshot_of)
const { resourceSet } = vm
if (resourceSet !== undefined) {
@@ -974,7 +990,7 @@ export const revert = defer(async function ($defer, { snapshot }) {
// Compute the resource usage of the snapshot that's being reverted as if it
// was used by the VM
const snapshotUsage = await this.computeVmResourcesUsage(snapshot)
await this.allocateLimitsInResourceSet(snapshotUsage, resourceSet, this.user.permission === 'admin')
await this.allocateLimitsInResourceSet(snapshotUsage, resourceSet, this.apiContext.permission === 'admin')
$defer.onFailure(() => this.releaseLimitsInResourceSet(snapshotUsage, resourceSet))
// Reallocate the snapshot's IP addresses
@@ -1126,7 +1142,7 @@ async function import_({ data, sr, type = 'xva', url }) {
return {
$sendTo: await this.registerApiHttpRequest(
'vm.import',
this.connection,
this.apiContext.connection,
handleVmImport,
{ data, srId, type, xapi },
{ exposeAllErrors: true }
@@ -1180,12 +1196,12 @@ export { import_ as import }
// FIXME: if position is used, all other disks after this position
// should be shifted.
export async function attachDisk({ vm, vdi, position, mode, bootable }) {
await this.getXapi(vm).createVbd({
await this.getXapi(vm).VBD_create({
bootable,
mode,
userdevice: position,
vdi: vdi._xapiId,
vm: vm._xapiId,
VDI: vdi._xapiRef,
VM: vm._xapiRef,
})
}
@@ -1211,9 +1227,9 @@ attachDisk.resolve = {
export async function createInterface({ vm, network, position, mac, allowedIpv4Addresses, allowedIpv6Addresses }) {
const { resourceSet } = vm
if (resourceSet != null) {
await this.checkResourceSetConstraints(resourceSet, this.user.id, [network.id])
await this.checkResourceSetConstraints(resourceSet, this.apiContext.user.id, [network.id])
} else {
await this.checkPermissions(this.user.id, [[network.id, 'view']])
await this.checkPermissions([[network.id, 'view']])
}
let ipAddresses

Some files were not shown because too many files have changed in this diff Show More