Compare commits: complex-ma...nr-fix-fs-

168 commits
| Author | SHA1 | Date |
|---|---|---|
|  | 8d2bdd62b8 |  |
|  | fffdb774a3 |  |
|  | 0659e4d725 |  |
|  | dce95e875a |  |
|  | ae94a512d9 |  |
|  | bf51ba860a |  |
|  | 6aa8515df4 |  |
|  | b6d3253d33 |  |
|  | 3bf4ee35a1 |  |
|  | e08c600740 |  |
|  | f823690b44 |  |
|  | 350b0c1e3c |  |
|  | b01a6124a9 |  |
|  | b00652f9eb |  |
|  | 19159a203a |  |
|  | be8c77af5a |  |
|  | 8bb7803d23 |  |
|  | 54a85a8dd0 |  |
|  | 6fd40c0a7c |  |
|  | 97dd423486 |  |
|  | 281d60df4f |  |
|  | 43933f4089 |  |
|  | 4f7e140737 |  |
|  | 2b6945a382 |  |
|  | 8a3ae59f77 |  |
|  | db253875cc |  |
|  | a8359dcb75 |  |
|  | e5dac06d91 |  |
|  | e9f82558ed |  |
|  | 26f5ef5e31 |  |
|  | 874e889b36 |  |
|  | bece5f7083 |  |
|  | 2f535e6db1 |  |
|  | 61c3057060 |  |
|  | 063d7d5cc4 |  |
|  | 0e0211050b |  |
|  | c8c7245da1 |  |
|  | 3e27e50bab |  |
|  | 6b9d3ed60e |  |
|  | 11a78111de |  |
|  | 2655421171 |  |
|  | c6bc2ea485 |  |
|  | 289b7a3dbe |  |
|  | 70083c6dca |  |
|  | 3e25b92369 |  |
|  | 806eaaf14b |  |
|  | fb3f2d46fa |  |
|  | 14d06fe754 |  |
|  | 752146028b |  |
|  | 6c6ae30ce5 |  |
|  | b00750bfa3 |  |
|  | 55eac005a0 |  |
|  | 257524de18 |  |
|  | d4f78056dd |  |
|  | 66c054f24b |  |
|  | 711b722118 |  |
|  | 26614b5f40 |  |
|  | 9240211f3e |  |
|  | 67d84d956e |  |
|  | 97b620f98f |  |
|  | 2f5c91a1e1 |  |
|  | 038dad834d |  |
|  | b3cd265955 |  |
|  | 2c670bc838 |  |
|  | 30c2b8e192 |  |
|  | a00d45522b |  |
|  | 525369e0ce |  |
|  | ba413f3e8f |  |
|  | 4afebca77b |  |
|  | d2eb92143d |  |
|  | e01d3c64fe |  |
|  | 9f497c9c2c |  |
|  | 9aae154c4e |  |
|  | 339f012794 |  |
|  | af500d7b7b |  |
|  | 16a71b3917 |  |
|  | 7dfa104f65 |  |
|  | 44a7b1761f |  |
|  | 22c8ea255c |  |
|  | a1c10828d8 |  |
|  | 25d69d1bd7 |  |
|  | a84961f8ba |  |
|  | e17b6790b5 |  |
|  | 815aed52d3 |  |
|  | a03581ccd3 |  |
|  | c10f6e6c6a |  |
|  | 18abd0384f |  |
|  | 4292bdd7b4 |  |
|  | 1149648399 |  |
|  | b6846eb21d |  |
|  | d19546fcb4 |  |
|  | 6a1eb198d1 |  |
|  | e4757d4345 |  |
|  | 3873a59a37 |  |
|  | cf9f6c10d7 |  |
|  | 8bcd9debc2 |  |
|  | 510a159eee |  |
|  | 062fb3ba30 |  |
|  | 3bc477d21b |  |
|  | 79eb2feb2c |  |
|  | 1fa42a5753 |  |
|  | 2eaab408dd |  |
|  | f7fd0d9121 |  |
|  | 3b7b776ac4 |  |
|  | 43abc8440b |  |
|  | 37515b5da9 |  |
|  | 2dec327013 |  |
|  | 8f4dae3134 |  |
|  | a584daa92d |  |
|  | 43431aa9a0 |  |
|  | f196d2abec |  |
|  | 4a6724f664 |  |
|  | a960737207 |  |
|  | da08bd7fff |  |
|  | 517430f23d |  |
|  | 48e82ac15b |  |
|  | eead64ff71 |  |
|  | 9ac6db2f4c |  |
|  | 92cf6bb887 |  |
|  | 1d3978ce2f |  |
|  | 16c71da487 |  |
|  | 214dbafd62 |  |
|  | 89b162704c |  |
|  | fbf906d97c |  |
|  | 7961ff0785 |  |
|  | 00e53f455b |  |
|  | d1d4839a09 |  |
|  | 31b19725b7 |  |
|  | a776eaf61a |  |
|  | ae2a92d229 |  |
|  | dedc4aa8b9 |  |
|  | 7a8ca2f068 |  |
|  | fdf52a3d59 |  |
|  | e0987059d3 |  |
|  | ee7217c7c9 |  |
|  | 1027659f34 |  |
|  | 424a212cc3 |  |
|  | 949ddbdcd7 |  |
|  | 7fcfc306f9 |  |
|  | a691e033eb |  |
|  | b76f62d470 |  |
|  | 01a90a1694 |  |
|  | 97bcc7afb6 |  |
|  | 9fa0ec440d |  |
|  | 28559cde02 |  |
|  | 6970d48cc3 |  |
|  | 52801c5afc |  |
|  | 7797bce814 |  |
|  | 18762dc624 |  |
|  | 5a828a6465 |  |
|  | eaa9f36478 |  |
|  | 2b63134bcf |  |
|  | 8dcff63aea |  |
|  | c2777607be |  |
|  | 9ba2b18fdb |  |
|  | 4ebc10db6a |  |
|  | 610b6c7bb0 |  |
|  | 357333c4e4 |  |
|  | 723334a685 |  |
|  | b2c218ff83 |  |
|  | adabd6966d |  |
|  | b3eb1270dd |  |
|  | 7659a195d3 |  |
|  | 8d2e23f4a8 |  |
|  | 539d7dab5d |  |
|  | 06d43cdb24 |  |
|  | af7bcf19ab |  |
|  | 7ebeb37881 |  |
@@ -48,9 +48,5 @@ module.exports = {
    'lines-between-class-members': 'off',
    'no-console': ['error', { allow: ['warn', 'error'] }],
    'no-var': 'error',
    'node/no-extraneous-import': 'error',
    'node/no-extraneous-require': 'error',
    'prefer-const': 'error',
  },
}
@@ -3,4 +3,9 @@ module.exports = {
   jsxSingleQuote: true,
   semi: false,
   singleQuote: true,
+
+  // 2020-11-24: Requested by nraynaud and approved by the rest of the team
+  //
+  // https://team.vates.fr/vates/pl/a1i8af1b9id7pgzm3jcg4toacy
+  printWidth: 120,
 }
@vates/multi-key-map/README.md (new file, 53 lines)

@@ -0,0 +1,53 @@
+<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->
+
+# @vates/multi-key-map
+
+[](https://npmjs.org/package/@vates/multi-key-map) [](https://bundlephobia.com/result?p=@vates/multi-key-map) [](https://npmjs.org/package/@vates/multi-key-map)
+
+> Create map with values affected to multiple keys
+
+## Install
+
+Installation of the [npm package](https://npmjs.org/package/@vates/multi-key-map):
+
+```
+> npm install --save @vates/multi-key-map
+```
+
+## Usage
+
+```js
+import { MultiKeyMap } from '@vates/multi-key-map'
+
+const map = new MultiKeyMap()
+
+const OBJ = {}
+map.set([], 0)
+map.set(['foo'], 1)
+map.set(['foo', 'bar'], 2)
+map.set(['bar', 'foo'], 3)
+map.set([OBJ], 4)
+map.set([{}], 5)
+
+map.get([]) // 0
+map.get(['foo']) // 1
+map.get(['foo', 'bar']) // 2
+map.get(['bar', 'foo']) // 3
+map.get([OBJ]) // 4
+map.get([{}]) // undefined
+```
+
+## Contributions
+
+Contributions are _very_ welcomed, either on the documentation or on
+the code.
+
+You may:
+
+- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
+  you've encountered;
+- fork and create a pull request.
+
+## License
+
+[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)
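The examples above show the two properties that define this package: keys are compared by identity (a fresh `{}` never matches `OBJ`) and key order matters (`['foo', 'bar']` and `['bar', 'foo']` are distinct entries). A hypothetical sketch — not the package's actual implementation — of how a trie of native `Map`s yields exactly this behavior:

```js
// Illustrative only: one Map level per key, values stored under a private symbol.
const VALUE = Symbol('value')

class NaiveMultiKeyMap {
  constructor() {
    this._root = new Map()
  }

  set(keys, value) {
    let node = this._root
    for (const key of keys) {
      let child = node.get(key)
      if (child === undefined) {
        child = new Map()
        node.set(key, child)
      }
      node = child
    }
    node.set(VALUE, value)
  }

  get(keys) {
    let node = this._root
    for (const key of keys) {
      node = node.get(key)
      if (node === undefined) {
        return undefined // unknown key: same result as map.get([{}]) above
      }
    }
    return node.get(VALUE)
  }
}
```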
@vates/multi-key-map/USAGE.md (new file, 20 lines)

@@ -0,0 +1,20 @@
+```js
+import { MultiKeyMap } from '@vates/multi-key-map'
+
+const map = new MultiKeyMap()
+
+const OBJ = {}
+map.set([], 0)
+map.set(['foo'], 1)
+map.set(['foo', 'bar'], 2)
+map.set(['bar', 'foo'], 3)
+map.set([OBJ], 4)
+map.set([{}], 5)
+
+map.get([]) // 0
+map.get(['foo']) // 1
+map.get(['foo', 'bar']) // 2
+map.get(['bar', 'foo']) // 3
+map.get([OBJ]) // 4
+map.get([{}]) // undefined
+```
@@ -67,7 +67,7 @@ function set(node, i, keys, value) {
   return node
 }

-export default class MultiKeyMap {
+exports.MultiKeyMap = class MultiKeyMap {
   constructor() {
     // each node is either a value or a Node if it contains children
     this._root = undefined
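This hunk switches the class from an ES default export to a CommonJS named export, matching the `require` form used in the updated test below; consumers now destructure the class:

```js
// Named CommonJS export, as exercised by the test file in the next hunk
const { MultiKeyMap } = require('@vates/multi-key-map')

const map = new MultiKeyMap()
map.set(['foo', 'bar'], 2)
map.get(['foo', 'bar']) // 2
```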
@@ -1,6 +1,6 @@
 /* eslint-env jest */

-import MultiKeyMap from './_MultiKeyMap'
+const { MultiKeyMap } = require('./')

 describe('MultiKeyMap', () => {
   it('works', () => {

@vates/multi-key-map/package.json (new file, 28 lines)

@@ -0,0 +1,28 @@
|
||||
{
|
||||
"private": false,
|
||||
"name": "@vates/multi-key-map",
|
||||
"description": "Create map with values affected to multiple keys",
|
||||
"keywords": [
|
||||
"cache",
|
||||
"map"
|
||||
],
|
||||
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/multi-key-map",
|
||||
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
|
||||
"repository": {
|
||||
"directory": "@vates/multi-key-map",
|
||||
"type": "git",
|
||||
"url": "https://github.com/vatesfr/xen-orchestra.git"
|
||||
},
|
||||
"author": {
|
||||
"name": "Vates SAS",
|
||||
"url": "https://vates.fr"
|
||||
},
|
||||
"license": "ISC",
|
||||
"version": "0.1.0",
|
||||
"engines": {
|
||||
"node": ">=8.10"
|
||||
},
|
||||
"scripts": {
|
||||
"postversion": "npm publish --access public"
|
||||
}
|
||||
}
|
||||
@@ -1,3 +1 @@
-module.exports = require('../../@xen-orchestra/babel-config')(
-  require('./package.json')
-)
+module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))

@@ -1,3 +1 @@
-module.exports = require('../../@xen-orchestra/babel-config')(
-  require('./package.json')
-)
+module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))
@@ -7,7 +7,7 @@
   "type": "git",
   "url": "https://github.com/vatesfr/xen-orchestra.git"
 },
-"version": "0.1.1",
+"version": "0.2.0",
 "engines": {
   "node": ">=8.10"
 },
@@ -119,9 +119,7 @@ export class AuditCore {
 if (record === undefined) {
   throw new MissingRecordError(newest, nValid)
 }
-if (
-  newest !== createHash(record, newest.slice(1, newest.indexOf('$', 1)))
-) {
+if (newest !== createHash(record, newest.slice(1, newest.indexOf('$', 1)))) {
   throw new AlteredRecordError(newest, nValid, record)
 }
 newest = record.previousId
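The loop above walks an audit log structured as a hash chain: a record's id embeds the hash algorithm between `$` separators, the record must hash back to its own id, and `previousId` points at the next link. A simplified sketch of the whole walk, with `createHash`, the storage API, and the error classes assumed to behave as in this hunk (not the module's verbatim code):

```js
// Sketch only: storage.get, createHash and the error classes are assumptions.
async function checkIntegrity(storage, oldest, newest) {
  let nValid = 0
  while (newest !== oldest) {
    const record = await storage.get(newest)
    if (record === undefined) {
      throw new MissingRecordError(newest, nValid)
    }
    // the id is `$<algorithm>$...`: recompute the hash with the named algorithm
    if (newest !== createHash(record, newest.slice(1, newest.indexOf('$', 1)))) {
      throw new AlteredRecordError(newest, nValid, record)
    }
    newest = record.previousId
    ++nValid
  }
  return nValid
}
```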
@@ -1,12 +1,6 @@
 /* eslint-env jest */

-import {
-  AlteredRecordError,
-  AuditCore,
-  MissingRecordError,
-  NULL_ID,
-  Storage,
-} from '.'
+import { AlteredRecordError, AuditCore, MissingRecordError, NULL_ID, Storage } from '.'

 const asyncIteratorToArray = async asyncIterator => {
   const array = []

@@ -88,16 +82,13 @@ describe('auditCore', () => {
 it('detects that a record is missing', async () => {
   const [newestRecord, deletedRecord] = await storeAuditRecords()

-  const nValidRecords = await auditCore.checkIntegrity(
-    NULL_ID,
-    newestRecord.id
-  )
+  const nValidRecords = await auditCore.checkIntegrity(NULL_ID, newestRecord.id)
   expect(nValidRecords).toBe(DATA.length)

   await db.del(deletedRecord.id)
-  await expect(
-    auditCore.checkIntegrity(NULL_ID, newestRecord.id)
-  ).rejects.toEqual(new MissingRecordError(deletedRecord.id, 1))
+  await expect(auditCore.checkIntegrity(NULL_ID, newestRecord.id)).rejects.toEqual(
+    new MissingRecordError(deletedRecord.id, 1)
+  )
 })

 it('detects that a record has been altered', async () => {

@@ -106,9 +97,7 @@ describe('auditCore', () => {
   alteredRecord.event = ''
   await db.put(alteredRecord)

-  await expect(
-    auditCore.checkIntegrity(NULL_ID, newestRecord.id)
-  ).rejects.toEqual(
+  await expect(auditCore.checkIntegrity(NULL_ID, newestRecord.id)).rejects.toEqual(
     new AlteredRecordError(alteredRecord.id, 1, alteredRecord)
   )
 })
@@ -38,18 +38,11 @@ const configs = {

 const getConfig = (key, ...args) => {
   const config = configs[key]
-  return config === undefined
-    ? {}
-    : typeof config === 'function'
-    ? config(...args)
-    : config
+  return config === undefined ? {} : typeof config === 'function' ? config(...args) : config
 }

 // some plugins must be used in a specific order
-const pluginsOrder = [
-  '@babel/plugin-proposal-decorators',
-  '@babel/plugin-proposal-class-properties',
-]
+const pluginsOrder = ['@babel/plugin-proposal-decorators', '@babel/plugin-proposal-class-properties']

 module.exports = function (pkg, plugins, presets) {
   plugins === undefined && (plugins = {})
@@ -1,7 +1,5 @@
 const curryRight = require('lodash/curryRight')

 module.exports = curryRight((iterable, fn) =>
-  Promise.all(
-    Array.isArray(iterable) ? iterable.map(fn) : Array.from(iterable, fn)
-  )
+  Promise.all(Array.isArray(iterable) ? iterable.map(fn) : Array.from(iterable, fn))
 )
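Because the export is wrapped in lodash's `curryRight`, the iteratee can also be partially applied from the right; an illustrative use (assuming the package is consumed as `@xen-orchestra/async-map`, listed as a dependency elsewhere in this diff):

```js
const asyncMap = require('@xen-orchestra/async-map')

// direct call: maps the iterable with an async function
asyncMap([1, 2, 3], async n => n * 2).then(console.log) // [2, 4, 6]

// curried from the right: bind the iteratee first, pass iterables later
const double = asyncMap(async n => n * 2)
double([1, 2, 3]).then(console.log) // [2, 4, 6]
```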
@@ -1,7 +1,7 @@
 #!/usr/bin/env node

 // assigned when options are parsed by the main function
-let force
+let force, merge

 // -----------------------------------------------------------------------------

@@ -41,9 +41,9 @@ const mergeVhdChain = limitConcurrency(1)(async function mergeVhdChain(chain) {
   .forEach(parent => {
     console.warn(' ', parent)
   })
-  force && console.warn(' merging…')
+  merge && console.warn(' merging…')
   console.warn('')
-  if (force) {
+  if (merge) {
     // `mergeVhd` does not work with a stream, either
     // - make it accept a stream
     // - or create synthetic VHD which is not a stream

@@ -115,9 +115,7 @@ async function handleVm(vmDir) {
   const parent = resolve(dirname(path), vhd.header.parentUnicodeName)
   vhdParents[path] = parent
   if (parent in vhdChildren) {
-    const error = new Error(
-      'this script does not support multiple VHD children'
-    )
+    const error = new Error('this script does not support multiple VHD children')
     error.parent = parent
     error.child1 = vhdChildren[parent]
     error.child2 = path

@@ -224,11 +222,7 @@ async function handleVm(vmDir) {
 } else {
   console.warn('Error while checking backup', json)
   const missingVhds = linkedVhds.filter(_ => !vhds.has(_))
-  console.warn(
-    ' %i/%i missing VHDs',
-    missingVhds.length,
-    linkedVhds.length
-  )
+  console.warn(' %i/%i missing VHDs', missingVhds.length, linkedVhds.length)
   missingVhds.forEach(vhd => {
     console.warn(' ', vhd)
   })

@@ -315,14 +309,16 @@ module.exports = async function main(args) {
 const opts = getopts(args, {
   alias: {
     force: 'f',
+    merge: 'm',
   },
-  boolean: ['force'],
+  boolean: ['force', 'merge'],
   default: {
     force: false,
+    merge: false,
   },
 })

-;({ force } = opts)
+;({ force, merge } = opts)
 await asyncMap(opts._, async vmDir => {
   vmDir = resolve(vmDir)
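With the new flag wired into `getopts`, both long and short forms are recognized and mirrored onto their aliases; an illustrative parse (not output from the actual CLI):

```js
const getopts = require('getopts')

const opts = getopts(['--merge', 'xo-vm-backups/vm1'], {
  alias: { force: 'f', merge: 'm' },
  boolean: ['force', 'merge'],
  default: { force: false, merge: false },
})
// { _: ['xo-vm-backups/vm1'], force: false, f: false, merge: true, m: true }
```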
@@ -10,9 +10,7 @@ const sum = values => values.reduce((a, b) => a + b)

 module.exports = async function info(vmDirs) {
   const jsonFiles = (
-    await asyncMap(vmDirs, async vmDir =>
-      (await readdir2(vmDir)).filter(_ => _.endsWith('.json'))
-    )
+    await asyncMap(vmDirs, async vmDir => (await readdir2(vmDir)).filter(_ => _.endsWith('.json')))
   ).flat()

   const hashes = { __proto__: null }

@@ -39,9 +37,7 @@ module.exports = async function info(vmDirs) {
   size:
     json.length +
     (await (metadata.mode === 'delta'
-      ? asyncMap(Object.values(metadata.vhds), _ =>
-          getSize(resolve(jsonDir, _))
-        ).then(sum)
+      ? asyncMap(Object.values(metadata.vhds), _ => getSize(resolve(jsonDir, _))).then(sum)
       : getSize(resolve(jsonDir, metadata.xva)))),
 }
 } catch (error) {
@@ -5,7 +5,7 @@ require('./_composeCommands')({
   get main() {
     return require('./commands/clean-vms')
   },
-  usage: '[--force] xo-vm-backups/*',
+  usage: '[--force] [--merge] xo-vm-backups/*',
 },
 'create-symlink-index': {
   get main() {
@@ -7,14 +7,14 @@
 "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
 "dependencies": {
   "@xen-orchestra/backups": "^0.1.1",
-  "@xen-orchestra/fs": "^0.11.1",
+  "@xen-orchestra/fs": "^0.12.1",
   "filenamify": "^4.1.0",
   "getopts": "^2.2.5",
   "limit-concurrency-decorator": "^0.4.0",
   "lodash": "^4.17.15",
-  "promise-toolbox": "^0.15.0",
+  "promise-toolbox": "^0.16.0",
   "proper-lockfile": "^4.1.1",
-  "vhd-lib": "^0.7.2"
+  "vhd-lib": "^1.0.0"
 },
 "engines": {
   "node": ">=7.10.1"

@@ -33,7 +33,7 @@
 "scripts": {
   "postversion": "npm publish --access public"
 },
-"version": "0.2.1",
+"version": "0.3.0",
 "license": "AGPL-3.0-or-later",
 "author": {
   "name": "Vates SAS",
@@ -1,7 +1,3 @@
 // returns all entries but the last retention-th
 exports.getOldEntries = (retention, entries) =>
-  entries === undefined
-    ? []
-    : retention > 0
-    ? entries.slice(0, -retention)
-    : entries
+  entries === undefined ? [] : retention > 0 ? entries.slice(0, -retention) : entries
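Read with entries sorted oldest first, the one-liner keeps the newest `retention` entries and returns everything older; for instance (module path assumed for illustration):

```js
const { getOldEntries } = require('./_getOldEntries') // hypothetical path

getOldEntries(2, ['b1', 'b2', 'b3', 'b4']) // ['b1', 'b2'] — the two newest are kept
getOldEntries(0, ['b1', 'b2']) // ['b1', 'b2'] — nothing is retained
getOldEntries(2, undefined) // []
```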
@@ -4,10 +4,7 @@ const fs = require('fs-extra')
 const isGzipFile = async fd => {
   // https://tools.ietf.org/html/rfc1952.html#page-5
   const magicNumber = Buffer.allocUnsafe(2)
-  assert.strictEqual(
-    (await fs.read(fd, magicNumber, 0, magicNumber.length, 0)).bytesRead,
-    magicNumber.length
-  )
+  assert.strictEqual((await fs.read(fd, magicNumber, 0, magicNumber.length, 0)).bytesRead, magicNumber.length)
   return magicNumber[0] === 31 && magicNumber[1] === 139
 }
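The bytes 31 and 139 are the gzip signature `0x1f 0x8b` from RFC 1952. The same check, as a standalone sketch taking a path instead of an already-open descriptor:

```js
const fs = require('fs-extra')

// Sketch: open the file, read the first two bytes, compare to the gzip magic.
async function isGzip(path) {
  const fd = await fs.open(path, 'r')
  try {
    const magic = Buffer.alloc(2)
    await fs.read(fd, magic, 0, 2, 0)
    return magic[0] === 0x1f && magic[1] === 0x8b // 31, 139
  } finally {
    await fs.close(fd)
  }
}
```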
@@ -30,10 +27,7 @@ const isValidTar = async (size, fd) => {
 }

 const buf = Buffer.allocUnsafe(1024)
-assert.strictEqual(
-  (await fs.read(fd, buf, 0, buf.length, size - buf.length)).bytesRead,
-  buf.length
-)
+assert.strictEqual((await fs.read(fd, buf, 0, buf.length, size - buf.length)).bytesRead, buf.length)
 return buf.every(_ => _ === 0)
 }
@@ -32,14 +32,7 @@ ${cliName} v${pkg.version}
   )
 }

-const [
-  srcXapiUrl,
-  srcSnapshotUuid,
-  tgtXapiUrl,
-  tgtVmUuid,
-  jobId,
-  scheduleId,
-] = args
+const [srcXapiUrl, srcSnapshotUuid, tgtXapiUrl, tgtVmUuid, jobId, scheduleId] = args

 const srcXapi = new Xapi({
   allowUnauthorized: true,

@@ -70,16 +63,10 @@ ${cliName} v${pkg.version}
   'xo:backup:vm': srcVm.uuid,
 }

-const [srcDisks, tgtDisks] = await Promise.all([
-  srcXapi.getVmDisks(srcSnapshot),
-  tgtXapi.getVmDisks(tgtVm),
-])
+const [srcDisks, tgtDisks] = await Promise.all([srcXapi.getVmDisks(srcSnapshot), tgtXapi.getVmDisks(tgtVm)])
 const userDevices = Object.keys(tgtDisks)

-const tgtSr = await tgtXapi.getRecord(
-  'SR',
-  tgtDisks[Object.keys(tgtDisks)[0]].SR
-)
+const tgtSr = await tgtXapi.getRecord('SR', tgtDisks[Object.keys(tgtDisks)[0]].SR)

 await Promise.all([
   srcSnapshot.update_other_config(metadata),

@@ -90,10 +77,7 @@ ${cliName} v${pkg.version}
   'xo:backup:sr': tgtSr.uuid,
   'xo:copy_of': srcSnapshotUuid,
 }),
-tgtVm.update_blocked_operations(
-  'start',
-  'Start operation for this vm is blocked, clone it if you want to use it.'
-),
+tgtVm.update_blocked_operations('start', 'Start operation for this vm is blocked, clone it if you want to use it.'),
 Promise.all(
   userDevices.map(userDevice => {
     const srcDisk = srcDisks[userDevice]
@@ -1,3 +1 @@
-module.exports = require('../../@xen-orchestra/babel-config')(
-  require('./package.json')
-)
+module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))
@@ -42,10 +42,7 @@ class Job {
   const now = schedule._createDate()
   scheduledDate = +next(schedule._schedule, now)
   const delay = scheduledDate - now
-  this._timeout =
-    delay < MAX_DELAY
-      ? setTimeout(wrapper, delay)
-      : setTimeout(scheduleNext, MAX_DELAY)
+  this._timeout = delay < MAX_DELAY ? setTimeout(wrapper, delay) : setTimeout(scheduleNext, MAX_DELAY)
   }
 }
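`MAX_DELAY` exists because Node stores a timer's delay in a 32-bit signed integer: anything above 2147483647 ms (about 24.8 days) overflows and fires immediately, so longer waits must be chained. A standalone sketch of the same idea:

```js
const MAX_DELAY = 2147483647 // 2 ** 31 - 1, the largest delay setTimeout honours

// Re-arm an intermediate timer until the remaining delay fits in 32 bits.
// (A real implementation would also track the latest handle for cancellation.)
function setLongTimeout(fn, delay) {
  return delay < MAX_DELAY
    ? setTimeout(fn, delay)
    : setTimeout(() => setLongTimeout(fn, delay - MAX_DELAY), MAX_DELAY)
}
```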
@@ -73,12 +70,7 @@ class Job {
 class Schedule {
   constructor(pattern, zone = 'utc') {
     this._schedule = parse(pattern)
-    this._createDate =
-      zone.toLowerCase() === 'utc'
-        ? moment.utc
-        : zone === 'local'
-        ? moment
-        : () => moment.tz(zone)
+    this._createDate = zone.toLowerCase() === 'utc' ? moment.utc : zone === 'local' ? moment : () => moment.tz(zone)
   }

   createJob(fn) {
@@ -37,9 +37,7 @@ describe('next()', () => {
 })

 it('fails when no solutions has been found', () => {
-  expect(() => N('0 0 30 feb *')).toThrow(
-    'no solutions found for this schedule'
-  )
+  expect(() => N('0 0 30 feb *')).toThrow('no solutions found for this schedule')
 })

 it('select the first sunday of the month', () => {

@@ -66,9 +66,7 @@ const createParser = ({ fields: [...fields], presets: { ...presets } }) => {
 aliasesRegExp.lastIndex = i
 const matches = aliasesRegExp.exec(pattern)
 if (matches === null) {
-  throw new SyntaxError(
-    `${field.name}: missing alias or integer at character ${i}`
-  )
+  throw new SyntaxError(`${field.name}: missing alias or integer at character ${i}`)
 }
 const [alias] = matches
 i += alias.length

@@ -77,9 +75,7 @@ const createParser = ({ fields: [...fields], presets: { ...presets } }) => {

 const { range } = field
 if (value < range[0] || value > range[1]) {
-  throw new SyntaxError(
-    `${field.name}: ${value} is not between ${range[0]} and ${range[1]}`
-  )
+  throw new SyntaxError(`${field.name}: ${value} is not between ${range[0]} and ${range[1]}`)
 }
 return value
 }

@@ -117,9 +113,7 @@ const createParser = ({ fields: [...fields], presets: { ...presets } }) => {
 {
   const schedule = presets[p]
   if (schedule !== undefined) {
-    return typeof schedule === 'string'
-      ? (presets[p] = parse(schedule))
-      : schedule
+    return typeof schedule === 'string' ? (presets[p] = parse(schedule)) : schedule
   }
 }

@@ -142,9 +136,7 @@ const createParser = ({ fields: [...fields], presets: { ...presets } }) => {

 consumeWhitespaces()
 if (i !== n) {
-  throw new SyntaxError(
-    `unexpected character at offset ${i}, expected end`
-  )
+  throw new SyntaxError(`unexpected character at offset ${i}, expected end`)
 }

 return schedule

@@ -33,9 +33,7 @@ describe('parse()', () => {
 })

 it('reports invalid aliases', () => {
-  expect(() => parse('* * * jan-foo *')).toThrow(
-    'month: missing alias or integer at character 10'
-  )
+  expect(() => parse('* * * jan-foo *')).toThrow('month: missing alias or integer at character 10')
 })

 it('dayOfWeek: 0 and 7 bind to sunday', () => {
@@ -1,3 +1 @@
-module.exports = require('../../@xen-orchestra/babel-config')(
-  require('./package.json')
-)
+module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))
@@ -60,5 +60,4 @@ export const get = (accessor: (input: ?any) => any, arg: ?any) => {
 // _ => new ProxyAgent(_)
 // )
 // ```
-export const ifDef = (value: ?any, thenFn: (value: any) => any) =>
-  value !== undefined ? thenFn(value) : value
+export const ifDef = (value: ?any, thenFn: (value: any) => any) => (value !== undefined ? thenFn(value) : value)

@@ -1,3 +1 @@
-module.exports = require('../../@xen-orchestra/babel-config')(
-  require('./package.json')
-)
+module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))
@@ -19,6 +19,11 @@ import EE from 'events'
 import emitAsync from '@xen-orchestra/emit-async'

 const ee = new EE()
+
+// exposing emitAsync on our event emitter
+//
+// it's not required though and we could have used directly via
+// emitAsync.call(ee, event, args...)
 ee.emitAsync = emitAsync

 ee.on('start', async function () {

@@ -26,7 +31,7 @@ ee.on('start', async function () {
 })

 // similar to EventEmmiter#emit() but returns a promise which resolves when all
-// listeners have resolved
+// listeners have settled
 await ee.emitAsync('start')

 // by default, it will rejects as soon as one listener reject, you can customise

@@ -3,6 +3,11 @@ import EE from 'events'
 import emitAsync from '@xen-orchestra/emit-async'

 const ee = new EE()
+
+// exposing emitAsync on our event emitter
+//
+// it's not required though and we could have used directly via
+// emitAsync.call(ee, event, args...)
 ee.emitAsync = emitAsync

 ee.on('start', async function () {

@@ -10,7 +15,7 @@ ee.on('start', async function () {
 })

 // similar to EventEmmiter#emit() but returns a promise which resolves when all
-// listeners have resolved
+// listeners have settled
 await ee.emitAsync('start')

 // by default, it will rejects as soon as one listener reject, you can customise
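The semantics documented above — a promise tied to every listener, rejecting as soon as one listener rejects unless customised — suggest an implementation along these lines (a sketch of the described behavior, not the package's source):

```js
// Called with the emitter as `this`, e.g. emitAsync.call(ee, 'start')
function emitAsync(event, ...args) {
  return Promise.all(this.listeners(event).map(listener => listener.apply(this, args)))
}
```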
@@ -1,3 +1 @@
-module.exports = require('../../@xen-orchestra/babel-config')(
-  require('./package.json')
-)
+module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))
@@ -1,7 +1,7 @@
 {
   "private": false,
   "name": "@xen-orchestra/fs",
-  "version": "0.11.1",
+  "version": "0.12.1",
   "license": "AGPL-3.0-or-later",
   "description": "The File System for Xen Orchestra backups.",
   "keywords": [],

@@ -22,17 +22,18 @@
 "node": ">=8.10"
 },
 "dependencies": {
-  "@marsaud/smb2": "^0.15.0",
+  "@marsaud/smb2": "^0.17.2",
   "@sindresorhus/df": "^3.1.1",
+  "@sullux/aws-sdk": "^1.0.5",
   "@xen-orchestra/async-map": "^0.0.0",
   "aws-sdk": "^2.686.0",
   "decorator-synchronized": "^0.5.0",
-  "execa": "^4.0.2",
+  "execa": "^5.0.0",
   "fs-extra": "^9.0.0",
   "get-stream": "^6.0.0",
   "limit-concurrency-decorator": "^0.4.0",
   "lodash": "^4.17.4",
-  "promise-toolbox": "^0.15.0",
+  "promise-toolbox": "^0.16.0",
   "readable-stream": "^3.0.6",
   "through2": "^4.0.2",
   "tmp": "^0.2.1",
@@ -6,33 +6,19 @@ import { tmpdir } from 'os'

 import LocalHandler from './local'

-const sudoExeca = (command, args, opts) =>
-  execa('sudo', [command, ...args], opts)
+const sudoExeca = (command, args, opts) => execa('sudo', [command, ...args], opts)

 export default class MountHandler extends LocalHandler {
-  constructor(
-    remote,
-    {
-      mountsDir = join(tmpdir(), 'xo-fs-mounts'),
-      useSudo = false,
-      ...opts
-    } = {},
-    params
-  ) {
+  constructor(remote, { mountsDir = join(tmpdir(), 'xo-fs-mounts'), useSudo = false, ...opts } = {}, params) {
     super(remote, opts)

     this._execa = useSudo ? sudoExeca : execa
     this._keeper = undefined
     this._params = {
       ...params,
-      options: [params.options, remote.options]
-        .filter(_ => _ !== undefined)
-        .join(','),
+      options: [params.options, remote.options ?? params.defaultOptions].filter(_ => _ !== undefined).join(','),
     }
-    this._realPath = join(
-      mountsDir,
-      remote.id || Math.random().toString(36).slice(2)
-    )
+    this._realPath = join(mountsDir, remote.id || Math.random().toString(36).slice(2))
   }

   async _forget() {

@@ -75,16 +61,12 @@ export default class MountHandler extends LocalHandler {

 // Linux mount is more flexible in which order the mount arguments appear.
 // But FreeBSD requires this order of the arguments.
-await this._execa(
-  'mount',
-  ['-o', options, '-t', type, device, realPath],
-  {
-    env: {
-      LANG: 'C',
-      ...env,
-    },
-  }
-)
+await this._execa('mount', ['-o', options, '-t', type, device, realPath], {
+  env: {
+    LANG: 'C',
+    ...env,
+  },
+})
 } catch (error) {
   try {
     // the failure may mean it's already mounted, use `findmnt` to check

@@ -99,9 +81,7 @@ export default class MountHandler extends LocalHandler {

 // keep an open file on the mount to prevent it from being unmounted if used
 // by another handler/process
-const keeperPath = `${realPath}/.keeper_${Math.random()
-  .toString(36)
-  .slice(2)}`
+const keeperPath = `${realPath}/.keeper_${Math.random().toString(36).slice(2)}`
 this._keeper = await fs.open(keeperPath, 'w')
 ignoreErrors.call(fs.unlink(keeperPath))
 }
@@ -86,9 +86,7 @@ export default class RemoteHandlerAbstract {
 }
 ;({ timeout: this._timeout = DEFAULT_TIMEOUT } = options)

-const sharedLimit = limit(
-  options.maxParallelOperations ?? DEFAULT_MAX_PARALLEL_OPERATIONS
-)
+const sharedLimit = limit(options.maxParallelOperations ?? DEFAULT_MAX_PARALLEL_OPERATIONS)
 this.closeFile = sharedLimit(this.closeFile)
 this.getInfo = sharedLimit(this.getInfo)
 this.getSize = sharedLimit(this.getSize)

@@ -122,16 +120,14 @@ export default class RemoteHandlerAbstract {
 }

 // TODO: remove method
-async createOutputStream(
-  file: File,
-  { checksum = false, ...options }: Object = {}
-): Promise<LaxWritable> {
+async createOutputStream(file: File, { checksum = false, dirMode, ...options }: Object = {}): Promise<LaxWritable> {
   if (typeof file === 'string') {
     file = normalizePath(file)
   }
   const path = typeof file === 'string' ? file : file.path
   const streamP = timeout.call(
     this._createOutputStream(file, {
+      dirMode,
       flags: 'wx',
       ...options,
     }),

@@ -153,9 +149,7 @@ export default class RemoteHandlerAbstract {

 // $FlowFixMe
 checksumStream.checksumWritten = checksumStream.checksum
-  .then(value =>
-    this._outputFile(checksumFile(path), value, { flags: 'wx' })
-  )
+  .then(value => this._outputFile(checksumFile(path), value, { flags: 'wx' }))
   .catch(forwardError)

 return checksumStream

@@ -169,30 +163,24 @@ export default class RemoteHandlerAbstract {
   file = normalizePath(file)
 }
 const path = typeof file === 'string' ? file : file.path
-const streamP = timeout
-  .call(this._createReadStream(file, options), this._timeout)
-  .then(stream => {
-    // detect early errors
-    let promise = fromEvent(stream, 'readable')
+const streamP = timeout.call(this._createReadStream(file, options), this._timeout).then(stream => {
+  // detect early errors
+  let promise = fromEvent(stream, 'readable')

-    // try to add the length prop if missing and not a range stream
-    if (
-      stream.length === undefined &&
-      options.end === undefined &&
-      options.start === undefined
-    ) {
-      promise = Promise.all([
-        promise,
-        ignoreErrors.call(
-          this._getSize(file).then(size => {
-            stream.length = size
-          })
-        ),
-      ])
-    }
+  // try to add the length prop if missing and not a range stream
+  if (stream.length === undefined && options.end === undefined && options.start === undefined) {
+    promise = Promise.all([
+      promise,
+      ignoreErrors.call(
+        this._getSize(file).then(size => {
+          stream.length = size
+        })
+      ),
+    ])
+  }

-    return promise.then(() => stream)
-  })
+  return promise.then(() => stream)
+})

 if (!checksum) {
   return streamP

@@ -205,10 +193,7 @@ export default class RemoteHandlerAbstract {
 checksum =>
   streamP.then(stream => {
     const { length } = stream
-    stream = (validChecksumOfReadStream(
-      stream,
-      String(checksum).trim()
-    ): LaxReadable)
+    stream = (validChecksumOfReadStream(stream, String(checksum).trim()): LaxReadable)
     stream.length = length

     return stream

@@ -226,11 +211,13 @@ export default class RemoteHandlerAbstract {
 async outputStream(
   input: Readable | Promise<Readable>,
   path: string,
-  { checksum = true }: { checksum?: boolean } = {}
+  { checksum = true, dirMode }: { checksum?: boolean, dirMode?: number } = {}
 ): Promise<void> {
   path = normalizePath(path)
   input = await input
-  return this._outputStream(await input, normalizePath(path), { checksum })
+  return this._outputStream(await input, normalizePath(path), {
+    checksum,
+    dirMode,
+  })
 }

 // Free the resources possibly dedicated to put the remote at work, when it

@@ -249,18 +236,12 @@ export default class RemoteHandlerAbstract {
 }

 async getSize(file: File): Promise<number> {
-  return timeout.call(
-    this._getSize(typeof file === 'string' ? normalizePath(file) : file),
-    this._timeout
-  )
+  return timeout.call(this._getSize(typeof file === 'string' ? normalizePath(file) : file), this._timeout)
 }

 async list(
   dir: string,
-  {
-    filter,
-    prependDir = false,
-  }: { filter?: (name: string) => boolean, prependDir?: boolean } = {}
+  { filter, prependDir = false }: { filter?: (name: string) => boolean, prependDir?: boolean } = {}
 ): Promise<string[]> {
   const virtualDir = normalizePath(dir)
   dir = normalizePath(dir)

@@ -279,12 +260,12 @@ export default class RemoteHandlerAbstract {
   return entries
 }

-async mkdir(dir: string): Promise<void> {
-  await this.__mkdir(normalizePath(dir))
+async mkdir(dir: string, { mode }: { mode?: number } = {}): Promise<void> {
+  await this.__mkdir(normalizePath(dir), { mode })
 }

-async mktree(dir: string): Promise<void> {
-  await this._mktree(normalizePath(dir))
+async mktree(dir: string, { mode }: { mode?: number } = {}): Promise<void> {
+  await this._mktree(normalizePath(dir), { mode })
 }

 openFile(path: string, flags: string): Promise<FileDescriptor> {

@@ -294,53 +275,32 @@ export default class RemoteHandlerAbstract {
 async outputFile(
   file: string,
   data: Data,
-  { flags = 'wx' }: { flags?: string } = {}
+  { dirMode, flags = 'wx' }: { dirMode?: number, flags?: string } = {}
 ): Promise<void> {
-  await this._outputFile(normalizePath(file), data, { flags })
+  await this._outputFile(normalizePath(file), data, { dirMode, flags })
 }

-async read(
-  file: File,
-  buffer: Buffer,
-  position?: number
-): Promise<{| bytesRead: number, buffer: Buffer |}> {
-  return this._read(
-    typeof file === 'string' ? normalizePath(file) : file,
-    buffer,
-    position
-  )
+async read(file: File, buffer: Buffer, position?: number): Promise<{| bytesRead: number, buffer: Buffer |}> {
+  return this._read(typeof file === 'string' ? normalizePath(file) : file, buffer, position)
 }

-async readFile(
-  file: string,
-  { flags = 'r' }: { flags?: string } = {}
-): Promise<Buffer> {
+async readFile(file: string, { flags = 'r' }: { flags?: string } = {}): Promise<Buffer> {
   return this._readFile(normalizePath(file), { flags })
 }

-async rename(
-  oldPath: string,
-  newPath: string,
-  { checksum = false }: Object = {}
-) {
+async rename(oldPath: string, newPath: string, { checksum = false }: Object = {}) {
   oldPath = normalizePath(oldPath)
   newPath = normalizePath(newPath)

   let p = timeout.call(this._rename(oldPath, newPath), this._timeout)
   if (checksum) {
-    p = Promise.all([
-      p,
-      this._rename(checksumFile(oldPath), checksumFile(newPath)),
-    ])
+    p = Promise.all([p, this._rename(checksumFile(oldPath), checksumFile(newPath))])
   }
   return p
 }

 async rmdir(dir: string): Promise<void> {
-  await timeout.call(
-    this._rmdir(normalizePath(dir)).catch(ignoreEnoent),
-    this._timeout
-  )
+  await timeout.call(this._rmdir(normalizePath(dir)).catch(ignoreEnoent), this._timeout)
 }

 async rmtree(dir: string): Promise<void> {

@@ -405,35 +365,23 @@ export default class RemoteHandlerAbstract {
   await this._unlink(file).catch(ignoreEnoent)
 }

-async write(
-  file: File,
-  buffer: Buffer,
-  position: number
-): Promise<{| bytesWritten: number, buffer: Buffer |}> {
-  await this._write(
-    typeof file === 'string' ? normalizePath(file) : file,
-    buffer,
-    position
-  )
+async write(file: File, buffer: Buffer, position: number): Promise<{| bytesWritten: number, buffer: Buffer |}> {
+  await this._write(typeof file === 'string' ? normalizePath(file) : file, buffer, position)
 }

-async writeFile(
-  file: string,
-  data: Data,
-  { flags = 'wx' }: { flags?: string } = {}
-): Promise<void> {
+async writeFile(file: string, data: Data, { flags = 'wx' }: { flags?: string } = {}): Promise<void> {
   await this._writeFile(normalizePath(file), data, { flags })
 }

 // Methods that can be called by private methods to avoid parallel limit on public methods

 async __closeFile(fd: FileDescriptor): Promise<void> {
-  await timeout.call(this._closeFile(fd.fd), this._timeout)
+  await timeout.call(this._closeFile(fd), this._timeout)
 }

-async __mkdir(dir: string): Promise<void> {
+async __mkdir(dir: string, { mode }: { mode?: number } = {}): Promise<void> {
   try {
-    await this._mkdir(dir)
+    await this._mkdir(dir, { mode })
   } catch (error) {
     if (error == null || error.code !== 'EEXIST') {
       throw error

@@ -459,7 +407,7 @@ export default class RemoteHandlerAbstract {
   throw new Error('Not implemented')
 }

-async _createOutputStream(file: File, options: Object): Promise<LaxWritable> {
+async _createOutputStream(file: File, { dirMode, ...options }: Object = {}): Promise<LaxWritable> {
   try {
     return await this._createWriteStream(file, options)
   } catch (error) {

@@ -468,7 +416,7 @@ export default class RemoteHandlerAbstract {
   }
 }

-await this._mktree(dirname(file))
+await this._mktree(dirname(file), { mode: dirMode })
 return this._createOutputStream(file, options)
 }

@@ -499,43 +447,42 @@ export default class RemoteHandlerAbstract {
   throw new Error('Not implemented')
 }

-async _mktree(dir: string): Promise<void> {
+async _mktree(dir: string, { mode }: { mode?: number } = {}): Promise<void> {
   try {
-    return await this.__mkdir(dir)
+    return await this.__mkdir(dir, { mode })
   } catch (error) {
     if (error.code !== 'ENOENT') {
       throw error
     }
   }

-  await this._mktree(dirname(dir))
-  return this._mktree(dir)
+  await this._mktree(dirname(dir), { mode })
+  return this._mktree(dir, { mode })
 }

 async _openFile(path: string, flags: string): Promise<mixed> {
   throw new Error('Not implemented')
 }

-async _outputFile(
-  file: string,
-  data: Data,
-  options: { flags?: string }
-): Promise<void> {
+async _outputFile(file: string, data: Data, { dirMode, flags }: { dirMode?: number, flags?: string }): Promise<void> {
   try {
-    return await this._writeFile(file, data, options)
+    return await this._writeFile(file, data, { flags })
   } catch (error) {
     if (error.code !== 'ENOENT') {
       throw error
     }
   }

-  await this._mktree(dirname(file))
-  return this._outputFile(file, data, options)
+  await this._mktree(dirname(file), { mode: dirMode })
+  return this._outputFile(file, data, { flags })
 }

-async _outputStream(input, path, { checksum }) {
+async _outputStream(input: Readable, path: string, { checksum, dirMode }: { checksum?: boolean, dirMode?: number }) {
   const tmpPath = `${dirname(path)}/.${basename(path)}`
-  const output = await this.createOutputStream(tmpPath, { checksum })
+  const output = await this.createOutputStream(tmpPath, {
+    checksum,
+    dirMode,
+  })
   try {
     input.pipe(output)
     await fromEvent(output, 'finish')

@@ -549,11 +496,7 @@ export default class RemoteHandlerAbstract {
   }
 }

-_read(
-  file: File,
-  buffer: Buffer,
-  position?: number
-): Promise<{| bytesRead: number, buffer: Buffer |}> {
+_read(file: File, buffer: Buffer, position?: number): Promise<{| bytesRead: number, buffer: Buffer |}> {
   throw new Error('Not implemented')
 }

@@ -611,19 +554,11 @@ export default class RemoteHandlerAbstract {
   }
 }

-async _writeFd(
-  fd: FileDescriptor,
-  buffer: Buffer,
-  position: number
-): Promise<void> {
+async _writeFd(fd: FileDescriptor, buffer: Buffer, position: number): Promise<void> {
   throw new Error('Not implemented')
 }

-async _writeFile(
-  file: string,
-  data: Data,
-  options: { flags?: string }
-): Promise<void> {
+async _writeFile(file: string, data: Data, options: { flags?: string }): Promise<void> {
   throw new Error('Not implemented')
 }
}

@@ -643,8 +578,7 @@ function createPrefixWrapperMethods() {
 if (
   hasOwnProperty.call(pPw, name) ||
   name[0] === '_' ||
-  typeof (value = (descriptor = getOwnPropertyDescriptor(pRha, name))
-    .value) !== 'function'
+  typeof (value = (descriptor = getOwnPropertyDescriptor(pRha, name)).value) !== 'function'
 ) {
   return
 }
@@ -27,9 +27,7 @@ const ID_TO_ALGORITHM = invert(ALGORITHM_TO_ID)
 // const checksumStream = source.pipe(createChecksumStream())
 // checksumStream.resume() // make the data flow without an output
 // console.log(await checksumStream.checksum)
-export const createChecksumStream = (
-  algorithm: string = 'md5'
-): Transform & { checksum: Promise<string> } => {
+export const createChecksumStream = (algorithm: string = 'md5'): Transform & { checksum: Promise<string> } => {
   const algorithmId = ALGORITHM_TO_ID[algorithm]

   if (!algorithmId) {

@@ -60,10 +58,7 @@ export const validChecksumOfReadStream = (
   stream: Readable,
   expectedChecksum: string
 ): Readable & { checksumVerified: Promise<void> } => {
-  const algorithmId = expectedChecksum.slice(
-    1,
-    expectedChecksum.indexOf('$', 1)
-  )
+  const algorithmId = expectedChecksum.slice(1, expectedChecksum.indexOf('$', 1))

   if (!algorithmId) {
     throw new Error(`unknown algorithm: ${algorithmId}`)

@@ -82,11 +77,7 @@ export const validChecksumOfReadStream = (
 const checksum = `$${algorithmId}$$${hash.digest('hex')}`

 callback(
-  checksum !== expectedChecksum
-    ? new Error(
-        `Bad checksum (${checksum}), expected: ${expectedChecksum}`
-      )
-    : null
+  checksum !== expectedChecksum ? new Error(`Bad checksum (${checksum}), expected: ${expectedChecksum}`) : null
 )
 }
 )
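The `$${algorithmId}$$${hash.digest('hex')}` template produces crypt-style strings such as `$1$$<hex digest>`. Reproducing one by hand could look like this — hypothetical sketch, since the real id table lives in `ALGORITHM_TO_ID` and is not shown in this hunk:

```js
const { createHash } = require('crypto')

// Assumption: '1' is the id registered for md5 in ALGORITHM_TO_ID.
function checksumOf(buffer, algorithm = 'md5', algorithmId = '1') {
  return `$${algorithmId}$$${createHash(algorithm).update(buffer).digest('hex')}`
}

checksumOf(Buffer.from('hello')) // '$1$$5d41402abc4b2a76b9719d911017c592'
```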
@@ -126,16 +126,12 @@ handlers.forEach(url => {

 it('can prepend the directory to entries', async () => {
   await handler.outputFile('dir/file', '')
-  expect(await handler.list('dir', { prependDir: true })).toEqual([
-    '/dir/file',
-  ])
+  expect(await handler.list('dir', { prependDir: true })).toEqual(['/dir/file'])
 })

 it('can prepend the directory to entries', async () => {
   await handler.outputFile('dir/file', '')
-  expect(await handler.list('dir', { prependDir: true })).toEqual([
-    '/dir/file',
-  ])
+  expect(await handler.list('dir', { prependDir: true })).toEqual(['/dir/file'])
 })
 })
@@ -308,10 +304,7 @@ handlers.forEach(url => {
   return { offset, expected }
 })(),
 'increase file size': (() => {
-  const offset = random(
-    TEST_DATA_LEN - PATCH_DATA_LEN + 1,
-    TEST_DATA_LEN
-  )
+  const offset = random(TEST_DATA_LEN - PATCH_DATA_LEN + 1, TEST_DATA_LEN)

   const expected = Buffer.alloc(offset + PATCH_DATA_LEN)
   TEST_DATA.copy(expected)

@@ -355,5 +348,11 @@ handlers.forEach(url => {
   }
 )
 })
+describe('#open()', () => {
+  it('can do an open/close cycle without crashing', async () => {
+    const file = await handler.openFile('write', 'w')
+    expect(async () => handler.closeFile(file)).not.toThrow()
+  })
+})
 })
 })
@@ -18,7 +18,7 @@ export default class LocalHandler extends RemoteHandlerAbstract {
 }

 async _closeFile(fd) {
-  return fs.close(fd)
+  return fs.close(fd.fd)
 }

 async _createReadStream(file, options) {

@@ -63,9 +63,7 @@ export default class LocalHandler extends RemoteHandlerAbstract {
 }

 async _getSize(file) {
-  const stats = await fs.stat(
-    this._getFilePath(typeof file === 'string' ? file : file.path)
-  )
+  const stats = await fs.stat(this._getFilePath(typeof file === 'string' ? file : file.path))
   return stats.size
 }

@@ -73,8 +71,8 @@ export default class LocalHandler extends RemoteHandlerAbstract {
   return fs.readdir(this._getFilePath(dir))
 }

-_mkdir(dir) {
-  return fs.mkdir(this._getFilePath(dir))
+_mkdir(dir, { mode }) {
+  return fs.mkdir(this._getFilePath(dir), { mode })
 }

 async _openFile(path, flags) {

@@ -85,13 +83,7 @@ export default class LocalHandler extends RemoteHandlerAbstract {
 const needsClose = typeof file === 'string'
 file = needsClose ? await fs.open(this._getFilePath(file), 'r') : file.fd
 try {
-  return await fs.read(
-    file,
-    buffer,
-    0,
-    buffer.length,
-    position === undefined ? null : position
-  )
+  return await fs.read(file, buffer, 0, buffer.length, position === undefined ? null : position)
 } finally {
   if (needsClose) {
     await fs.close(file)
@@ -2,15 +2,13 @@ import { parse } from 'xo-remote-parser'

 import MountHandler from './_mount'

-const DEFAULT_NFS_OPTIONS = 'vers=3'
-
 export default class NfsHandler extends MountHandler {
   constructor(remote, opts) {
     const { host, port, path } = parse(remote.url)
     super(remote, opts, {
       type: 'nfs',
       device: `${host}${port !== undefined ? ':' + port : ''}:${path}`,
-      options: DEFAULT_NFS_OPTIONS,
+      defaultOptions: 'vers=3',
     })
   }
@@ -1,4 +1,5 @@
-import AWS from 'aws-sdk'
+import aws from '@sullux/aws-sdk'
+import assert from 'assert'
 import { parse } from 'xo-remote-parser'

 import RemoteHandlerAbstract from './abstract'

@@ -16,16 +17,19 @@ export default class S3Handler extends RemoteHandlerAbstract {
 constructor(remote, _opts) {
   super(remote)
   const { host, path, username, password } = parse(remote.url)

   // https://www.zenko.io/blog/first-things-first-getting-started-scality-s3-server/
-  this._s3 = new AWS.S3({
+  this._s3 = aws({
     accessKeyId: username,
     apiVersion: '2006-03-01',
     endpoint: host,
     s3ForcePathStyle: true,
     secretAccessKey: password,
     signatureVersion: 'v4',
-  })
+    httpOptions: {
+      timeout: 600000,
+    },
+  }).s3

   const splitPath = path.split('/').filter(s => s.length)
   this._bucket = splitPath.shift()
   this._dir = splitPath.join('/')

@@ -50,37 +54,35 @@ export default class S3Handler extends RemoteHandlerAbstract {
   input.on('error', forwardError)
   inputStream = checksumStream
 }
-const upload = this._s3.upload(
+await this._s3.upload(
   {
     ...this._createParams(path),
     Body: inputStream,
   },
-  { partSize: IDEAL_FRAGMENT_SIZE }
+  { partSize: IDEAL_FRAGMENT_SIZE, queueSize: 1 }
 )
-await upload.promise()
 if (checksum) {
   const checksum = await inputStream.checksum
   const params = {
     ...this._createParams(path + '.checksum'),
     Body: checksum,
   }
-  await this._s3.upload(params).promise()
+  await this._s3.upload(params)
 }
 await input.task
}

async _writeFile(file, data, options) {
-  return this._s3
-    .putObject({ ...this._createParams(file), Body: data })
-    .promise()
+  return this._s3.putObject({ ...this._createParams(file), Body: data })
}

async _createReadStream(file, options) {
-  return this._s3.getObject(this._createParams(file)).createReadStream()
+  // https://github.com/Sullux/aws-sdk/issues/11
+  return this._s3.getObject.raw(this._createParams(file)).createReadStream()
}

async _unlink(file) {
-  return this._s3.deleteObject(this._createParams(file)).promise()
+  return this._s3.deleteObject(this._createParams(file))
}

async _list(dir) {

@@ -90,11 +92,10 @@ export default class S3Handler extends RemoteHandlerAbstract {

 const prefix = [this._dir, dir].join('/')
 const splitPrefix = splitPath(prefix)
-const request = this._s3.listObjectsV2({
+const result = await this._s3.listObjectsV2({
   Bucket: this._bucket,
   Prefix: splitPrefix.join('/'),
 })
-const result = await request.promise()
 const uniq = new Set()
 for (const entry of result.Contents) {
   const line = splitPath(entry.Key)

@@ -106,19 +107,32 @@ export default class S3Handler extends RemoteHandlerAbstract {
 }

 async _rename(oldPath, newPath) {
-  const params = {
-    ...this._createParams(newPath),
-    CopySource: `/${this._bucket}/${this._dir}${oldPath}`,
-  }
-  await this._s3.copyObject(params).promise()
-  await this._s3.deleteObject(this._createParams(oldPath)).promise()
+  const size = await this._getSize(oldPath)
+  const multipartParams = await this._s3.createMultipartUpload({ ...this._createParams(newPath) })
+  const param2 = { ...multipartParams, CopySource: `/${this._bucket}/${this._dir}${oldPath}` }
+  try {
+    const parts = []
+    let start = 0
+    while (start < size) {
+      const range = `bytes=${start}-${Math.min(start + MAX_PART_SIZE, size) - 1}`
+      const partParams = { ...param2, PartNumber: parts.length + 1, CopySourceRange: range }
+      const upload = await this._s3.uploadPartCopy(partParams)
+      parts.push({ ETag: upload.CopyPartResult.ETag, PartNumber: partParams.PartNumber })
+      start += MAX_PART_SIZE
+    }
+    await this._s3.completeMultipartUpload({ ...multipartParams, MultipartUpload: { Parts: parts } })
+  } catch (e) {
+    await this._s3.abortMultipartUpload(multipartParams)
+    throw e
+  }
+  await this._s3.deleteObject(this._createParams(oldPath))
 }

 async _getSize(file) {
   if (typeof file !== 'string') {
     file = file.fd
   }
-  const result = await this._s3.headObject(this._createParams(file)).promise()
+  const result = await this._s3.headObject(this._createParams(file))
   return +result.ContentLength
 }
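The rewritten `_rename` copies the object in `MAX_PART_SIZE` slices through `uploadPartCopy`, presumably because a single `UploadPartCopy` call is capped at 5 GiB. The range arithmetic from the loop, extracted as a sketch (part size assumed):

```js
// Assumption: MAX_PART_SIZE is S3's 5 GiB UploadPartCopy limit.
const MAX_PART_SIZE = 5 * 1024 ** 3

function copyRanges(size) {
  const ranges = []
  for (let start = 0; start < size; start += MAX_PART_SIZE) {
    ranges.push(`bytes=${start}-${Math.min(start + MAX_PART_SIZE, size) - 1}`)
  }
  return ranges
}

copyRanges(12 * 1024 ** 3) // two full 5 GiB slices, then a final 2 GiB slice
```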
@@ -128,7 +142,7 @@ export default class S3Handler extends RemoteHandlerAbstract {
 }
 const params = this._createParams(file)
 params.Range = `bytes=${position}-${position + buffer.length - 1}`
-const result = await this._s3.getObject(params).promise()
+const result = await this._s3.getObject(params)
 result.Body.copy(buffer)
 return { bytesRead: result.Body.length, buffer }
 }

@@ -138,19 +152,13 @@ export default class S3Handler extends RemoteHandlerAbstract {
   file = file.fd
 }
 const uploadParams = this._createParams(file)
-const fileSize = +(await this._s3.headObject(uploadParams).promise())
-  .ContentLength
+const fileSize = +(await this._s3.headObject(uploadParams)).ContentLength
 if (fileSize < MIN_PART_SIZE) {
-  const resultBuffer = Buffer.alloc(
-    Math.max(fileSize, position + buffer.length)
-  )
-  const fileContent = (await this._s3.getObject(uploadParams).promise())
-    .Body
+  const resultBuffer = Buffer.alloc(Math.max(fileSize, position + buffer.length))
+  const fileContent = (await this._s3.getObject(uploadParams)).Body
   fileContent.copy(resultBuffer)
   buffer.copy(resultBuffer, position)
-  await this._s3
-    .putObject({ ...uploadParams, Body: resultBuffer })
-    .promise()
+  await this._s3.putObject({ ...uploadParams, Body: resultBuffer })
   return { buffer, bytesWritten: buffer.length }
 } else {
   // using this trick: https://stackoverflow.com/a/38089437/72637

@@ -159,10 +167,13 @@ export default class S3Handler extends RemoteHandlerAbstract {
 // if `prefix` is bigger than 5Mo, it will be sourced from uploadPartCopy()
 // otherwise otherwise it will be downloaded, concatenated to `edit`
 // `edit` will always be an upload part
-// `suffix` will ways be sourced from uploadPartCopy()
-const multipartParams = await this._s3
-  .createMultipartUpload(uploadParams)
-  .promise()
+// `suffix` will always be sourced from uploadPartCopy()
+// Then everything will be sliced in 5Gb parts before getting uploaded
+const multipartParams = await this._s3.createMultipartUpload(uploadParams)
 const copyMultipartParams = {
   ...multipartParams,
   CopySource: `/${this._bucket}/${this._dir + file}`,
 }
 try {
   const parts = []
   const prefixSize = position
@@ -172,105 +183,70 @@ export default class S3Handler extends RemoteHandlerAbstract {
|
||||
let editBuffer = buffer
|
||||
let editBufferOffset = position
|
||||
let partNumber = 1
|
||||
if (prefixSize < MIN_PART_SIZE) {
|
||||
const downloadParams = {
|
||||
...uploadParams,
|
||||
Range: `bytes=0-${prefixSize - 1}`,
|
||||
}
|
||||
const prefixBuffer =
|
||||
prefixSize > 0
|
||||
? (await this._s3.getObject(downloadParams).promise()).Body
|
||||
: Buffer.alloc(0)
|
||||
let prefixPosition = 0
|
||||
// use floor() so that last fragment is handled in the if bellow
|
||||
let fragmentsCount = Math.floor(prefixSize / MAX_PART_SIZE)
const prefixFragmentSize = MAX_PART_SIZE
let prefixLastFragmentSize = prefixSize - prefixFragmentSize * fragmentsCount
if (prefixLastFragmentSize >= MIN_PART_SIZE) {
// the last fragment of the prefix is smaller than MAX_PART_SIZE, but bigger than the minimum
// so we can copy it too
fragmentsCount++
prefixLastFragmentSize = 0
}
for (let i = 0; i < fragmentsCount; i++) {
const fragmentEnd = Math.min(prefixPosition + prefixFragmentSize, prefixSize)
assert.strictEqual(fragmentEnd - prefixPosition <= MAX_PART_SIZE, true)
const range = `bytes=${prefixPosition}-${fragmentEnd - 1}`
const copyPrefixParams = { ...copyMultipartParams, PartNumber: partNumber++, CopySourceRange: range }
const part = await this._s3.uploadPartCopy(copyPrefixParams)
parts.push({ ETag: part.CopyPartResult.ETag, PartNumber: copyPrefixParams.PartNumber })
prefixPosition += prefixFragmentSize
}
if (prefixLastFragmentSize) {
// grab everything from the prefix that was too small to be copied, download and merge to the edit buffer.
const downloadParams = { ...uploadParams, Range: `bytes=${prefixPosition}-${prefixSize - 1}` }
const prefixBuffer = prefixSize > 0 ? (await this._s3.getObject(downloadParams)).Body : Buffer.alloc(0)
editBuffer = Buffer.concat([prefixBuffer, buffer])
editBufferOffset = 0
} else {
const fragmentsCount = Math.ceil(prefixSize / MAX_PART_SIZE)
const prefixFragmentSize = Math.ceil(prefixSize / fragmentsCount)
const lastFragmentSize =
prefixFragmentSize * fragmentsCount - prefixSize
let prefixPosition = 0
for (let i = 0; i < fragmentsCount; i++) {
const copyPrefixParams = {
...multipartParams,
PartNumber: partNumber++,
CopySource: `/${this._bucket}/${this._dir + file}`,
CopySourceRange: `bytes=${prefixPosition}-${
prefixPosition + prefixFragmentSize - 1
}`,
}
const prefixPart = (
await this._s3.uploadPartCopy(copyPrefixParams).promise()
).CopyPartResult
parts.push({
ETag: prefixPart.ETag,
PartNumber: copyPrefixParams.PartNumber,
})
prefixPosition += prefixFragmentSize
}
if (lastFragmentSize) {
}
editBufferOffset -= prefixLastFragmentSize
}
if (hasSuffix && editBuffer.length < MIN_PART_SIZE) {
// the edit fragment is too short and is not the last fragment
// let's steal from the suffix fragment to reach the minimum size
// the suffix might be too short and itself entirely absorbed in the edit fragment, making it the last one.
const complementSize = Math.min(
MIN_PART_SIZE - editBuffer.length,
suffixSize
)
const complementSize = Math.min(MIN_PART_SIZE - editBuffer.length, suffixSize)
const complementOffset = editBufferOffset + editBuffer.length
suffixOffset += complementSize
suffixSize -= complementSize
hasSuffix = suffixSize > 0
const prefixRange = `bytes=${complementOffset}-${
complementOffset + complementSize - 1
}`
const prefixRange = `bytes=${complementOffset}-${complementOffset + complementSize - 1}`
const downloadParams = { ...uploadParams, Range: prefixRange }
const complementBuffer = (
await this._s3.getObject(downloadParams).promise()
).Body
const complementBuffer = (await this._s3.getObject(downloadParams)).Body
editBuffer = Buffer.concat([editBuffer, complementBuffer])
}
const editParams = {
...multipartParams,
Body: editBuffer,
PartNumber: partNumber++,
}
const editPart = await this._s3.uploadPart(editParams).promise()
const editParams = { ...multipartParams, Body: editBuffer, PartNumber: partNumber++ }
const editPart = await this._s3.uploadPart(editParams)
parts.push({ ETag: editPart.ETag, PartNumber: editParams.PartNumber })
if (hasSuffix) {
// use ceil because the last fragment can be arbitrarily small.
const suffixFragments = Math.ceil(suffixSize / MAX_PART_SIZE)
const suffixFragmentsSize = Math.ceil(suffixSize / suffixFragments)
let suffixFragmentOffset = suffixOffset
for (let i = 0; i < suffixFragments; i++) {
const fragmentEnd = suffixFragmentOffset + suffixFragmentsSize
const suffixRange = `bytes=${suffixFragmentOffset}-${
Math.min(fileSize, fragmentEnd) - 1
}`
const copySuffixParams = {
...multipartParams,
PartNumber: partNumber++,
CopySource: `/${this._bucket}/${this._dir + file}`,
CopySourceRange: suffixRange,
}
const suffixPart = (
await this._s3.uploadPartCopy(copySuffixParams).promise()
).CopyPartResult
parts.push({
ETag: suffixPart.ETag,
PartNumber: copySuffixParams.PartNumber,
})
const fragmentEnd = suffixFragmentOffset + MAX_PART_SIZE
assert.strictEqual(Math.min(fileSize, fragmentEnd) - suffixFragmentOffset <= MAX_PART_SIZE, true)
const suffixRange = `bytes=${suffixFragmentOffset}-${Math.min(fileSize, fragmentEnd) - 1}`
const copySuffixParams = { ...copyMultipartParams, PartNumber: partNumber++, CopySourceRange: suffixRange }
const suffixPart = (await this._s3.uploadPartCopy(copySuffixParams)).CopyPartResult
parts.push({ ETag: suffixPart.ETag, PartNumber: copySuffixParams.PartNumber })
suffixFragmentOffset = fragmentEnd
}
}
await this._s3
.completeMultipartUpload({
...multipartParams,
MultipartUpload: { Parts: parts },
})
.promise()
await this._s3.completeMultipartUpload({
...multipartParams,
MultipartUpload: { Parts: parts },
})
} catch (e) {
await this._s3.abortMultipartUpload(multipartParams).promise()
await this._s3.abortMultipartUpload(multipartParams)
throw e
}
}

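The hunks above implement a partial write on S3 without downloading the whole object: the file is rebuilt through a multipart upload in which the untouched `prefix` and `suffix` byte ranges are copied server-side with `uploadPartCopy`, and only the edited range is uploaded as a regular part. A minimal sketch of the idea (illustrative only: `writeAt` and its parameters are invented for this sketch, the aws-sdk v2 promise API is assumed, and both prefix and suffix are assumed to be at least S3's 5 MiB minimum part size):

```js
// Hypothetical sketch: overwrite `edit` at `position` inside an existing S3
// object, rewriting it via a multipart upload instead of a full download.
async function writeAt(s3, { Bucket, Key }, edit, position, fileSize) {
  const { UploadId } = await s3.createMultipartUpload({ Bucket, Key }).promise()
  try {
    const source = { Bucket, Key, UploadId, CopySource: `/${Bucket}/${Key}` }
    const parts = []
    // part 1: server-side copy of the bytes before the edit
    const prefix = await s3
      .uploadPartCopy({ ...source, PartNumber: 1, CopySourceRange: `bytes=0-${position - 1}` })
      .promise()
    parts.push({ ETag: prefix.CopyPartResult.ETag, PartNumber: 1 })
    // part 2: the edited bytes, uploaded directly
    const part = await s3.uploadPart({ Bucket, Key, UploadId, PartNumber: 2, Body: edit }).promise()
    parts.push({ ETag: part.ETag, PartNumber: 2 })
    // part 3: server-side copy of the bytes after the edit
    const suffixStart = position + edit.length
    const suffix = await s3
      .uploadPartCopy({ ...source, PartNumber: 3, CopySourceRange: `bytes=${suffixStart}-${fileSize - 1}` })
      .promise()
    parts.push({ ETag: suffix.CopyPartResult.ETag, PartNumber: 3 })
    await s3.completeMultipartUpload({ Bucket, Key, UploadId, MultipartUpload: { Parts: parts } }).promise()
  } catch (error) {
    // a failed multipart upload must be aborted or its parts keep billing
    await s3.abortMultipartUpload({ Bucket, Key, UploadId }).promise()
    throw error
  }
}
```

The real handler additionally slices each range into fragments between the 5 MiB minimum and 5 GiB maximum part sizes, and steals bytes from the suffix when the edited part alone would be too small.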
@@ -5,9 +5,7 @@ import normalizePath from './_normalizePath'

export default class SmbMountHandler extends MountHandler {
constructor(remote, opts) {
const { domain = 'WORKGROUP', host, password, path, username } = parse(
remote.url
)
const { domain = 'WORKGROUP', host, password, path, username } = parse(remote.url)
super(remote, opts, {
type: 'cifs',
device: '//' + host + normalizePath(path),

@@ -17,8 +17,7 @@ const normalizeError = (error, shouldBeDirectory) => {
? wrapError(error, 'EISDIR')
: code === 'STATUS_NOT_A_DIRECTORY'
? wrapError(error, 'ENOTDIR')
: code === 'STATUS_OBJECT_NAME_NOT_FOUND' ||
code === 'STATUS_OBJECT_PATH_NOT_FOUND'
: code === 'STATUS_OBJECT_NAME_NOT_FOUND' || code === 'STATUS_OBJECT_PATH_NOT_FOUND'
? wrapError(error, 'ENOENT')
: code === 'STATUS_OBJECT_NAME_COLLISION'
? wrapError(error, 'EEXIST')
@@ -44,12 +43,7 @@ export default class SmbHandler extends RemoteHandlerAbstract {
}

_getFilePath(file) {
return (
this._prefix +
(typeof file === 'string' ? file : file.path)
.slice(1)
.replace(/\//g, '\\')
)
return this._prefix + (typeof file === 'string' ? file : file.path).slice(1).replace(/\//g, '\\')
}

_dirname(file) {
@@ -96,15 +90,13 @@ export default class SmbHandler extends RemoteHandlerAbstract {
return this._client.readdir(this._getFilePath(dir)).catch(normalizeDirError)
}

_mkdir(dir) {
return this._client.mkdir(this._getFilePath(dir)).catch(normalizeDirError)
_mkdir(dir, { mode }) {
return this._client.mkdir(this._getFilePath(dir), mode).catch(normalizeDirError)
}

// TODO: add flags
_openFile(path, flags) {
return this._client
.open(this._getFilePath(path), flags)
.catch(normalizeError)
return this._client.open(this._getFilePath(path), flags).catch(normalizeError)
}

async _read(file, buffer, position) {
@@ -123,9 +115,7 @@ export default class SmbHandler extends RemoteHandlerAbstract {
}

_readFile(file, options) {
return this._client
.readFile(this._getFilePath(file), options)
.catch(normalizeError)
return this._client.readFile(this._getFilePath(file), options).catch(normalizeError)
}

_rename(oldPath, newPath) {
@@ -156,9 +146,7 @@ export default class SmbHandler extends RemoteHandlerAbstract {
}

_truncate(file, len) {
return this._client
.truncate(this._getFilePath(file), len)
.catch(normalizeError)
return this._client.truncate(this._getFilePath(file), len).catch(normalizeError)
}

_unlink(file) {
@@ -170,8 +158,6 @@ export default class SmbHandler extends RemoteHandlerAbstract {
}

_writeFile(file, data, options) {
return this._client
.writeFile(this._getFilePath(file), data, options)
.catch(normalizeError)
return this._client.writeFile(this._getFilePath(file), data, options).catch(normalizeError)
}
}

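The `normalizeError` ternary chain above maps SMB status codes onto the POSIX error codes the abstract remote handler expects. The same mapping, expressed as a plain lookup table (illustrative only, names invented for this sketch; the real module also distinguishes the directory case via `shouldBeDirectory`):

```js
// Illustrative rewrite of the mapping shown in the hunk above.
const SMB_TO_POSIX = {
  STATUS_NOT_A_DIRECTORY: 'ENOTDIR',
  STATUS_OBJECT_NAME_NOT_FOUND: 'ENOENT',
  STATUS_OBJECT_PATH_NOT_FOUND: 'ENOENT',
  STATUS_OBJECT_NAME_COLLISION: 'EEXIST',
}
const toPosixCode = smbCode => SMB_TO_POSIX[smbCode]
```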
@@ -1,3 +1 @@
module.exports = require('../../@xen-orchestra/babel-config')(
require('./package.json')
)
module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))

@@ -32,7 +32,7 @@
},
"dependencies": {
"lodash": "^4.17.4",
"promise-toolbox": "^0.15.0"
"promise-toolbox": "^0.16.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",

@@ -47,10 +47,7 @@ const createTransport = config => {
return transport
}

const symbol =
typeof Symbol !== 'undefined'
? Symbol.for('@xen-orchestra/log')
: '@@@xen-orchestra/log'
const symbol = typeof Symbol !== 'undefined' ? Symbol.for('@xen-orchestra/log') : '@@@xen-orchestra/log'

const { env } = process
global[symbol] = createTransport({

@@ -1,10 +1,7 @@
import createTransport from './transports/console'
import LEVELS, { resolve } from './levels'

const symbol =
typeof Symbol !== 'undefined'
? Symbol.for('@xen-orchestra/log')
: '@@@xen-orchestra/log'
const symbol = typeof Symbol !== 'undefined' ? Symbol.for('@xen-orchestra/log') : '@@@xen-orchestra/log'
if (!(symbol in global)) {
// the default behavior, without requiring `configure` is to avoid
// logging anything unless it's a real error
@@ -64,9 +61,7 @@ prototype.wrap = function (message, fn) {
try {
const result = fn.apply(this, arguments)
const then = result != null && result.then
return typeof then === 'function'
? then.call(result, warnAndRethrow)
: result
return typeof then === 'function' ? then.call(result, warnAndRethrow) : result
} catch (error) {
warnAndRethrow(error)
}

@@ -3,12 +3,7 @@ import LEVELS, { NAMES } from '../levels'
const { DEBUG, ERROR, FATAL, INFO, WARN } = LEVELS

let formatLevel, formatNamespace
if (
process.stdout !== undefined &&
process.stdout.isTTY &&
process.stderr !== undefined &&
process.stderr.isTTY
) {
if (process.stdout !== undefined && process.stdout.isTTY && process.stderr !== undefined && process.stderr.isTTY) {
const ansi = (style, str) => `\x1b[${style}m${str}\x1b[0m`

const LEVEL_STYLES = {
@@ -71,10 +66,7 @@ if (
// const g = f(3)
// const b = f(1)
// return ansi(`38;2;${r};${g};${b}`, namespace)
return ansi(
`1;38;5;${NAMESPACE_COLORS[Math.abs(hash) % NAMESPACE_COLORS.length]}`,
namespace
)
return ansi(`1;38;5;${NAMESPACE_COLORS[Math.abs(hash) % NAMESPACE_COLORS.length]}`, namespace)
}
} else {
formatLevel = str => NAMES[str]
@@ -84,21 +76,10 @@ if (
const consoleTransport = ({ data, level, namespace, message, time }) => {
const fn =
/* eslint-disable no-console */
level < INFO
? console.log
: level < WARN
? console.info
: level < ERROR
? console.warn
: console.error
level < INFO ? console.log : level < WARN ? console.info : level < ERROR ? console.warn : console.error
/* eslint-enable no-console */

const args = [
time.toISOString(),
formatNamespace(namespace),
formatLevel(level),
message,
]
const args = [time.toISOString(), formatNamespace(namespace), formatLevel(level), message]
if (data != null) {
args.push(data)
}

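The collapsed ternary in the console transport above simply buckets numeric levels into console methods. Spelled out, the dispatch is equivalent to something like this (assuming, as in the module, that the level constants increase with severity: DEBUG < INFO < WARN < ERROR < FATAL):

```js
// Equivalent level dispatch, written as explicit branches.
function pickConsoleMethod(level, { INFO, WARN, ERROR }) {
  if (level < INFO) return console.log // debug
  if (level < WARN) return console.info
  if (level < ERROR) return console.warn
  return console.error // error and fatal
}
```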
@@ -54,11 +54,7 @@ export default ({
transporter.sendMail(
{
subject: evalTemplate(subject, key =>
key === 'level'
? NAMES[log.level]
: key === 'time'
? log.time.toISOString()
: log[key]
key === 'level' ? NAMES[log.level] : key === 'time' ? log.time.toISOString() : log[key]
),
text: prettyFormat(log.data),
},

@@ -4,16 +4,14 @@ import escapeRegExp from 'lodash/escapeRegExp'

const TPL_RE = /\{\{(.+?)\}\}/g
export const evalTemplate = (tpl, data) => {
const getData =
typeof data === 'function' ? (_, key) => data(key) : (_, key) => data[key]
const getData = typeof data === 'function' ? (_, key) => data(key) : (_, key) => data[key]

return tpl.replace(TPL_RE, getData)
}

// -------------------------------------------------------------------

const compileGlobPatternFragment = pattern =>
pattern.split('*').map(escapeRegExp).join('.*')
const compileGlobPatternFragment = pattern => pattern.split('*').map(escapeRegExp).join('.*')

export const compileGlobPattern = pattern => {
const no = []

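To make the two helpers above concrete: `evalTemplate` substitutes `{{key}}` placeholders from a data object (or function), and `compileGlobPatternFragment` turns a `*` glob into a regular-expression fragment by escaping everything else. Expected behaviour, with illustrative values:

```js
evalTemplate('backup of {{name}} at {{time}}', { name: 'vm1', time: '12:00' })
// → 'backup of vm1 at 12:00'

compileGlobPatternFragment('*.example.com')
// → the regexp fragment .*\.example\.com (dots escaped by lodash/escapeRegExp)
```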
@@ -1,3 +1 @@
module.exports = require('../../@xen-orchestra/babel-config')(
require('./package.json')
)
module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))

@@ -20,9 +20,7 @@ const isIgnoredStaticProperty = name => name in IGNORED_STATIC_PROPERTIES
const ownKeys =
(typeof Reflect !== 'undefined' && Reflect.ownKeys) ||
(({ getOwnPropertyNames: names, getOwnPropertySymbols: symbols }) =>
symbols !== undefined ? obj => names(obj).concat(symbols(obj)) : names)(
Object
)
symbols !== undefined ? obj => names(obj).concat(symbols(obj)) : names)(Object)

// -------------------------------------------------------------------

@@ -50,10 +48,7 @@ const mixin = Mixins => Class => {
throw new Error(`${name}#${prop} is already defined`)
}

;(descriptors[prop] = getOwnPropertyDescriptor(
Mixin,
prop
)).enumerable = false // Object methods are enumerable but class methods are not.
;(descriptors[prop] = getOwnPropertyDescriptor(Mixin, prop)).enumerable = false // Object methods are enumerable but class methods are not.
}
})
defineProperties(prototype, descriptors)
@@ -81,11 +76,7 @@ const mixin = Mixins => Class => {
throw new Error(`${name}#${prop} is already defined`)
}

descriptors[prop] = getBoundPropertyDescriptor(
prototype,
prop,
mixinInstance
)
descriptors[prop] = getBoundPropertyDescriptor(prototype, prop, mixinInstance)
}
defineProperties(instance, descriptors)
}
@@ -101,8 +92,7 @@ const mixin = Mixins => Class => {
!(
isIgnoredStaticProperty(prop) &&
// if they already exist...
(descriptor = getOwnPropertyDescriptor(DecoratedClass, prop)) !==
undefined &&
(descriptor = getOwnPropertyDescriptor(DecoratedClass, prop)) !== undefined &&
// and are not configurable.
!descriptor.configurable
)

@@ -1,3 +1 @@
module.exports = require('../../@xen-orchestra/babel-config')(
require('./package.json')
)
module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))

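The descriptor manipulation in the mixin decorator above exists because methods copied from object literals are enumerable while class methods are not, as the inline comment notes. The core trick in isolation (a standalone illustration, not the decorator itself):

```js
// Why the decorator flips `enumerable`: object-literal methods enumerate,
// class methods do not, so copied descriptors are patched to match classes.
const Mixin = { greet() { return 'hi' } }
const target = {}
const descriptor = Object.getOwnPropertyDescriptor(Mixin, 'greet')
descriptor.enumerable = false
Object.defineProperty(target, 'greet', descriptor)
console.log(Object.keys(target)) // [] — greet is hidden from enumeration
console.log(target.greet()) // 'hi'
```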
@@ -20,19 +20,11 @@ export default {

pack: object => {
const version = object.header.version
return get(
OPENFLOW,
version,
`Unsupported OpenFlow version: ${version}`
).pack(object)
return get(OPENFLOW, version, `Unsupported OpenFlow version: ${version}`).pack(object)
},

unpack: (buffer, offset = 0) => {
const version = buffer.readUInt8(offset + scheme.offsets.version)
return get(
OPENFLOW,
version,
`Unsupported OpenFlow version: ${version}`
).unpack(buffer, offset)
return get(OPENFLOW, version, `Unsupported OpenFlow version: ${version}`).unpack(buffer, offset)
},
}

@@ -41,18 +41,11 @@ const ACTION = {
export default {
pack: (object, buffer = undefined, offset = 0) => {
const { type } = object
return get(ACTION, type, `Invalid action type: ${type}`).pack(
object,
buffer,
offset
)
return get(ACTION, type, `Invalid action type: ${type}`).pack(object, buffer, offset)
},

unpack: (buffer, offset = 0) => {
const type = buffer.readUInt16BE(offset + of.offsets.actionHeader.type)
return get(ACTION, type, `Invalid action type: ${type}`).unpack(
buffer,
offset
)
return get(ACTION, type, `Invalid action type: ${type}`).unpack(buffer, offset)
},
}

@@ -34,16 +34,11 @@ export default {

pack: object => {
const type = object.header.type
return get(MESSAGE, type, `Invalid OpenFlow message type: ${type}`).pack(
object
)
return get(MESSAGE, type, `Invalid OpenFlow message type: ${type}`).pack(object)
},

unpack: (buffer, offset = 0) => {
const type = buffer.readUInt8(offset + of.offsets.header.type)
return get(MESSAGE, type, `Invalid OpenFlow message type: ${type}`).unpack(
buffer,
offset
)
return get(MESSAGE, type, `Invalid OpenFlow message type: ${type}`).unpack(buffer, offset)
},
}

@@ -37,11 +37,7 @@ const SIZES = {

// -----------------------------------------------------------------------------

const TYPES = [
of.instructionType.clearActions,
of.instructionType.writeActions,
of.instructionType.applyActions,
]
const TYPES = [of.instructionType.clearActions, of.instructionType.writeActions, of.instructionType.applyActions]
const OFFSETS = of.offsets.instructionActions

const PAD_LENGTH = 4
@@ -57,11 +53,7 @@ export default {
actions.forEach(action => {
assert(Object.values(of.actionType).includes(action.type))
// TODO: manage experimenter
object.len += get(
SIZES,
action.type,
`Invalid action type: ${action.type}`
)
object.len += get(SIZES, action.type, `Invalid action type: ${action.type}`)
})

buffer = buffer !== undefined ? buffer : Buffer.alloc(object.len)

@@ -26,18 +26,11 @@ const OFFSETS = of.offsets.instruction
export default {
pack: (object, buffer = undefined, offset = 0) => {
const { type } = object
return get(INSTRUCTION, type, `Invalid instruction type: ${type}`).pack(
object,
buffer,
offset
)
return get(INSTRUCTION, type, `Invalid instruction type: ${type}`).pack(object, buffer, offset)
},

unpack: (buffer = undefined, offset = 0) => {
const type = buffer.readUInt16BE(offset + OFFSETS.type)
return get(INSTRUCTION, type, `Invalid instruction type: ${type}`).unpack(
buffer,
offset
)
return get(INSTRUCTION, type, `Invalid instruction type: ${type}`).unpack(buffer, offset)
},
}

@@ -33,12 +33,7 @@ export default {
const dataSize = header.length - of.sizes.header
if (dataSize > 0) {
object.data = Buffer.alloc(dataSize)
buffer.copy(
object.data,
0,
offset + OFFSETS.data,
offset + OFFSETS.data + dataSize
)
buffer.copy(object.data, 0, offset + OFFSETS.data, offset + OFFSETS.data + dataSize)
}

return object

@@ -66,12 +66,7 @@ export default {
const dataSize = header.length - of.sizes.errorMsg
if (dataSize > 0) {
object.data = Buffer.alloc(dataSize)
buffer.copy(
object.data,
0,
offset + OFFSETS.data,
offset + OFFSETS.data + dataSize
)
buffer.copy(object.data, 0, offset + OFFSETS.data, offset + OFFSETS.data + dataSize)
}

return object

@@ -13,15 +13,7 @@ const PAD_LENGTH = 3

export default {
pack: object => {
const {
header,
datapath_id: did,
n_buffers: nBufs,
n_tables: nTables,
capabilities,
reserved,
ports,
} = object
const { header, datapath_id: did, n_buffers: nBufs, n_tables: nTables, capabilities, reserved, ports } = object
assert(header.type === of.type.featuresReply)

header.length = of.sizes.switchFeatures + ports.length * of.sizes.port
@@ -49,11 +41,7 @@ export default {
assert(header.type === of.type.featuresReply)

const object = { header }
object.datapath_id = buffer.toString(
'hex',
offset + OFFSETS.datapathId,
offset + OFFSETS.datapathId + 8
)
object.datapath_id = buffer.toString('hex', offset + OFFSETS.datapathId, offset + OFFSETS.datapathId + 8)
object.n_buffers = buffer.readUInt32BE(offset + OFFSETS.nBuffers)
object.n_tables = buffer.readUInt8(offset + OFFSETS.nTables)

@@ -63,9 +51,7 @@ export default {
object.ports = []
const nPorts = (header.length - of.sizes.switchFeatures) / of.sizes.port
for (let i = 0; i < nPorts; ++i) {
object.ports.push(
ofPort.unpack(buffer, offset + OFFSETS.ports + i * of.sizes.port)
)
object.ports.push(ofPort.unpack(buffer, offset + OFFSETS.ports + i * of.sizes.port))
}

return object

@@ -76,18 +76,10 @@ export default {
// fill header length
header.length = of.sizes.flowMod
instructions.forEach(instruction => {
header.length += get(
INSTRUCTION_SIZE,
instruction.type,
`Invalid instruction type: ${instruction.type}`
)
header.length += get(INSTRUCTION_SIZE, instruction.type, `Invalid instruction type: ${instruction.type}`)
const { actions = [] } = instruction
actions.forEach(action => {
header.length += get(
ACTION_SIZE,
action.type,
`Invalid instruction type: ${action.type}`
)
header.length += get(ACTION_SIZE, action.type, `Invalid instruction type: ${action.type}`)
})
})

@@ -99,24 +91,12 @@ export default {
if (cookie_mask !== undefined) {
cookie_mask.copy(buffer, offset + OFFSETS.cookieMask)
} else {
buffer.fill(
0x00,
offset + OFFSETS.cookie_mask,
offset + OFFSETS.cookieMask + COOKIE_LENGTH
)
buffer.fill(0x00, offset + OFFSETS.cookie_mask, offset + OFFSETS.cookieMask + COOKIE_LENGTH)
}
cookie.copy(buffer, offset + OFFSETS.cookie)
} else {
buffer.fill(
0x00,
offset + OFFSETS.cookie,
offset + OFFSETS.cookie + COOKIE_LENGTH
)
buffer.fill(
0xff,
offset + OFFSETS.cookieMask,
offset + OFFSETS.cookieMask + COOKIE_LENGTH
)
buffer.fill(0x00, offset + OFFSETS.cookie, offset + OFFSETS.cookie + COOKIE_LENGTH)
buffer.fill(0xff, offset + OFFSETS.cookieMask, offset + OFFSETS.cookieMask + COOKIE_LENGTH)
}

buffer.writeUInt8(table_id, offset + OFFSETS.tableId)
@@ -149,12 +129,7 @@ export default {
const object = { header }

object.cookie = Buffer.alloc(COOKIE_LENGTH)
buffer.copy(
object.cookie,
0,
offset + OFFSETS.cookie,
offset + OFFSETS.cookie + COOKIE_LENGTH
)
buffer.copy(object.cookie, 0, offset + OFFSETS.cookie, offset + OFFSETS.cookie + COOKIE_LENGTH)
if (
!uIntHelper.isUInt64None([
buffer.readUInt32BE(offset + OFFSETS.cookieMask),
@@ -162,12 +137,7 @@ export default {
])
) {
object.cookie_mask = Buffer.alloc(COOKIE_LENGTH)
buffer.copy(
object.cookie_mask,
0,
offset + OFFSETS.cookieMask,
offset + OFFSETS.cookieMask + COOKIE_LENGTH
)
buffer.copy(object.cookie_mask, 0, offset + OFFSETS.cookieMask, offset + OFFSETS.cookieMask + COOKIE_LENGTH)
}

object.table_id = buffer.readUInt8(offset + OFFSETS.tableId)

@@ -35,58 +35,26 @@ export default {

if (object.dl_src !== undefined) {
if (object.dl_src_mask !== undefined) {
addressParser.stringToEth(
object.dl_src_mask,
buffer,
offset + OFFSETS.dlSrcMask
)
addressParser.stringToEth(object.dl_src_mask, buffer, offset + OFFSETS.dlSrcMask)
} else {
buffer.fill(
0x00,
offset + OFFSETS.dlSrcMask,
offset + OFFSETS.dlSrcMask + of.ethAddrLen
)
buffer.fill(0x00, offset + OFFSETS.dlSrcMask, offset + OFFSETS.dlSrcMask + of.ethAddrLen)
}
addressParser.stringToEth(object.dl_src, buffer, offset + OFFSETS.dlSrc)
} else {
buffer.fill(
0x00,
offset + OFFSETS.dlSrc,
offset + OFFSETS.dlSrc + of.ethAddrLen
)
buffer.fill(
0xff,
offset + OFFSETS.dlSrcMask,
offset + OFFSETS.dlSrcMask + of.ethAddrLen
)
buffer.fill(0x00, offset + OFFSETS.dlSrc, offset + OFFSETS.dlSrc + of.ethAddrLen)
buffer.fill(0xff, offset + OFFSETS.dlSrcMask, offset + OFFSETS.dlSrcMask + of.ethAddrLen)
}

if (object.dl_dst !== undefined) {
if (object.dl_dst_mask !== undefined) {
addressParser.stringToEth(
object.dl_dst_mask,
buffer,
offset + OFFSETS.dlDstMask
)
addressParser.stringToEth(object.dl_dst_mask, buffer, offset + OFFSETS.dlDstMask)
} else {
buffer.fill(
0x00,
offset + OFFSETS.dlDstMask,
offset + OFFSETS.dlDstMask + of.ethAddrLen
)
buffer.fill(0x00, offset + OFFSETS.dlDstMask, offset + OFFSETS.dlDstMask + of.ethAddrLen)
}
addressParser.stringToEth(object.dl_dst, buffer, offset + OFFSETS.dlDst)
} else {
buffer.fill(
0x00,
offset + OFFSETS.dlDst,
offset + OFFSETS.dlDst + of.ethAddrLen
)
buffer.fill(
0xff,
offset + OFFSETS.dlDstMask,
offset + OFFSETS.dlDstMask + of.ethAddrLen
)
buffer.fill(0x00, offset + OFFSETS.dlDst, offset + OFFSETS.dlDst + of.ethAddrLen)
buffer.fill(0xff, offset + OFFSETS.dlDstMask, offset + OFFSETS.dlDstMask + of.ethAddrLen)
}

let dlVlan = 0
@@ -133,58 +101,26 @@ export default {

if (object.nw_src !== undefined) {
if (object.nw_src_mask !== undefined) {
addressParser.stringToip4(
object.nw_src_mask,
buffer,
offset + OFFSETS.nwSrcMask
)
addressParser.stringToip4(object.nw_src_mask, buffer, offset + OFFSETS.nwSrcMask)
} else {
buffer.fill(
0x00,
offset + OFFSETS.nwSrcMask,
offset + OFFSETS.nwSrcMask + IP4_ADDR_LEN
)
buffer.fill(0x00, offset + OFFSETS.nwSrcMask, offset + OFFSETS.nwSrcMask + IP4_ADDR_LEN)
}
addressParser.stringToip4(object.nw_src, buffer, offset + OFFSETS.nwSrc)
} else {
buffer.fill(
0x00,
offset + OFFSETS.nwSrc,
offset + OFFSETS.nwSrc + IP4_ADDR_LEN
)
buffer.fill(
0xff,
offset + OFFSETS.nwSrcMask,
offset + OFFSETS.nwSrcMask + IP4_ADDR_LEN
)
buffer.fill(0x00, offset + OFFSETS.nwSrc, offset + OFFSETS.nwSrc + IP4_ADDR_LEN)
buffer.fill(0xff, offset + OFFSETS.nwSrcMask, offset + OFFSETS.nwSrcMask + IP4_ADDR_LEN)
}

if (object.nw_dst !== undefined) {
if (object.nw_dst_mask !== undefined) {
addressParser.stringToip4(
object.nw_dst_mask,
buffer,
offset + OFFSETS.nwDstMask
)
addressParser.stringToip4(object.nw_dst_mask, buffer, offset + OFFSETS.nwDstMask)
} else {
buffer.fill(
0x00,
offset + OFFSETS.nwDstMask,
offset + OFFSETS.nwDstMask + IP4_ADDR_LEN
)
buffer.fill(0x00, offset + OFFSETS.nwDstMask, offset + OFFSETS.nwDstMask + IP4_ADDR_LEN)
}
addressParser.stringToip4(object.nw_dst, buffer, offset + OFFSETS.nwDst)
} else {
buffer.fill(
0x00,
offset + OFFSETS.nwDst,
offset + OFFSETS.nwDst + IP4_ADDR_LEN
)
buffer.fill(
0xff,
offset + OFFSETS.nwDstMask,
offset + OFFSETS.nwDstMask + IP4_ADDR_LEN
)
buffer.fill(0x00, offset + OFFSETS.nwDst, offset + OFFSETS.nwDst + IP4_ADDR_LEN)
buffer.fill(0xff, offset + OFFSETS.nwDstMask, offset + OFFSETS.nwDstMask + IP4_ADDR_LEN)
}

let tpSrc = 0
@@ -230,29 +166,12 @@ export default {
offset + OFFSETS.metadataMask + METADATA_LENGTH
)
} else {
buffer.fill(
0x00,
offset + OFFSETS.metadataMask,
offset + OFFSETS.metadataMask + METADATA_LENGTH
)
buffer.fill(0x00, offset + OFFSETS.metadataMask, offset + OFFSETS.metadataMask + METADATA_LENGTH)
}
buffer.copy(
object.metadata,
0,
offset + OFFSETS.metadata,
offset + OFFSETS.metadata + METADATA_LENGTH
)
buffer.copy(object.metadata, 0, offset + OFFSETS.metadata, offset + OFFSETS.metadata + METADATA_LENGTH)
} else {
buffer.fill(
0x00,
offset + OFFSETS.metadata,
offset + OFFSETS.metadata + METADATA_LENGTH
)
buffer.fill(
0xff,
offset + OFFSETS.metadataMask,
offset + OFFSETS.metadataMask + METADATA_LENGTH
)
buffer.fill(0x00, offset + OFFSETS.metadata, offset + OFFSETS.metadata + METADATA_LENGTH)
buffer.fill(0xff, offset + OFFSETS.metadataMask, offset + OFFSETS.metadataMask + METADATA_LENGTH)
}

buffer.writeUInt32BE(wildcards, offset + OFFSETS.wildcards)
@@ -270,28 +189,20 @@ export default {

// Wildcards indicate which value to use for the match.
// if `wildcards & of.wildcards.<value>` === 0 then `value` is not wildcarded and must be used.
const wildcards = (object.wildcards = buffer.readUInt32BE(
offset + OFFSETS.wildcards
))
const wildcards = (object.wildcards = buffer.readUInt32BE(offset + OFFSETS.wildcards))
if ((wildcards & WILDCARDS.inPort) === 0) {
object.in_port = buffer.readUInt32BE(offset + OFFSETS.inPort)
}

if (!addressParser.isEthMaskAll(buffer, offset + OFFSETS.dlSrcMask)) {
if (!addressParser.isEthMaskNone(buffer, offset + OFFSETS.dlSrcMask)) {
object.dl_src_mask = addressParser.ethToString(
buffer,
offset + OFFSETS.dlSrcMask
)
object.dl_src_mask = addressParser.ethToString(buffer, offset + OFFSETS.dlSrcMask)
}
object.dl_src = addressParser.ethToString(buffer, offset + OFFSETS.dlSrc)
}
if (!addressParser.isEthMaskAll(buffer, offset + OFFSETS.dlDstMask)) {
if (!addressParser.isEthMaskNone(buffer, offset + OFFSETS.dlDstMask)) {
object.dl_dst_mask = addressParser.ethToString(
buffer,
offset + OFFSETS.dlDstMask
)
object.dl_dst_mask = addressParser.ethToString(buffer, offset + OFFSETS.dlDstMask)
}
object.dl_dst = addressParser.ethToString(buffer, offset + OFFSETS.dlDst)
}
@@ -315,19 +226,13 @@ export default {

if (!addressParser.isIp4MaskAll(buffer, offset + OFFSETS.nwSrcMask)) {
if (!addressParser.isIp4MaskNone(buffer, offset + OFFSETS.nwSrcMask)) {
object.nw_src_mask = addressParser.ip4ToString(
buffer,
offset + OFFSETS.nwSrcMask
)
object.nw_src_mask = addressParser.ip4ToString(buffer, offset + OFFSETS.nwSrcMask)
}
object.nw_src = addressParser.ip4ToString(buffer, offset + OFFSETS.nwSrc)
}
if (!addressParser.isIp4MaskAll(buffer, offset + OFFSETS.nwDstMask)) {
if (!addressParser.isIp4MaskNone(buffer, offset + OFFSETS.nwDstMask)) {
object.nw_dst_mask = addressParser.ip4ToString(
buffer,
offset + OFFSETS.nwDstMask
)
object.nw_dst_mask = addressParser.ip4ToString(buffer, offset + OFFSETS.nwDstMask)
}
object.nw_dst = addressParser.ip4ToString(buffer, offset + OFFSETS.nwDst)
}
@@ -361,12 +266,7 @@ export default {
)
}
object.metadata = Buffer.alloc(METADATA_LENGTH)
buffer.copy(
object.metadata,
0,
offset + OFFSETS.metadata,
offset + OFFSETS.metadata + METADATA_LENGTH
)
buffer.copy(object.metadata, 0, offset + OFFSETS.metadata, offset + OFFSETS.metadata + METADATA_LENGTH)
}

return object

@@ -32,11 +32,7 @@ export default {
buffer.fill(0, offset + OFFSETS.pad, offset + OFFSETS.pad + PAD2_LENGTH)
buffer.write(name, offset + OFFSETS.name, of.maxPortNameLen)
if (name.length < of.maxPortNameLen) {
buffer.fill(
0,
offset + OFFSETS.name + name.length,
offset + OFFSETS.name + of.maxPortNameLen
)
buffer.fill(0, offset + OFFSETS.name + name.length, offset + OFFSETS.name + of.maxPortNameLen)
}

buffer.writeUInt32BE(config, offset + OFFSETS.config)
@@ -56,11 +52,7 @@ export default {
body.port_no = buffer.readUInt32BE(offset + OFFSETS.portNo)
body.hw_addr = addressParser.ethToString(buffer, offset + OFFSETS.hwAddr)

const name = buffer.toString(
'utf8',
offset + OFFSETS.name,
offset + OFFSETS.name + of.maxPortNameLen
)
const name = buffer.toString('utf8', offset + OFFSETS.name, offset + OFFSETS.name + of.maxPortNameLen)
body.name = name.substr(0, name.indexOf('\0')) // Remove useless 0 if name.length < of.maxPortNameLen

body.config = buffer.readUInt32BE(offset + OFFSETS.config)

@@ -5,12 +5,10 @@ import util from 'util'

export default {
isEthMaskNone: (buffer, offset) =>
buffer.readUInt32BE(offset) === 0x00000000 &&
buffer.readUInt16BE(offset + 4) === 0x0000,
buffer.readUInt32BE(offset) === 0x00000000 && buffer.readUInt16BE(offset + 4) === 0x0000,

isEthMaskAll: (buffer, offset) =>
buffer.readUInt32BE(offset) === 0xffffffff &&
buffer.readUInt16BE(offset + 4) === 0xffff,
buffer.readUInt32BE(offset) === 0xffffffff && buffer.readUInt16BE(offset + 4) === 0xffff,

isIp4MaskNone: (buffer, offset) => buffer.readUInt32BE(offset) === 0x00000000,

@@ -2,10 +2,6 @@ import assert from 'assert'

export default function get(map, key, errorMsg = undefined) {
const value = map[String(key)]
assert.notStrictEqual(
value,
undefined,
errorMsg !== undefined ? errorMsg : `${key} is invalid`
)
assert.notStrictEqual(value, undefined, errorMsg !== undefined ? errorMsg : `${key} is invalid`)
return value
}

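`get` is the small dispatch helper used throughout the OpenFlow codecs above: it asserts that the key exists in the map before returning the value, so a bad type fails loudly instead of propagating `undefined`. Illustrative usage:

```js
const SIZES = { 0: 8, 11: 8 } // example map, values invented
get(SIZES, 0) // → 8
get(SIZES, 99, 'Invalid action type: 99') // throws AssertionError with that message
```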
@@ -13,13 +13,9 @@ const openssl = (cmd, args, { input, ...opts } = {}) =>
exports.genSelfSignedCert = async ({ days = 360 } = {}) => {
const key = await openssl('genrsa', ['2048'])
return {
cert: await openssl(
'req',
['-batch', '-new', '-key', '-', '-x509', '-days', String(days), '-nodes'],
{
input: key,
}
),
cert: await openssl('req', ['-batch', '-new', '-key', '-', '-x509', '-days', String(days), '-nodes'], {
input: key,
}),
key,
}
}

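Calling the helper above is then a one-liner; both values come back as PEM strings from the promisified `openssl` invocations (a usage sketch, with the 365-day value chosen arbitrarily):

```js
const { cert, key } = await genSelfSignedCert({ days: 365 })
// `key` is a PEM RSA private key, `cert` the matching self-signed x509 certificate
```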
@@ -1,3 +1 @@
module.exports = require('../../@xen-orchestra/babel-config')(
require('./package.json')
)
module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))

@@ -3,10 +3,7 @@ import escapeRegExp from 'lodash/escapeRegExp'
const compareLengthDesc = (a, b) => b.length - a.length

export function compileTemplate(pattern, rules) {
const matches = Object.keys(rules)
.sort(compareLengthDesc)
.map(escapeRegExp)
.join('|')
const matches = Object.keys(rules).sort(compareLengthDesc).map(escapeRegExp).join('|')
const regExp = new RegExp(`\\\\(?:\\\\|${matches})|${matches}`, 'g')
return (...params) =>
pattern.replace(regExp, match => {

@@ -2,13 +2,10 @@
import { compileTemplate } from '.'

it("correctly replaces the template's variables", () => {
const replacer = compileTemplate(
'{property}_\\{property}_\\\\{property}_{constant}_%_FOO',
{
'{property}': obj => obj.name,
'{constant}': 1235,
'%': (_, i) => i,
}
)
const replacer = compileTemplate('{property}_\\{property}_\\\\{property}_{constant}_%_FOO', {
'{property}': obj => obj.name,
'{constant}': 1235,
'%': (_, i) => i,
})
expect(replacer({ name: 'bar' }, 5)).toBe('bar_{property}_\\bar_1235_5_FOO')
})

@@ -1,6 +1,6 @@
{
"name": "@xen-orchestra/upload-ova",
"version": "0.1.3",
"version": "0.1.4",
"license": "AGPL-3.0-or-later",
"description": "Basic CLI to upload ova files to Xen-Orchestra",
"keywords": [
@@ -35,6 +35,7 @@
"dependencies": {
"chalk": "^4.1.0",
"exec-promise": "^0.7.0",
"form-data": "^3.0.0",
"fs-extra": "^9.0.0",
"fs-promise": "^2.0.3",
"get-stream": "^6.0.0",
@@ -48,8 +49,8 @@
"pw": "^0.0.4",
"strip-indent": "^3.0.0",
"xdg-basedir": "^4.0.0",
"xo-lib": "^0.9.0",
"xo-vmdk-to-vhd": "^1.3.1"
"xo-lib": "^0.10.1",
"xo-vmdk-to-vhd": "^2.0.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",

@@ -4,6 +4,7 @@

import chalk from 'chalk'
import execPromise from 'exec-promise'
import FormData from 'form-data'
import { createReadStream } from 'fs'
import { stat } from 'fs-promise'
import getStream from 'get-stream'
@@ -23,11 +24,7 @@ import Xo from 'xo-lib'
import { parseOVAFile } from 'xo-vmdk-to-vhd'

import pkg from '../package'
import {
load as loadConfig,
set as setConfig,
unset as unsetConfig,
} from './config'
import { load as loadConfig, set as setConfig, unset as unsetConfig } from './config'

function help() {
return stripIndent(
@@ -121,11 +118,7 @@ function nodeStringDecoder(buffer, encoder) {

export async function inspect(args) {
const file = args[0]
const data = await parseOVAFile(
new NodeParsableFile(file, (await stat(file)).size),
nodeStringDecoder,
true
)
const data = await parseOVAFile(new NodeParsableFile(file, (await stat(file)).size), nodeStringDecoder, true)
console.log('file metadata:', data)
}

@@ -159,14 +152,10 @@ export async function upload(args) {
overrides = parseOverride(args)
}

const data = await parseOVAFile(
new NodeParsableFile(file, (await stat(file)).size),
nodeStringDecoder
)
const data = await parseOVAFile(new NodeParsableFile(file, (await stat(file)).size), nodeStringDecoder)
const params = { sr: srId }
const xo = await connect()
const getXoObject = async filter =>
Object.values(await xo.call('xo.getAllObjects', { filter }))[0]
const getXoObject = async filter => Object.values(await xo.call('xo.getAllObjects', { filter }))[0]
const sr = await getXoObject({ id: srId })
const pool = await getXoObject({ id: sr.$poolId })
const master = await getXoObject({ id: pool.master })
@@ -199,8 +188,17 @@ export async function upload(args) {
let keys, key, url
if (isObject(result) && (keys = getKeys(result)).length === 1) {
key = keys[0]

if (key === '$sendTo') {
const formData = new FormData()
if (data.tables !== undefined) {
for (const k in data.tables) {
const tables = await data.tables[k]
delete data.tables[k]
for (const l in tables) {
formData.append(l, Buffer.from(tables[l]), k)
}
}
}
if (typeof file !== 'string') {
// eslint-disable-next-line no-throw-literal
throw 'file parameter should be a path'
@@ -218,16 +216,9 @@ export async function upload(args) {
printProgress
),
])

formData.append('file', input, { filename: 'file', knownLength: length })
try {
return await hrp
.post(url.toString(), {
body: input,
headers: {
'content-length': length,
},
})
.readAll('utf-8')
return await hrp.post(url.toString(), { body: formData, headers: formData.getHeaders() }).readAll('utf-8')
} catch (e) {
console.log('ERROR', e)
console.log('ERROR content', await e.response.readAll('utf-8'))
@@ -260,10 +251,7 @@ export class NodeParsableFile {
)
// crazy stuff to get a browser-compatible ArrayBuffer from a node buffer
// https://stackoverflow.com/a/31394257/72637
return result.buffer.slice(
result.byteOffset,
result.byteOffset + result.byteLength
)
return result.buffer.slice(result.byteOffset, result.byteOffset + result.byteLength)
}
}

@@ -294,9 +282,7 @@ export default async function main(args) {
if (!args || !args.length || args[0] === '-h' || args[0] === '--help') {
return help()
}
const fnName = args[0].replace(/^--|-\w/g, match =>
match === '--' ? '' : match[1].toUpperCase()
)
const fnName = args[0].replace(/^--|-\w/g, match => (match === '--' ? '' : match[1].toUpperCase()))
if (fnName in exports) {
return exports[fnName](args.slice(1))
}

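The `fnName` replace at the end of the hunk above maps CLI-style command names onto the module's exported functions: a leading `--` is stripped and `-x` sequences are camel-cased. Expected behaviour (hypothetical invocations):

```js
'--register'.replace(/^--|-\w/g, m => (m === '--' ? '' : m[1].toUpperCase())) // → 'register'
'unset-config'.replace(/^--|-\w/g, m => (m === '--' ? '' : m[1].toUpperCase())) // → 'unsetConfig'
```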
CHANGELOG.md (181 lines changed)
@@ -1,8 +1,169 @@
# ChangeLog

## **5.51.1** (2020-10-14)
## **5.55.1** (2021-02-05)


<img id="latest" src="https://badgen.net/badge/channel/latest/yellow" alt="Channel: latest" />

### Bug fixes

- [LDAP] "Synchronize LDAP groups" button: fix imported LDAP users not being correctly added or removed from groups in some cases (PR [#5545](https://github.com/vatesfr/xen-orchestra/pull/5545))
- [VM migration] Fix `VIF_NOT_IN_MAP` error (PR [5544](https://github.com/vatesfr/xen-orchestra/pull/5544))

### Released packages

- xo-server-auth-ldap 0.10.2
- xo-server 5.74.1

## **5.55.0** (2021-01-29)

### Highlights

- [Web hooks] Possibility to wait a response from the server before continuing [#4948](https://github.com/vatesfr/xen-orchestra/issues/4948) (PR [#5420](https://github.com/vatesfr/xen-orchestra/pull/5420))
- [XOA/update] Add a link to the channel's changelog (PR [#5494](https://github.com/vatesfr/xen-orchestra/pull/5494))
- Assign custom date-time fields on pools, hosts, SRs, and VMs in advanced tab [#4730](https://github.com/vatesfr/xen-orchestra/issues/4730) (PR [#5473](https://github.com/vatesfr/xen-orchestra/pull/5473))
- [Health] Show duplicated MAC addresses with their VIFs, VMs and networks [#5448](https://github.com/vatesfr/xen-orchestra/issues/5448) (PR [#5468](https://github.com/vatesfr/xen-orchestra/pull/5468))
- [Pool/advanced] Ability to define default migration network [#3788](https://github.com/vatesfr/xen-orchestra/issues/3788#issuecomment-743207834) (PR [#5465](https://github.com/vatesfr/xen-orchestra/pull/5465))
- [Proxy] Support metadata backups (PRs [#5499](https://github.com/vatesfr/xen-orchestra/pull/5499) [#5517](https://github.com/vatesfr/xen-orchestra/pull/5517) [#5519](https://github.com/vatesfr/xen-orchestra/pull/5519) [#5520](https://github.com/vatesfr/xen-orchestra/pull/5520))
- [VM/console] Add button to connect to the VM via the local RDP client [#5495](https://github.com/vatesfr/xen-orchestra/issues/5495) (PR [#5523](https://github.com/vatesfr/xen-orchestra/pull/5523))

### Enhancements

- [Host/stats] Show interfaces' names in graph "Network throughput" instead of PIFs' indices (PR [#5483](https://github.com/vatesfr/xen-orchestra/pull/5483))
- [Metadata backups] Ability to link a backup to a proxy (PR [#4206](https://github.com/vatesfr/xen-orchestra/pull/4206))
- [VM] Ability to set guest secure boot (guest secure boot is available soon in XCP-ng) [#5502](https://github.com/vatesfr/xen-orchestra/issues/5502) (PR [#5527](https://github.com/vatesfr/xen-orchestra/pull/5527))
- [Proxy] Improve upgrade feedback (PR [#5525](https://github.com/vatesfr/xen-orchestra/pull/5525))

### Bug fixes

- [VM/network] Change VIF's locking mode automatically to `locked` when adding allowed IPs (PR [#5472](https://github.com/vatesfr/xen-orchestra/pull/5472))
- [Backup Reports] Don't hide errors during plugin test [#5486](https://github.com/vatesfr/xen-orchestra/issues/5486) (PR [#5491](https://github.com/vatesfr/xen-orchestra/pull/5491))
- [Backup reports] Fix malformed sent email in case of multiple VMs (PR [#5479](https://github.com/vatesfr/xen-orchestra/pull/5479))
- [Restore/metadata] Ignore disabled remotes on listing backups (PR [#5504](https://github.com/vatesfr/xen-orchestra/pull/5504))
- [VM/network] Change VIF's locking mode automatically to `network_default` when changing network (PR [#5500](https://github.com/vatesfr/xen-orchestra/pull/5500))
- [Backup/S3] Fix `TimeoutError: Connection timed out after 120000ms` (PR [#5456](https://github.com/vatesfr/xen-orchestra/pull/5456))
- [New SR/reattach SR] Fix SR not being properly reattached to hosts [#4546](https://github.com/vatesfr/xen-orchestra/issues/4546) (PR [#5488](https://github.com/vatesfr/xen-orchestra/pull/5488))
- [Home/pool] Missing patches warning: fix 1 patch showing as missing in case of error [#4922](https://github.com/vatesfr/xen-orchestra/issues/4922)
- [Proxy/remote] Fix error not updated on remote test (PR [#5514](https://github.com/vatesfr/xen-orchestra/pull/5514))
- [Home/SR] Sort SR usage in % instead of bytes [#5463](https://github.com/vatesfr/xen-orchestra/issues/5463) (PR [#5513](https://github.com/vatesfr/xen-orchestra/pull/5513))
- [VM migration] Fix `SR_NOT_ATTACHED` error when migration network is selected (PR [#5516](https://github.com/vatesfr/xen-orchestra/pull/5516))

### Released packages

- @xen-orchestra/fs 0.12.1
- xo-server-backup-reports 0.16.8
- xo-server 5.74.0
- xo-web 5.77.0
- xo-server-web-hooks 0.3.0

## **5.54.0** (2020-12-29)

<img id="stable" src="https://badgen.net/badge/channel/stable/green" alt="Channel: stable" />

### Highlights

- [Home] Ability to sort VMs by total disks physical usage (PR [#5418](https://github.com/vatesfr/xen-orchestra/pull/5418))
- [Home/VM] Ability to choose network for bulk migration within a pool (PR [#5427](https://github.com/vatesfr/xen-orchestra/pull/5427))
- [Host] Ability to set host control domain memory [#2218](https://github.com/vatesfr/xen-orchestra/issues/2218) (PR [#5437](https://github.com/vatesfr/xen-orchestra/pull/5437))
- [Patches] Rolling pool update: automatically patch and restart a whole pool by live migrating running VMs back and forth as needed [#5286](https://github.com/vatesfr/xen-orchestra/issues/5286) (PR [#5430](https://github.com/vatesfr/xen-orchestra/pull/5430))
- [Host] Replace `disabled/enabled state` by `maintenance mode` (PR [#5421](https://github.com/vatesfr/xen-orchestra/pull/5421))
- [Dashboard/Overview] Filter out `udev` SRs [#5423](https://github.com/vatesfr/xen-orchestra/issues/5423) (PR [#5453](https://github.com/vatesfr/xen-orchestra/pull/5453))

### Enhancements

- [Plugins] Add user feedback when a plugin test finishes successfully (PR [#5409](https://github.com/vatesfr/xen-orchestra/pull/5409))
- [New HBA SR] Show LUN serial and id in LUN selector (PR [#5422](https://github.com/vatesfr/xen-orchestra/pull/5422))
- [Proxy] Ability to delete VM backups (PR [#5428](https://github.com/vatesfr/xen-orchestra/pull/5428))
- [VM/disks, SR/disks] Destroy/forget VDIs: improve tooltip messages (PR [#5435](https://github.com/vatesfr/xen-orchestra/pull/5435))

### Bug fixes

- [Host] Fix `an error has occurred` on accessing a host's page (PR [#5417](https://github.com/vatesfr/xen-orchestra/pull/5417))

### Released packages

- xo-web 5.76.0
- xo-server 5.73.0

## **5.53.1** (2020-12-10)

### Bug fixes

- [OVA/import] Fix OVA CLI import tool (PR [#5432](https://github.com/vatesfr/xen-orchestra/pull/5432))
- [Jobs] Fix `Cannot read property id of undefined` error when running a job without a schedule [#5425](https://github.com/vatesfr/xen-orchestra/issues/5425) (PR [#5426](https://github.com/vatesfr/xen-orchestra/pull/5426))

### Released packages

- @xen-orchestra/upload-ova 0.1.4
- xo-server 5.72.0

## **5.53.0** (2020-11-30)

### Enhancements

- [LDAP] Prevent LDAP-provided groups from being edited from XO [#1884](https://github.com/vatesfr/xen-orchestra/issues/1884) (PR [#5351](https://github.com/vatesfr/xen-orchestra/pull/5351))
- [Licensing] Allow Free and Starter users to copy VMs and create a VM from snapshot on the same pool [#4890](https://github.com/vatesfr/xen-orchestra/issues/4890) (PR [5333](https://github.com/vatesfr/xen-orchestra/pull/5333))
- [SR] Use SR type `zfs` instead of `file` for ZFS storage repositories (PR [5302](https://github.com/vatesfr/xen-orchestra/pull/5330))
- [Dashboard/Health] List VMs with missing or outdated guest tools (PR [#5376](https://github.com/vatesfr/xen-orchestra/pull/5376))
- [VIF] Ability for admins to set any allowed IPs, including IPv6 and IPs that are not in an IP pool [#2535](https://github.com/vatesfr/xen-orchestra/issues/2535) [#1872](https://github.com/vatesfr/xen-orchestra/issues/1872) (PR [#5367](https://github.com/vatesfr/xen-orchestra/pull/5367))
- [Proxy] Ability to restore a file from VM backup (PR [#5359](https://github.com/vatesfr/xen-orchestra/pull/5359))
- [Web Hooks] `backupNg.runJob` is now triggered by scheduled runs [#5205](https://github.com/vatesfr/xen-orchestra/issues/5205) (PR [#5360](https://github.com/vatesfr/xen-orchestra/pull/5360))
- [Licensing] Add trial end information banner (PR [#5374](https://github.com/vatesfr/xen-orchestra/pull/5374))
- Assign custom fields on pools, hosts, SRs, and VMs in advanced tab [#4730](https://github.com/vatesfr/xen-orchestra/issues/4730) (PR [#5387](https://github.com/vatesfr/xen-orchestra/pull/5387))
- Ability to change the number of items displayed per table or page (PR [#5355](https://github.com/vatesfr/xen-orchestra/pull/5355))
- [VM] Handle setting memory when DMC is disabled [#4978](https://github.com/vatesfr/xen-orchestra/issues/4978) & [#5326](https://github.com/vatesfr/xen-orchestra/issues/5326) (PR [#5412](https://github.com/vatesfr/xen-orchestra/pull/5412))

### Bug fixes

- [Remotes/NFS] Only mount with `vers=3` when no other options [#4940](https://github.com/vatesfr/xen-orchestra/issues/4940) (PR [#5354](https://github.com/vatesfr/xen-orchestra/pull/5354))
- [VM/network] Don't change VIF's locking mode automatically (PR [#5357](https://github.com/vatesfr/xen-orchestra/pull/5357))
- [Import OVA] Fix 'Max payload size exceeded' error when importing huge OVAs (PR [#5372](https://github.com/vatesfr/xen-orchestra/pull/5372))
- [Backup] Make backup directories only accessible by root users (PR [#5378](https://github.com/vatesfr/xen-orchestra/pull/5378))

### Released packages

- xo-server-auth-ldap 0.10.1
- @vates/multi-key-map 0.1.0
- @xen-orchestra/fs 0.12.0
- vhd-lib 1.0.0
- xo-vmdk-to-vhd 2.0.0
- xo-server-web-hooks 0.2.0
- xo-server 5.71.2
- xo-web 5.75.0

## **5.52.0** (2020-10-30)

### Highlights

- [Host/Advanced] Display installed certificates with ability to install a new certificate [#5134](https://github.com/vatesfr/xen-orchestra/issues/5134) (PRs [#5319](https://github.com/vatesfr/xen-orchestra/pull/5319) [#5332](https://github.com/vatesfr/xen-orchestra/pull/5332))
- [VM/network] Allow Self Service users to change a VIF's network [#5020](https://github.com/vatesfr/xen-orchestra/issues/5020) (PR [#5203](https://github.com/vatesfr/xen-orchestra/pull/5203))
- [Host/Advanced] Ability to change the scheduler granularity. Only available on XCP-ng >= 8.2 [#5291](https://github.com/vatesfr/xen-orchestra/issues/5291) (PR [#5320](https://github.com/vatesfr/xen-orchestra/pull/5320))

### Enhancements

- [New SSH key] Show warning when the SSH key already exists (PR [#5329](https://github.com/vatesfr/xen-orchestra/pull/5329))
- [Pool/Network] Add a tooltip to the `Automatic` column (PR [#5345](https://github.com/vatesfr/xen-orchestra/pull/5345))
- [LDAP] Ability to force group synchronization [#1884](https://github.com/vatesfr/xen-orchestra/issues/1884) (PR [#5343](https://github.com/vatesfr/xen-orchestra/pull/5343))

### Bug fixes

- [Host] Fix power state stuck on busy after power off [#4919](https://github.com/vatesfr/xen-orchestra/issues/4919) (PR [#5288](https://github.com/vatesfr/xen-orchestra/pull/5288))
- [VM/Network] Don't allow users to change a VIF's locking mode if they don't have permissions on the network (PR [#5283](https://github.com/vatesfr/xen-orchestra/pull/5283))
- [Backup/overview] Add tooltip on the running backup job button (PR [#5325](https://github.com/vatesfr/xen-orchestra/pull/5325))
- [VM] Show snapshot button in toolbar for Self Service users (PR [#5324](https://github.com/vatesfr/xen-orchestra/pull/5324))
- [User] Fallback to default filter on resetting customized filter (PR [#5321](https://github.com/vatesfr/xen-orchestra/pull/5321))
- [Home] Show error notification when bulk VM snapshot fails (PR [#5323](https://github.com/vatesfr/xen-orchestra/pull/5323))
- [Backup] Skip VMs currently migrating

### Released packages

- xo-server-auth-ldap 0.10.0
- vhd-lib 0.8.0
- @xen-orchestra/audit-core 0.2.0
- xo-server-audit 0.9.0
- xo-web 5.74.0
- xo-server 5.70.0

## **5.51.1** (2020-10-14)

### Enhancements

@@ -30,7 +191,7 @@
|
||||
- Fix `not enough permissions` error when accessing some pages as a Self Service user (PR [#5303](https://github.com/vatesfr/xen-orchestra/pull/5303))
|
||||
- [VM] Explicit error when VM migration failed due to unset default SR on destination pool [#5282](https://github.com/vatesfr/xen-orchestra/issues/5282) (PR [#5306](https://github.com/vatesfr/xen-orchestra/pull/5306))
|
||||
|
||||
### Packages to release
|
||||
### Released packages
|
||||
|
||||
- xo-server-sdn-controller 1.0.4
|
||||
- xo-server-backup-reports 0.16.7
|
||||
@@ -63,7 +224,7 @@
|
||||
- [Import OVA] Improve import speed of embedded gzipped VMDK disks (PR [#5275](https://github.com/vatesfr/xen-orchestra/pull/5275))
|
||||
- [Remotes] Fix editing bucket and directory for S3 remotes [#5233](https://github.com/vatesfr/xen-orchestra/issues/5233) (PR [5276](https://github.com/vatesfr/xen-orchestra/pull/5276))
|
||||
|
||||
### Packages to release
|
||||
### Released packages
|
||||
|
||||
- xo-server-auth-ldap 0.9.0
|
||||
- @xen-orchestra/fs 0.11.1
|
||||
@@ -73,9 +234,7 @@
|
||||
|
||||
## **5.50.3** (2020-09-17)
|
||||
|
||||

|
||||
|
||||
### Packages to release
|
||||
### Released packages
|
||||
|
||||
- xo-server-audit 0.8.0
|
||||
|
||||
@@ -91,7 +250,7 @@

- [New SR] Fix `Cannot read property 'trim' of undefined` error (PR [#5212](https://github.com/vatesfr/xen-orchestra/pull/5212))
- [Dashboard/Health] Fix suspended VDIs considered as orphans [#5248](https://github.com/vatesfr/xen-orchestra/issues/5248) (PR [#5249](https://github.com/vatesfr/xen-orchestra/pull/5249))

### Packages to release
### Released packages

- xo-server-audit 0.7.2
- xo-web 5.70.0
@@ -107,7 +266,7 @@

- [VM/Network] Fix TX checksumming [#5234](https://github.com/vatesfr/xen-orchestra/issues/5234)

### Packages to release
### Released packages

- xo-server-usage-report 0.9.0
- xo-server-audit 0.7.1
@@ -137,7 +296,7 @@

- [Audit] Obfuscate sensitive data in `user.changePassword` action's records [#5219](https://github.com/vatesfr/xen-orchestra/issues/5219) (PR [#5220](https://github.com/vatesfr/xen-orchestra/pull/5220))
- [SDN Controller] Fix `Cannot read property '$network' of undefined` error at network creation (PR [#5217](https://github.com/vatesfr/xen-orchestra/pull/5217))

### Packages to release
### Released packages

- xo-server-audit 0.7.0
- xo-server-sdn-controller 1.0.3
@@ -154,7 +313,7 @@

- [Patches] Don't log errors related to missing patches listing (previous fix in 5.48.3 was not working)

### Packages to release
### Released packages

- xo-server 5.64.1
- xo-server-sdn-controller 1.0.2

@@ -7,15 +7,16 @@

> Users must be able to say: “Nice enhancement, I'm eager to test it”

- [Host/Advanced] Display installed certificates [#5134](https://github.com/vatesfr/xen-orchestra/issues/5134) (PR [#5319](https://github.com/vatesfr/xen-orchestra/pull/5319))
- [VM/network] Allow Self Service users to change a VIF's network [#5020](https://github.com/vatesfr/xen-orchestra/issues/5020) (PR [#5203](https://github.com/vatesfr/xen-orchestra/pull/5203))
- [Task] Display age and estimated duration (PR [#5530](https://github.com/vatesfr/xen-orchestra/pull/5530))
- [Proxy] Ask for a confirmation before upgrading a proxy with running backups (PR [#5533](https://github.com/vatesfr/xen-orchestra/pull/5533))
- [Backup/restore] Allow backup restore to any licence even if XOA isn't registered (PR [#5547](https://github.com/vatesfr/xen-orchestra/pull/5547))

### Bug fixes

> Users must be able to say: “I had this issue, happy to know it's fixed”

- [Host] Fix power state stuck on busy after power off [#4919](https://github.com/vatesfr/xen-orchestra/issues/4919) (PR [#5288](https://github.com/vatesfr/xen-orchestra/pull/5288))
- [VM/Network] Don't allow users to change a VIF's locking mode if they don't have permissions on the network (PR [#5283](https://github.com/vatesfr/xen-orchestra/pull/5283))
- [VM/Snapshot export] Fix `Error: no available place in queue` when canceling an export via the browser and then starting a new one while the concurrency threshold is reached [#5535](https://github.com/vatesfr/xen-orchestra/issues/5535) (PR [#5538](https://github.com/vatesfr/xen-orchestra/pull/5538))
- [Servers] Hide a pool's objects if its master is unreachable [#5475](https://github.com/vatesfr/xen-orchestra/issues/5475) (PR [#5526](https://github.com/vatesfr/xen-orchestra/pull/5526))

### Packages to release

@@ -34,8 +35,8 @@

>
> In case of conflict, the highest (lowest in previous list) `$version` wins.

- vhd-lib minor
- @xen-orchestra/audit-core minor
- xo-server-audit minor
- xo-web minor
- @xen-orchestra/fs patch
- xen-api patch
- xo-common minor
- xo-server minor
- xo-web minor

@@ -35,6 +35,7 @@ module.exports = {
['/supported_hosts', 'Host Compatibility List'],
['/installation', 'Installation'],
['/configuration', 'Configuration'],
['/migrate_to_new_xoa', 'Migrate to new XOA'],
['/updater', 'Updates'],
['/architecture', 'Architecture'],
['/troubleshooting', 'Troubleshooting'],
@@ -92,10 +93,7 @@ module.exports = {
collapsable: false, // optional, defaults to true
sidebarDepth: 1, // optional, defaults to 1
children: [
  [
    'https://github.com/vatesfr/xen-orchestra/blob/master/CHANGELOG.md#changelog',
    'Changelog',
  ],
  ['https://github.com/vatesfr/xen-orchestra/blob/master/CHANGELOG.md#changelog', 'Changelog'],
  ['/code_of_conduct', 'Code of Conduct'],
  ['/contributing', 'Contributing'],
  ['/licenses', 'Licenses'],

@@ -172,11 +172,7 @@ const handleHook = data => {
  const { method, params, type, result, error, timestamp } = JSON.parse(data)

  // Log it
  console.log(
    `${new Date(timestamp).toISOString()} [${method}|${type}] ${params} → ${
      result || error
    }`
  )
  console.log(`${new Date(timestamp).toISOString()} [${method}|${type}] ${params} → ${result || error}`)

  // Run scripts
  exec(`./hook-scripts/${method}-${type}.sh`)

BIN docs/assets/audit_log_configuration.png (new file, 26 KiB, binary not shown)
BIN docs/assets/exportModal.png (new file, 26 KiB, binary not shown)
BIN docs/assets/importModal.png (new file, 32 KiB, binary not shown)
BIN docs/assets/ldapgroupconfig.png (new file, 89 KiB, binary not shown)
BIN docs/assets/maintenancemode.png (new file, 21 KiB, binary not shown)
@@ -316,3 +316,12 @@ This is how it currently works in Xen Orchestra. But sometimes, you also want to
:::

If your job contains 50 VMs for example, you could specify a sequential backup with a limit of "25 at once" (enter 25 in the concurrency field). This means that at 3 AM, we'll do 25 snapshots (2 at a time), then exports. As soon as the first VM backup is completely finished (snapshot removed), we'll start the 26th and so on, to always keep a maximum of 25 VM backups going in parallel.
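
To make that scheduling rule concrete, here is a minimal worker-pool sketch in plain JavaScript. It is an illustration only, not xo-server's actual scheduler, and `backupVm` is a hypothetical placeholder for the snapshot + export + cleanup sequence:

```
// Illustration only: run `task` over `items` with at most `limit` in flight.
async function runWithConcurrency(items, limit, task) {
  const results = []
  let next = 0
  // start `limit` workers; each one picks up the next VM as soon as its
  // current backup has completely finished
  const workers = Array.from({ length: Math.min(limit, items.length) }, async () => {
    while (next < items.length) {
      const i = next++
      results[i] = await task(items[i])
    }
  })
  await Promise.all(workers)
  return results
}

// e.g. runWithConcurrency(vms, 25, backupVm) for the 50-VM job above
```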

## Backup modifier tags

When a backup job is configured using the Normal snapshot mode, it's possible to use VM tags to apply a different snapshot mode to individual VMs:

- **xo-offline-backup** to apply offline snapshotting mode (the VM will be shut down prior to the snapshot)
- **xo-memory-backup** to apply RAM-enabled snapshotting

For example, you could have a regular backup job with 10 VMs configured with Normal snapshotting, including two which are database servers. Since database servers are generally more sensitive to being restored from snapshots, you could apply the **xo-memory-backup** tag to those two VMs and only those will be backed up in RAM-enabled mode. This avoids the need to manage a separate backup job and schedule.
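
A minimal sketch of how such a tag could select the snapshot mode per VM; the function and mode names are hypothetical, not the actual xo-server implementation:

```
// Hypothetical: derive the snapshot mode of one VM from its tags.
function getSnapshotMode(vm, jobMode = 'normal') {
  if (vm.tags.includes('xo-offline-backup')) {
    return 'offline' // the VM is shut down before the snapshot
  }
  if (vm.tags.includes('xo-memory-backup')) {
    return 'with-memory' // RAM-enabled snapshot
  }
  return jobMode // no modifier tag: keep the job's Normal mode
}

console.log(getSnapshotMode({ tags: ['xo-memory-backup'] })) // → 'with-memory'
```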
@@ -99,6 +99,18 @@ Putting it altogether and putting our values and UUID's into the command, it wil
xo-cr-seed https://root:password@xen1.company.tld 4a21c1cd-e8bd-4466-910a-f7524ecc07b1 https://root:password@xen2.company.tld 5aaf86ca-ae06-4a4e-b6e1-d04f0609e64d 90d11a94-a88f-4a84-b7c1-ed207d3de2f9 369a26f0-da77-41ab-a998-fa6b02c69b9a
```

:::warning
If the username or the password for your XCP-ng/XenServer hosts contains special characters, they must use [percent encoding](https://en.wikipedia.org/wiki/Percent-encoding).

An easy way to do this with Node on the command line:

```
> node -p 'encodeURIComponent(process.argv[1])' -- 'password with special chars :#@'
password%20with%20special%20chars%20%3A%23%40
```

:::

### Finished

Your backup job should now be working correctly! Manually run the job the first time to check that everything is OK. Then, enable the job. **Now, only the deltas are sent; your initial seed saved you a LOT of time if you have a slow network.**

@@ -422,6 +422,29 @@ It works even if the VM is running, because we'll automatically export a snapsho

In the VM "Snapshots" tab, you can also export a snapshot like you export a VM.

## Hosts management

Apart from updates (see the next section), you can also manage hosts via Xen Orchestra. Basic operations are supported, like reboot, shutdown and so on.

But there are also some more specific features, like maintenance mode.

### Maintenance mode

:::warning
If you are using HA, maintenance mode is required before doing any reboot or update. NEVER forget to enable maintenance mode when using HA!
:::

Maintenance mode triggers two actions internally:

* disabling the host (no new VMs can start on it)
* evacuating the VMs that can be evacuated ("agile" VMs, i.e. those that can be live migrated)

It's perfect if you want to shut down the host for hardware replacement, or if you want to do other operations without disrupting your production.
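
For readers scripting this outside the UI, here is a rough sketch of the two underlying XAPI calls, using the xen-api library from this repository. The connection options and host reference lookup are simplified and illustrative, so double-check them against the xen-api README before relying on this:

```
import { createClient } from 'xen-api'

// Illustration only: enter maintenance mode by hand via XAPI.
async function enterMaintenanceMode(url, user, password, hostRef) {
  const xapi = createClient({ url, auth: { user, password }, allowUnauthorized: true })
  await xapi.connect()
  await xapi.call('host.disable', hostRef) // 1. no new VMs can start on the host
  await xapi.call('host.evacuate', hostRef) // 2. live migrate the agile VMs away
  await xapi.disconnect()
}
```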

![](./assets/maintenancemode.png)

Note that maintenance mode will be **automatically exited after a host reboot**.

## Hosts updates

Patching a host manually can be time-consuming (and boring). That's why we provide a high-level feature that downloads and applies all missing patches automatically.

docs/migrate_to_new_xoa.md (new file, 25 lines)

@@ -0,0 +1,25 @@
### Deploy new appliance

First step: deploy a new appliance. All the information you need is [here](installation.md).

### Export configuration

To export your current configuration, navigate to **Settings** -> **Config**.
Here you will see a button labeled **_Download current config_**. Click on it and an export modal will appear.

![](./assets/exportModal.png)

You can set a passphrase to encrypt the exported configuration.

### Import configuration

Now it's time to import your configuration into the new appliance.
Go to the **Settings** -> **Config** page of your new appliance. There you have an **_import_** section where you can drag and drop your exported configuration file.

![](./assets/importModal.png)

Once your configuration is loaded, click to import. A new modal will appear, asking for the passphrase to decrypt your configuration. If you didn't set a passphrase when exporting, leave it empty.

### Advanced users

If you made custom adjustments to the `/etc/xo-server` config file on your previous appliance, you will unfortunately have to recreate these modifications on the new appliance.

@@ -19,6 +19,7 @@ Xen Orchestra should be fully functional with any version of these two virtualiz
Xen Orchestra and XCP-ng are mainly edited by the same company ([Vates](https://vates.fr)). That's why you are sure to have the best compatibility with both XCP-ng and XO! Also, we strongly suggest keeping up with the latest XCP-ng version whenever possible (or N-1).
:::

- XCP-ng 8.2 LTS ✅ 🚀
- XCP-ng 8.1 ✅ 🚀
- XCP-ng 8.0 ✅
- XCP-ng 7.6 ✅ ❗

@@ -48,6 +48,53 @@ LDAP Filters allow you to properly match your user. It's not an easy task to alw

After finishing the configuration, you can try to log in with your LDAP username and password. Right after your first successful login, your account will be visible in the user list of Xen Orchestra.

#### Groups

The LDAP plugin allows you to synchronize user groups. To configure the synchronization, check the checkbox next to **Synchronize groups** and fill out the configuration:

![](./assets/ldapgroupconfig.png)

- **Base and filter**: similar to the user configuration. The plugin needs an entry point in the directory and a filter to find the groups.
- **ID attribute**: the attribute that the plugin will use to uniquely identify each group. It must be unique across groups and must not change over time. On each synchronization, the plugin will compare LDAP groups with XO groups, then try to match them based on this attribute and create/update XO groups if necessary.
- **Display name attribute**: the attribute that will be used as the group's name in XO.
- **Members mapping**: this part of the configuration is used to determine which LDAP users belong to which LDAP groups. Given an LDAP directory that looks like this:

User:

```
objectClass: Person
cn: Bruce Wayne
uid: 347
...
```

Group:

```
objectClass: Group
cn: heroes
displayName: Heroes
gid: 456
member: 347
member: 348
...
```

The plugin needs to know that Bruce Wayne belongs to the heroes group. To do so, you need to set 2 entries in the configuration:

- **Group attribute**, which is the name of the *group* attribute that is used to list users within a group. In this example, it would be `member`.
- **User attribute**, which is the name of the *user* attribute that is used to reference users in groups. In this example, it would be `uid` since `347`, `348`, etc. are user `uid`s.
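
As a rough sketch of that matching rule (illustration only, not the plugin's actual code), assuming the group attribute may hold one or several values:

```
// Hypothetical: a user belongs to a group when the group's "group
// attribute" values contain the user's "user attribute" value.
function isMember(group, user, groupAttribute = 'member', userAttribute = 'uid') {
  const members = [].concat(group[groupAttribute] ?? [])
  return members.includes(user[userAttribute])
}

const bruce = { objectClass: 'Person', cn: 'Bruce Wayne', uid: '347' }
const heroes = { cn: 'heroes', displayName: 'Heroes', member: ['347', '348'] }
console.log(isMember(heroes, bruce)) // → true
```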

Save the configuration and you're good to go. From now on, every time an LDAP user logs into XO, the plugin will automatically create or update that user's groups and add them to those groups. If you need to import all the groups at once, you can do so from Settings > Groups > Synchronize LDAP Groups. This can be useful if you want to assign ACLs to groups without having to wait for a member of the group to log in.

:::tip
Importing the groups doesn't import their members. The users will still be imported one by one when they log in for the first time.
:::

:::tip
You can find the LDAP users by entering this filter in the users table: `authProviders:ldap?`.
:::
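
This filter uses the same complex-matcher syntax that powers the XO tables (its sources appear later in this diff). A rough usage sketch, assuming the parsed node's `match` method behaves as in those sources:

```
import { parse } from 'complex-matcher'

// `authProviders:ldap?` means: the `authProviders` property has a truthy
// `ldap` sub-property.
const filter = parse('authProviders:ldap?')
console.log(filter.match({ authProviders: { ldap: 'dn=bruce' } })) // → true
console.log(filter.match({ authProviders: {} })) // → false
```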

### SAML

This plugin allows SAML users to authenticate to Xen-Orchestra.

@@ -278,6 +325,34 @@ Now, your authorized users can create VMs with their SSH keys, grow template dis

![](./assets/selfservice_quotas.png)

## Audit log

XO Audit Log is a plugin that records all important actions performed by users and provides administrators with an overview of each action. This gives them insight into user behavior on their infrastructure, in order to track suspicious activity.

### How does it work?

XO Audit Log listens to important actions performed by users and stores them in the XOA database using a [hash chain structure](https://en.wikipedia.org/wiki/Hash_chain).

### Trustability of the records

Stored records are secured by:

- structure: records are chained using the hash chain structure, which means that each record is linked to its parent in a cryptographically secure way. This structure prevents the alteration of old records. A minimal sketch of the idea follows this list.

- hash upload: the hash chain structure has limits: it does not protect against a rewrite of recent records, or of all of them. To reduce this risk, the Audit Log plugin regularly uploads the last record hash to our database after checking the integrity of the whole record chain. This keeps the records safe by notifying users in case of alteration.
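
Here is that sketch (illustration only, not the plugin's actual record format):

```
import { createHash } from 'crypto'

const hash = data => createHash('sha256').update(data).digest('hex')

// Each record stores the hash of its predecessor, so altering an old
// record invalidates every hash that follows it.
function appendRecord(chain, payload) {
  const previousHash = chain.length === 0 ? 'nil' : chain[chain.length - 1].hash
  return [...chain, { payload, previousHash, hash: hash(previousHash + JSON.stringify(payload)) }]
}

let chain = []
chain = appendRecord(chain, { action: 'user.changePassword' })
chain = appendRecord(chain, { action: 'vm.start' })
// verification: recompute each record's hash and compare with the stored one
```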

### Configuration

The recording of users' actions is disabled by default. To enable it:

1. go to `settings/plugins`
2. expand the `audit` configuration
3. toggle it active and save the configuration

![](./assets/audit_log_configuration.png)

Now the audit plugin will record users' actions and upload the last record in the chain every day at **06:00 AM (UTC)**.

## Debugging

If you can't log in, check the logs of `xo-server` while you attempt to connect; they will give you hints about the error encountered. You can do that with `tail -f /var/log/syslog -n 100` on your XOA.

4 flow-typed/limit-concurrency-decorator.js (vendored)

@@ -1,6 +1,4 @@
declare module 'limit-concurrency-decorator' {
  declare function limitConcurrencyDecorator(
    concurrency: number
  ): <T: Function>(T) => T
  declare function limitConcurrencyDecorator(concurrency: number): <T: Function>(T) => T
  declare export default typeof limitConcurrencyDecorator
}

30 flow-typed/lodash.js (vendored)

@@ -1,33 +1,15 @@
declare module 'lodash' {
  declare export function countBy<K, V>(
    object: { [K]: V },
    iteratee: K | ((V, K) => string)
  ): { [string]: number }
  declare export function forEach<K, V>(
    object: { [K]: V },
    iteratee: (V, K) => void
  ): void
  declare export function groupBy<K, V>(
    object: { [K]: V },
    iteratee: K | ((V, K) => string)
  ): { [string]: V[] }
  declare export function countBy<K, V>(object: { [K]: V }, iteratee: K | ((V, K) => string)): { [string]: number }
  declare export function forEach<K, V>(object: { [K]: V }, iteratee: (V, K) => void): void
  declare export function groupBy<K, V>(object: { [K]: V }, iteratee: K | ((V, K) => string)): { [string]: V[] }
  declare export function invert<K, V>(object: { [K]: V }): { [V]: K }
  declare export function isEmpty(mixed): boolean
  declare export function keyBy<T>(array: T[], iteratee: string): boolean
  declare export function last<T>(array?: T[]): T | void
  declare export function map<T1, T2>(
    collection: T1[],
    iteratee: (T1) => T2
  ): T2[]
  declare export function mapValues<K, V1, V2>(
    object: { [K]: V1 },
    iteratee: (V1, K) => V2
  ): { [K]: V2 }
  declare export function map<T1, T2>(collection: T1[], iteratee: (T1) => T2): T2[]
  declare export function mapValues<K, V1, V2>(object: { [K]: V1 }, iteratee: (V1, K) => V2): { [K]: V2 }
  declare export function noop(...args: mixed[]): void
  declare export function some<T>(
    collection: T[],
    iteratee: (T, number) => boolean
  ): boolean
  declare export function some<T>(collection: T[], iteratee: (T, number) => boolean): boolean
  declare export function sum(values: number[]): number
  declare export function values<K, V>(object: { [K]: V }): V[]
}

4 flow-typed/promise-toolbox.js (vendored)

@@ -8,9 +8,7 @@ declare module 'promise-toolbox' {
    reject: T => void,
    resolve: T => void,
  |}
  declare export function fromCallback<T>(
    (cb: (error: any, value: T) => void) => void
  ): Promise<T>
  declare export function fromCallback<T>((cb: (error: any, value: T) => void) => void): Promise<T>
  declare export function fromEvent(emitter: mixed, string): Promise<mixed>
  declare export function ignoreErrors(): Promise<void>
  declare export function timeout<T>(delay: number): Promise<T>

20 package.json

@@ -7,17 +7,16 @@
    "babel-jest": "^26.0.1",
    "benchmark": "^2.1.4",
    "eslint": "^7.6.0",
    "eslint-config-prettier": "^6.0.0",
    "eslint-config-standard": "^14.1.0",
    "eslint-config-standard-jsx": "^8.1.0",
    "eslint-plugin-eslint-comments": "^3.1.1",
    "eslint-plugin-import": "^2.8.0",
    "eslint-config-prettier": "^7.1.0",
    "eslint-config-standard": "^16.0.2",
    "eslint-config-standard-jsx": "^10.0.0",
    "eslint-plugin-eslint-comments": "^3.2.0",
    "eslint-plugin-import": "^2.22.1",
    "eslint-plugin-node": "^11.1.0",
    "eslint-plugin-promise": "^4.0.0",
    "eslint-plugin-react": "^7.6.1",
    "eslint-plugin-standard": "^4.0.0",
    "eslint-plugin-promise": "^4.2.1",
    "eslint-plugin-react": "^7.21.5",
    "exec-promise": "^0.7.0",
    "flow-bin": "^0.131.0",
    "flow-bin": "^0.142.0",
    "globby": "^11.0.1",
    "handlebars": "^4.7.6",
    "husky": "^4.2.5",

@@ -25,7 +24,7 @@
    "lint-staged": "^10.2.7",
    "lodash": "^4.17.4",
    "prettier": "^2.0.5",
    "promise-toolbox": "^0.15.0",
    "promise-toolbox": "^0.16.0",
    "sorted-object": "^2.0.1",
    "vuepress": "^1.4.1"
  },

@@ -40,7 +39,6 @@
  "jest": {
    "collectCoverage": true,
    "moduleNameMapper": {
      "^.": "./src",
      "^(@vates/[^/]+)": "$1/src",
      "^(@xen-orchestra/[^/]+)": "$1/src",
      "^(value-matcher)": "$1/src",

@@ -1,3 +1 @@
module.exports = require('../../@xen-orchestra/babel-config')(
  require('./package.json')
)
module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))

@@ -1,15 +1,11 @@
import * as CM from './'

export const pattern =
  'foo !"\\\\ \\"" name:|(wonderwoman batman) hasCape? age:32 chi*go /^foo\\/bar\\./i'
export const pattern = 'foo !"\\\\ \\"" name:|(wonderwoman batman) hasCape? age:32 chi*go /^foo\\/bar\\./i'

export const ast = new CM.And([
  new CM.String('foo'),
  new CM.Not(new CM.String('\\ "')),
  new CM.Property(
    'name',
    new CM.Or([new CM.String('wonderwoman'), new CM.String('batman')])
  ),
  new CM.Property('name', new CM.Or([new CM.String('wonderwoman'), new CM.String('batman')])),
  new CM.TruthyProperty('hasCape'),
  new CM.Property('age', new CM.NumberOrStringNode('32')),
  new CM.GlobPattern('chi*go'),

@@ -141,10 +141,7 @@ export class NumberNode extends Node {
  }

  match(value) {
    return (
      value === this.value ||
      (value !== null && typeof value === 'object' && some(value, this.match))
    )
    return value === this.value || (value !== null && typeof value === 'object' && some(value, this.match))
  }

  toString() {

@@ -170,8 +167,7 @@ export class NumberOrStringNode extends Node {
      value === numValue ||
      (typeof value === 'string'
        ? value.toLowerCase().indexOf(lcValue) !== -1
        : (Array.isArray(value) || isPlainObject(value)) &&
          some(value, this.match))
        : (Array.isArray(value) || isPlainObject(value)) && some(value, this.match))
    )
  }

@@ -200,11 +196,7 @@ export class Property extends Node {

const escapeChar = char => '\\' + char
const formatString = value =>
  Number.isNaN(+value)
    ? isRawString(value)
      ? value
      : `"${value.replace(/\\|"/g, escapeChar)}"`
    : `"${value}"`
  Number.isNaN(+value) ? (isRawString(value) ? value : `"${value.replace(/\\|"/g, escapeChar)}"`) : `"${value}"`

export class GlobPattern extends Node {
  constructor(value) {

@@ -219,10 +211,7 @@ export class GlobPattern extends Node {

    // should not be enumerable for the tests
    Object.defineProperty(this, 'match', {
      value: this.match.bind(
        this,
        new RegExp(value.split('*').map(escapeRegExp).join('.*'), 'i')
      ),
      value: this.match.bind(this, new RegExp(value.split('*').map(escapeRegExp).join('.*'), 'i')),
    })
  }

@@ -330,9 +319,7 @@ class Failure {
  }

  get value() {
    throw new Error(
      `parse error: expected ${this.expected} at position ${this.pos}`
    )
    throw new Error(`parse error: expected ${this.expected} at position ${this.pos}`)
  }
}

@@ -369,9 +356,7 @@ class P {
  }

  static lazy(parserCreator, arg) {
    const parser = new P((input, pos, end) =>
      (parser._parse = parserCreator(arg)._parse)(input, pos, end)
    )
    const parser = new P((input, pos, end) => (parser._parse = parserCreator(arg)._parse)(input, pos, end))
    return parser
  }

@@ -380,9 +365,7 @@ class P {
    return new P((input, pos) => {
      regex.lastIndex = pos
      const matches = regex.exec(input)
      return matches !== null
        ? new Success(regex.lastIndex, matches[0])
        : new Failure(pos, regex)
      return matches !== null ? new Success(regex.lastIndex, matches[0]) : new Failure(pos, regex)
    })
  }

@@ -405,9 +388,7 @@ class P {
  static text(text) {
    const { length } = text
    return new P((input, pos) =>
      input.startsWith(text, pos)
        ? new Success(pos + length, text)
        : new Failure(pos, `'${text}'`)
      input.startsWith(text, pos) ? new Success(pos + length, text) : new Failure(pos, `'${text}'`)
    )
  }

@@ -443,10 +424,7 @@ class P {
        value.push(result.value)
        pos = result.pos
      }
      while (
        i < max &&
        (result = this._parse(input, pos, end)) instanceof Success
      ) {
      while (i < max && (result = this._parse(input, pos, end)) instanceof Success) {
        ++i
        value.push(result.value)
        pos = result.pos

@@ -471,17 +449,13 @@ class P {
    }
  }

P.eof = new P((input, pos, end) =>
  pos < end ? new Failure(pos, 'end of input') : new Success(pos)
)
P.eof = new P((input, pos, end) => (pos < end ? new Failure(pos, 'end of input') : new Success(pos)))

// -------------------------------------------------------------------

const parser = P.grammar({
  default: r =>
    P.seq(r.ws, r.term.repeat(), P.eof).map(([, terms]) =>
      terms.length === 0 ? new Null() : new And(terms)
    ),
    P.seq(r.ws, r.term.repeat(), P.eof).map(([, terms]) => (terms.length === 0 ? new Null() : new And(terms))),
  globPattern: new P((input, pos, end) => {
    let value = ''
    let c

@@ -489,9 +463,7 @@ const parser = P.grammar({
      ++pos
      value += c
    }
    return value.length === 0
      ? new Failure(pos, 'a raw string')
      : new Success(pos, value)
    return value.length === 0 ? new Failure(pos, 'a raw string') : new Success(pos, value)
  }),
  quotedString: new P((input, pos, end) => {
    if (input[pos] !== '"') {

@@ -518,9 +490,7 @@ const parser = P.grammar({
      ++pos
      value += c
    }
    return value.length === 0
      ? new Failure(pos, 'a raw string')
      : new Success(pos, value)
    return value.length === 0 ? new Failure(pos, 'a raw string') : new Success(pos, value)
  }),
  regex: new P((input, pos, end) => {
    if (input[pos] !== '/') {

@@ -551,17 +521,8 @@ const parser = P.grammar({
  }),
  term: r =>
    P.alt(
      P.seq(P.text('('), r.ws, r.term.repeat(1), P.text(')')).map(
        _ => new And(_[2])
      ),
      P.seq(
        P.text('|'),
        r.ws,
        P.text('('),
        r.ws,
        r.term.repeat(1),
        P.text(')')
      ).map(_ => new Or(_[4])),
      P.seq(P.text('('), r.ws, r.term.repeat(1), P.text(')')).map(_ => new And(_[2])),
      P.seq(P.text('|'), r.ws, P.text('('), r.ws, r.term.repeat(1), P.text(')')).map(_ => new Or(_[4])),
      P.seq(P.text('!'), r.ws, r.term).map(_ => new Not(_[2])),
      P.seq(P.regex(/[<>]=?/), r.rawString).map(([op, val]) => {
        val = +val

@@ -570,9 +531,7 @@ const parser = P.grammar({
        }
        return new Comparison(op, val)
      }),
      P.seq(r.property, r.ws, P.text(':'), r.ws, r.term).map(
        _ => new Property(_[0], _[4])
      ),
      P.seq(r.property, r.ws, P.text(':'), r.ws, r.term).map(_ => new Property(_[0], _[4])),
      P.seq(r.property, P.text('?')).map(_ => new TruthyProperty(_[0])),
      r.value
    ).skip(r.ws),

@@ -582,9 +541,7 @@ const parser = P.grammar({
    r.regex,
    r.globPattern.map(str => {
      const asNum = +str
      return Number.isNaN(asNum)
        ? new GlobPattern(str)
        : new NumberOrStringNode(str)
      return Number.isNaN(asNum) ? new GlobPattern(str) : new NumberOrStringNode(str)
    })
  ),
  ws: P.regex(/\s*/),

@@ -664,12 +621,7 @@ export const getPropertyClausesStrings = node => {

// -------------------------------------------------------------------

export const setPropertyClause = (node, name, child) => {
  const property =
    child &&
    new Property(
      name,
      typeof child === 'string' ? new StringNode(child) : child
    )
  const property = child && new Property(name, typeof child === 'string' ? new StringNode(child) : child)

  if (node === undefined) {
    return property

@@ -12,9 +12,7 @@ import {
} from './'

it('getPropertyClausesStrings', () => {
  const tmp = getPropertyClausesStrings(
    parse('foo bar:baz baz:|(foo bar /^boo$/ /^far$/) foo:/^bar$/')
  )
  const tmp = getPropertyClausesStrings(parse('foo bar:baz baz:|(foo bar /^boo$/ /^far$/) foo:/^bar$/'))
  expect(tmp).toEqual({
    bar: ['baz'],
    baz: ['foo', 'bar', 'boo', 'far'],

@@ -66,35 +64,23 @@ describe('NumberOrStringNode', () => {

describe('setPropertyClause', () => {
  it('creates a node if none passed', () => {
    expect(setPropertyClause(undefined, 'foo', 'bar').toString()).toBe(
      'foo:bar'
    )
    expect(setPropertyClause(undefined, 'foo', 'bar').toString()).toBe('foo:bar')
  })

  it('adds a property clause if there was none', () => {
    expect(setPropertyClause(parse('baz'), 'foo', 'bar').toString()).toBe(
      'baz foo:bar'
    )
    expect(setPropertyClause(parse('baz'), 'foo', 'bar').toString()).toBe('baz foo:bar')
  })

  it('replaces the property clause if there was one', () => {
    expect(
      setPropertyClause(parse('plip foo:baz plop'), 'foo', 'bar').toString()
    ).toBe('plip plop foo:bar')
    expect(setPropertyClause(parse('plip foo:baz plop'), 'foo', 'bar').toString()).toBe('plip plop foo:bar')

    expect(
      setPropertyClause(parse('foo:|(baz plop)'), 'foo', 'bar').toString()
    ).toBe('foo:bar')
    expect(setPropertyClause(parse('foo:|(baz plop)'), 'foo', 'bar').toString()).toBe('foo:bar')
  })

  it('removes the property clause if no chid is passed', () => {
    expect(
      setPropertyClause(parse('foo bar:baz qux'), 'bar', undefined).toString()
    ).toBe('foo qux')
    expect(setPropertyClause(parse('foo bar:baz qux'), 'bar', undefined).toString()).toBe('foo qux')

    expect(
      setPropertyClause(parse('foo bar:baz qux'), 'baz', undefined).toString()
    ).toBe('foo bar:baz qux')
    expect(setPropertyClause(parse('foo bar:baz qux'), 'baz', undefined).toString()).toBe('foo bar:baz qux')
  })
})

@@ -1,3 +1 @@
module.exports = require('../../@xen-orchestra/babel-config')(
  require('./package.json')
)
module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))

@@ -1,13 +1,7 @@
// @flow

/* eslint-disable no-use-before-define */
export type Pattern =
  | AndPattern
  | OrPattern
  | NotPattern
  | ObjectPattern
  | ArrayPattern
  | ValuePattern
export type Pattern = AndPattern | OrPattern | NotPattern | ObjectPattern | ArrayPattern | ValuePattern
/* eslint-enable no-use-before-define */

// all patterns must match

@@ -77,5 +71,4 @@ const match = (pattern: Pattern, value: any) => {
  return pattern === value
}

export const createPredicate = (pattern: Pattern) => (value: any) =>
  match(pattern, value)
export const createPredicate = (pattern: Pattern) => (value: any) => match(pattern, value)

@@ -1,3 +1 @@
module.exports = require('../../@xen-orchestra/babel-config')(
  require('./package.json')
)
module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))

@@ -28,12 +28,12 @@
    "node": ">=8.10"
  },
  "dependencies": {
    "@xen-orchestra/fs": "^0.11.1",
    "@xen-orchestra/fs": "^0.12.1",
    "cli-progress": "^3.1.0",
    "exec-promise": "^0.7.0",
    "getopts": "^2.2.3",
    "struct-fu": "^1.2.0",
    "vhd-lib": "^0.7.2"
    "vhd-lib": "^1.0.0"
  },
  "devDependencies": {
    "@babel/cli": "^7.0.0",

@@ -41,9 +41,9 @@
    "@babel/preset-env": "^7.0.0",
    "babel-plugin-lodash": "^3.3.2",
    "cross-env": "^7.0.2",
    "execa": "^4.0.2",
    "execa": "^5.0.0",
    "index-modules": "^0.3.0",
    "promise-toolbox": "^0.15.0",
    "promise-toolbox": "^0.16.0",
    "rimraf": "^3.0.0",
    "tmp": "^0.2.1"
  },

@@ -16,8 +16,6 @@ export const writeStream = (input, path) => {
  const output = createOutputStream(path)

  return new Promise((resolve, reject) =>
    input
      .on('error', reject)
      .pipe(output.on('error', reject).on('finish', resolve))
    input.on('error', reject).pipe(output.on('error', reject).on('finish', resolve))
  )
}

Some files were not shown because too many files have changed in this diff.