Compare commits

1 commit

nr-fix-s3-… ... icinga2-te…

| Author | SHA1 | Date |
| --- | --- | --- |
|  | aa92f0fc93 |  |
@@ -48,5 +48,9 @@ module.exports = {
    'lines-between-class-members': 'off',
    'no-console': ['error', { allow: ['warn', 'error'] }],
    'no-var': 'error',
    'node/no-extraneous-import': 'error',
    'node/no-extraneous-require': 'error',
    'prefer-const': 'error',
  },
}
@@ -3,9 +3,4 @@ module.exports = {
  jsxSingleQuote: true,
  semi: false,
  singleQuote: true,

  // 2020-11-24: Requested by nraynaud and approved by the rest of the team
  //
  // https://team.vates.fr/vates/pl/a1i8af1b9id7pgzm3jcg4toacy
  printWidth: 120,
}
@@ -17,7 +17,7 @@ Installation of the [npm package](https://npmjs.org/package/@vates/coalesce-call

## Usage

```js
import { coalesceCalls } from '@vates/coalesce-calls'
import { coalesceCalls } from 'coalesce-calls'

const connect = coalesceCalls(async function () {
  // async operation
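Aside on the hunk above: `coalesceCalls` wraps an async function so that calls made while one is already in flight share the pending promise instead of starting new work. A minimal sketch of that behaviour — an illustration only, not the package's actual implementation, and assuming calls are coalesced regardless of arguments:

```js
// Hypothetical sketch, not the real @vates/coalesce-calls source
function coalesceCallsSketch(fn) {
  let pending
  return function () {
    if (pending === undefined) {
      // start the call and clear the slot once it settles
      pending = Promise.resolve(fn.apply(this, arguments)).finally(() => {
        pending = undefined
      })
    }
    return pending
  }
}
// e.g. two concurrent connect() calls would share one underlying operation
```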
@@ -1,53 +0,0 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->

# @vates/multi-key-map

[](https://npmjs.org/package/@vates/multi-key-map)  [](https://bundlephobia.com/result?p=@vates/multi-key-map) [](https://npmjs.org/package/@vates/multi-key-map)

> Create map with values affected to multiple keys

## Install

Installation of the [npm package](https://npmjs.org/package/@vates/multi-key-map):

```
> npm install --save @vates/multi-key-map
```

## Usage

```js
import { MultiKeyMap } from '@vates/multi-key-map'

const map = new MultiKeyMap()

const OBJ = {}
map.set([], 0)
map.set(['foo'], 1)
map.set(['foo', 'bar'], 2)
map.set(['bar', 'foo'], 3)
map.set([OBJ], 4)
map.set([{}], 5)

map.get([]) // 0
map.get(['foo']) // 1
map.get(['foo', 'bar']) // 2
map.get(['bar', 'foo']) // 3
map.get([OBJ]) // 4
map.get([{}]) // undefined
```

## Contributions

Contributions are _very_ welcomed, either on the documentation or on
the code.

You may:

- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
  you've encountered;
- fork and create a pull request.

## License

[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)
@@ -1,20 +0,0 @@
```js
import { MultiKeyMap } from '@vates/multi-key-map'

const map = new MultiKeyMap()

const OBJ = {}
map.set([], 0)
map.set(['foo'], 1)
map.set(['foo', 'bar'], 2)
map.set(['bar', 'foo'], 3)
map.set([OBJ], 4)
map.set([{}], 5)

map.get([]) // 0
map.get(['foo']) // 1
map.get(['foo', 'bar']) // 2
map.get(['bar', 'foo']) // 3
map.get([OBJ]) // 4
map.get([{}]) // undefined
```
@@ -1,28 +0,0 @@
{
  "private": false,
  "name": "@vates/multi-key-map",
  "description": "Create map with values affected to multiple keys",
  "keywords": [
    "cache",
    "map"
  ],
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/multi-key-map",
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "repository": {
    "directory": "@vates/multi-key-map",
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "author": {
    "name": "Vates SAS",
    "url": "https://vates.fr"
  },
  "license": "ISC",
  "version": "0.1.0",
  "engines": {
    "node": ">=8.10"
  },
  "scripts": {
    "postversion": "npm publish --access public"
  }
}
@@ -1 +1,3 @@
module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))
module.exports = require('../../@xen-orchestra/babel-config')(
  require('./package.json')
)
@@ -46,6 +46,7 @@
    "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
    "prebuild": "yarn run clean",
    "predev": "yarn run prebuild",
    "prepare": "yarn run build",
    "prepublishOnly": "yarn run build",
    "postversion": "npm publish"
  }
@@ -1 +1,3 @@
module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))
module.exports = require('../../@xen-orchestra/babel-config')(
  require('./package.json')
)
@@ -7,7 +7,7 @@
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "version": "0.2.0",
  "version": "0.1.1",
  "engines": {
    "node": ">=8.10"
  },
@@ -30,7 +30,6 @@
    "rimraf": "^3.0.0"
  },
  "dependencies": {
    "@xen-orchestra/log": "^0.2.0",
    "core-js": "^3.6.4",
    "golike-defer": "^0.4.1",
    "lodash": "^4.17.15",
@@ -2,12 +2,9 @@
import 'core-js/features/symbol/async-iterator'

import assert from 'assert'
import createLogger from '@xen-orchestra/log'
import defer from 'golike-defer'
import hash from 'object-hash'

const log = createLogger('xo:audit-core')

export class Storage {
  constructor() {
    this._lock = Promise.resolve()
@@ -28,7 +25,7 @@ export class Storage {
//
// http://man7.org/linux/man-pages/man3/crypt.3.html#NOTES
const ID_TO_ALGORITHM = {
  5: 'sha256',
  '5': 'sha256',
}

export class AlteredRecordError extends Error {
@@ -68,17 +65,8 @@ export class AuditCore {
  @defer
  async add($defer, subject, event, data) {
    const time = Date.now()
    $defer(await this._storage.acquireLock())
    return this._addUnsafe({
      data,
      event,
      subject,
      time,
    })
  }

  async _addUnsafe({ data, event, subject, time }) {
    const storage = this._storage
    $defer(await storage.acquireLock())

    // delete "undefined" properties and normalize data with JSON.stringify
    const record = JSON.parse(
@@ -119,7 +107,9 @@ export class AuditCore {
      if (record === undefined) {
        throw new MissingRecordError(newest, nValid)
      }
      if (newest !== createHash(record, newest.slice(1, newest.indexOf('$', 1)))) {
      if (
        newest !== createHash(record, newest.slice(1, newest.indexOf('$', 1)))
      ) {
        throw new AlteredRecordError(newest, nValid, record)
      }
      newest = record.previousId
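Aside on the `newest.slice(1, newest.indexOf('$', 1))` expression above: record ids follow the crypt(3) `$id$…` convention referenced earlier in this file, so the characters between the first two `$` are the algorithm id looked up in `ID_TO_ALGORITHM`. A short illustration with a made-up id:

```js
const id = '$5$0123abcd' // hypothetical record id
id.slice(1, id.indexOf('$', 1)) // '5', i.e. ID_TO_ALGORITHM['5'] === 'sha256'
```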
@@ -149,45 +139,4 @@ export class AuditCore {
      await this._storage.del(id)
    }
  }

  @defer
  async deleteRangeAndRewrite($defer, newest, oldest) {
    assert.notStrictEqual(newest, undefined)
    assert.notStrictEqual(oldest, undefined)

    const storage = this._storage
    $defer(await storage.acquireLock())

    assert.notStrictEqual(await storage.get(newest), undefined)
    const oldestRecord = await storage.get(oldest)
    assert.notStrictEqual(oldestRecord, undefined)

    const lastId = await storage.getLastId()
    const recentRecords = []
    for await (const record of this.getFrom(lastId)) {
      if (record.id === newest) {
        break
      }

      recentRecords.push(record)
    }

    for await (const record of this.getFrom(newest)) {
      await storage.del(record.id)
      if (record.id === oldest) {
        break
      }
    }

    await storage.setLastId(oldestRecord.previousId)

    for (const record of recentRecords) {
      try {
        await this._addUnsafe(record)
        await storage.del(record.id)
      } catch (error) {
        log.error(error)
      }
    }
  }
}
@@ -1,6 +1,12 @@
/* eslint-env jest */

import { AlteredRecordError, AuditCore, MissingRecordError, NULL_ID, Storage } from '.'
import {
  AlteredRecordError,
  AuditCore,
  MissingRecordError,
  NULL_ID,
  Storage,
} from '.'

const asyncIteratorToArray = async asyncIterator => {
  const array = []
@@ -82,13 +88,16 @@ describe('auditCore', () => {
  it('detects that a record is missing', async () => {
    const [newestRecord, deletedRecord] = await storeAuditRecords()

    const nValidRecords = await auditCore.checkIntegrity(NULL_ID, newestRecord.id)
    const nValidRecords = await auditCore.checkIntegrity(
      NULL_ID,
      newestRecord.id
    )
    expect(nValidRecords).toBe(DATA.length)

    await db.del(deletedRecord.id)
    await expect(auditCore.checkIntegrity(NULL_ID, newestRecord.id)).rejects.toEqual(
      new MissingRecordError(deletedRecord.id, 1)
    )
    await expect(
      auditCore.checkIntegrity(NULL_ID, newestRecord.id)
    ).rejects.toEqual(new MissingRecordError(deletedRecord.id, 1))
  })

  it('detects that a record has been altered', async () => {
@@ -97,7 +106,9 @@ describe('auditCore', () => {
    alteredRecord.event = ''
    await db.put(alteredRecord)

    await expect(auditCore.checkIntegrity(NULL_ID, newestRecord.id)).rejects.toEqual(
    await expect(
      auditCore.checkIntegrity(NULL_ID, newestRecord.id)
    ).rejects.toEqual(
      new AlteredRecordError(alteredRecord.id, 1, alteredRecord)
    )
  })
@@ -17,10 +17,9 @@ interface Record {
}

export class AuditCore {
  constructor(storage: Storage) { }
  public add(subject: any, event: string, data: any): Promise<Record> { }
  public checkIntegrity(oldest: string, newest: string): Promise<number> { }
  public getFrom(newest?: string): AsyncIterator { }
  public deleteFrom(newest: string): Promise<void> { }
  public deleteRangeAndRewrite(newest: string, oldest: string): Promise<void> { }
  constructor(storage: Storage) {}
  public add(subject: any, event: string, data: any): Promise<Record> {}
  public checkIntegrity(oldest: string, newest: string): Promise<number> {}
  public getFrom(newest?: string): AsyncIterator {}
  public deleteFrom(newest: string): Promise<void> {}
}
@@ -38,11 +38,18 @@ const configs = {

const getConfig = (key, ...args) => {
  const config = configs[key]
  return config === undefined ? {} : typeof config === 'function' ? config(...args) : config
  return config === undefined
    ? {}
    : typeof config === 'function'
    ? config(...args)
    : config
}

// some plugins must be used in a specific order
const pluginsOrder = ['@babel/plugin-proposal-decorators', '@babel/plugin-proposal-class-properties']
const pluginsOrder = [
  '@babel/plugin-proposal-decorators',
  '@babel/plugin-proposal-class-properties',
]

module.exports = function (pkg, plugins, presets) {
  plugins === undefined && (plugins = {})
@@ -1,5 +1,7 @@
const curryRight = require('lodash/curryRight')

module.exports = curryRight((iterable, fn) =>
  Promise.all(Array.isArray(iterable) ? iterable.map(fn) : Array.from(iterable, fn))
  Promise.all(
    Array.isArray(iterable) ? iterable.map(fn) : Array.from(iterable, fn)
  )
)
@@ -3,17 +3,6 @@ const { dirname } = require('path')
const fs = require('promise-toolbox/promisifyAll')(require('fs'))
module.exports = fs

fs.getSize = path =>
  fs.stat(path).then(
    _ => _.size,
    error => {
      if (error.code === 'ENOENT') {
        return 0
      }
      throw error
    }
  )

fs.mktree = async function mkdirp(path) {
  try {
    await fs.mkdir(path)
@@ -1,17 +1,16 @@
#!/usr/bin/env node

// assigned when options are parsed by the main function
let force, merge
let force

// -----------------------------------------------------------------------------

const assert = require('assert')
const flatten = require('lodash/flatten')
const getopts = require('getopts')
const limitConcurrency = require('limit-concurrency-decorator').default
const lockfile = require('proper-lockfile')
const pipe = require('promise-toolbox/pipe')
const { default: Vhd, mergeVhd } = require('vhd-lib')
const { default: Vhd } = require('vhd-lib')
const { dirname, resolve } = require('path')
const { DISK_TYPE_DIFFERENCING } = require('vhd-lib/dist/_constants')
const { isValidXva } = require('@xen-orchestra/backups/isValidXva')
@@ -27,10 +26,10 @@ const handler = require('@xen-orchestra/fs').getHandler({ url: 'file://' })
//
// the whole chain will be merged into parent, parent will be renamed to child
// and all the others will deleted
const mergeVhdChain = limitConcurrency(1)(async function mergeVhdChain(chain) {
async function mergeVhdChain(chain) {
  assert(chain.length >= 2)

  let child = chain[0]
  const child = chain[0]
  const parent = chain[chain.length - 1]
  const children = chain.slice(0, -1).reverse()
@@ -41,42 +40,21 @@
    .forEach(parent => {
      console.warn(' ', parent)
    })
  merge && console.warn(' merging…')
  force && console.warn(' merging…')
  console.warn('')
  if (merge) {
  if (force) {
    // `mergeVhd` does not work with a stream, either
    // - make it accept a stream
    // - or create synthetic VHD which is not a stream
    if (children.length !== 1) {
      console.warn('TODO: implement merging multiple children')
      children.length = 1
      child = children[0]
    }

    let done, total
    const handle = setInterval(() => {
      if (done !== undefined) {
        console.log('merging %s: %s/%s', child, done, total)
      }
    }, 10e3)

    await mergeVhd(
      handler,
      parent,
      handler,
      child,
      // children.length === 1
      //   ? child
      //   : await createSyntheticStream(handler, children),
      {
        onProgress({ done: d, total: t }) {
          done = d
          total = t
        },
      }
    )

    clearInterval(handle)
    return console.warn('TODO: implement merge')
    // await mergeVhd(
    //   handler,
    //   parent,
    //   handler,
    //   children.length === 1
    //     ? child
    //     : await createSyntheticStream(handler, children)
    // )
  }

  await Promise.all([
@@ -88,7 +66,7 @@ const mergeVhdChain = limitConcurrency(1)(async function mergeVhdChain(chain) {
      return force && handler.unlink(child)
    }),
  ])
})
}

const listVhds = pipe([
  vmDir => vmDir + '/vdis',
@@ -115,7 +93,9 @@ async function handleVm(vmDir) {
      const parent = resolve(dirname(path), vhd.header.parentUnicodeName)
      vhdParents[path] = parent
      if (parent in vhdChildren) {
        const error = new Error('this script does not support multiple VHD children')
        const error = new Error(
          'this script does not support multiple VHD children'
        )
        error.parent = parent
        error.child1 = vhdChildren[parent]
        error.child2 = path
@@ -222,7 +202,11 @@ async function handleVm(vmDir) {
    } else {
      console.warn('Error while checking backup', json)
      const missingVhds = linkedVhds.filter(_ => !vhds.has(_))
      console.warn(' %i/%i missing VHDs', missingVhds.length, linkedVhds.length)
      console.warn(
        ' %i/%i missing VHDs',
        missingVhds.length,
        linkedVhds.length
      )
      missingVhds.forEach(vhd => {
        console.warn(' ', vhd)
      })
@@ -309,16 +293,14 @@ module.exports = async function main(args) {
  const opts = getopts(args, {
    alias: {
      force: 'f',
      merge: 'm',
    },
    boolean: ['force', 'merge'],
    boolean: ['force'],
    default: {
      force: false,
      merge: false,
    },
  })

  ;({ force, merge } = opts)
  ;({ force } = opts)
  await asyncMap(opts._, async vmDir => {
    vmDir = resolve(vmDir)
@@ -1,54 +0,0 @@
const groupBy = require('lodash/groupBy')
const { createHash } = require('crypto')
const { dirname, resolve } = require('path')

const asyncMap = require('../_asyncMap')
const { readdir2, readFile, getSize } = require('../_fs')

const sha512 = str => createHash('sha512').update(str).digest('hex')
const sum = values => values.reduce((a, b) => a + b)

module.exports = async function info(vmDirs) {
  const jsonFiles = (
    await asyncMap(vmDirs, async vmDir => (await readdir2(vmDir)).filter(_ => _.endsWith('.json')))
  ).flat()

  const hashes = { __proto__: null }

  const info = (
    await asyncMap(jsonFiles, async jsonFile => {
      try {
        const jsonDir = dirname(jsonFile)
        const json = await readFile(jsonFile)

        const hash = sha512(json)
        if (hash in hashes) {
          console.log(jsonFile, 'duplicate of', hashes[hash])
          return
        }
        hashes[hash] = jsonFile

        const metadata = JSON.parse(json)

        return {
          jsonDir,
          jsonFile,
          metadata,
          size:
            json.length +
            (await (metadata.mode === 'delta'
              ? asyncMap(Object.values(metadata.vhds), _ => getSize(resolve(jsonDir, _))).then(sum)
              : getSize(resolve(jsonDir, metadata.xva)))),
        }
      } catch (error) {
        console.error(jsonFile, error)
      }
    })
  ).filter(_ => _ !== undefined)
  const byJobs = groupBy(info, 'metadata.jobId')
  Object.keys(byJobs)
    .sort()
    .forEach(jobId => {
      console.log(jobId, sum(byJobs[jobId].map(_ => _.size)))
    })
}
@@ -5,7 +5,7 @@ require('./_composeCommands')({
    get main() {
      return require('./commands/clean-vms')
    },
    usage: '[--force] [--merge] xo-vm-backups/*',
    usage: '[--force] xo-vm-backups/*',
  },
  'create-symlink-index': {
    get main() {
@@ -13,12 +13,6 @@ require('./_composeCommands')({
    },
    usage: 'xo-vm-backups <field path>',
  },
  info: {
    get main() {
      return require('./commands/info')
    },
    usage: 'xo-vm-backups/*',
  },
})(process.argv.slice(2), 'xo-backups').catch(error => {
  console.error('main', error)
  process.exitCode = 1
@@ -7,14 +7,13 @@
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "dependencies": {
    "@xen-orchestra/backups": "^0.1.1",
    "@xen-orchestra/fs": "^0.12.1",
    "@xen-orchestra/fs": "^0.10.4",
    "filenamify": "^4.1.0",
    "getopts": "^2.2.5",
    "limit-concurrency-decorator": "^0.4.0",
    "lodash": "^4.17.15",
    "promise-toolbox": "^0.16.0",
    "promise-toolbox": "^0.15.0",
    "proper-lockfile": "^4.1.1",
    "vhd-lib": "^1.0.0"
    "vhd-lib": "^0.7.2"
  },
  "engines": {
    "node": ">=7.10.1"
@@ -33,7 +32,7 @@
  "scripts": {
    "postversion": "npm publish --access public"
  },
  "version": "0.3.0",
  "version": "0.0.0",
  "license": "AGPL-3.0-or-later",
  "author": {
    "name": "Vates SAS",
@@ -1,3 +1,7 @@
// returns all entries but the last retention-th
exports.getOldEntries = (retention, entries) =>
  entries === undefined ? [] : retention > 0 ? entries.slice(0, -retention) : entries
  entries === undefined
    ? []
    : retention > 0
    ? entries.slice(0, -retention)
    : entries
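A quick worked example of `getOldEntries` as defined above — values computed from the ternary itself, not taken from the package's tests:

```js
getOldEntries(2, ['a', 'b', 'c', 'd']) // ['a', 'b'] — the last 2 are retained
getOldEntries(0, ['a', 'b']) // ['a', 'b'] — nothing retained, all are "old"
getOldEntries(2, undefined) // []
```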
@@ -4,7 +4,10 @@ const fs = require('fs-extra')
const isGzipFile = async fd => {
  // https://tools.ietf.org/html/rfc1952.html#page-5
  const magicNumber = Buffer.allocUnsafe(2)
  assert.strictEqual((await fs.read(fd, magicNumber, 0, magicNumber.length, 0)).bytesRead, magicNumber.length)
  assert.strictEqual(
    (await fs.read(fd, magicNumber, 0, magicNumber.length, 0)).bytesRead,
    magicNumber.length
  )
  return magicNumber[0] === 31 && magicNumber[1] === 139
}
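The two bytes compared above are the gzip magic number defined in RFC 1952:

```js
Buffer.from([31, 139]) // 0x1f 0x8b — first two bytes of every gzip file
```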
@@ -27,7 +30,10 @@ const isValidTar = async (size, fd) => {
  }

  const buf = Buffer.allocUnsafe(1024)
  assert.strictEqual((await fs.read(fd, buf, 0, buf.length, size - buf.length)).bytesRead, buf.length)
  assert.strictEqual(
    (await fs.read(fd, buf, 0, buf.length, size - buf.length)).bytesRead,
    buf.length
  )
  return buf.every(_ => _ === 0)
}
@@ -16,7 +16,7 @@
    "postversion": "npm publish --access public"
  },
  "dependencies": {
    "d3-time-format": "^3.0.0",
    "d3-time-format": "^2.2.3",
    "fs-extra": "^9.0.0"
  },
  "license": "AGPL-3.0-or-later",
@@ -32,7 +32,14 @@ ${cliName} v${pkg.version}
    )
  }

  const [srcXapiUrl, srcSnapshotUuid, tgtXapiUrl, tgtVmUuid, jobId, scheduleId] = args
  const [
    srcXapiUrl,
    srcSnapshotUuid,
    tgtXapiUrl,
    tgtVmUuid,
    jobId,
    scheduleId,
  ] = args

  const srcXapi = new Xapi({
    allowUnauthorized: true,
@@ -63,10 +70,16 @@ ${cliName} v${pkg.version}
    'xo:backup:vm': srcVm.uuid,
  }

  const [srcDisks, tgtDisks] = await Promise.all([srcXapi.getVmDisks(srcSnapshot), tgtXapi.getVmDisks(tgtVm)])
  const [srcDisks, tgtDisks] = await Promise.all([
    srcXapi.getVmDisks(srcSnapshot),
    tgtXapi.getVmDisks(tgtVm),
  ])
  const userDevices = Object.keys(tgtDisks)

  const tgtSr = await tgtXapi.getRecord('SR', tgtDisks[Object.keys(tgtDisks)[0]].SR)
  const tgtSr = await tgtXapi.getRecord(
    'SR',
    tgtDisks[Object.keys(tgtDisks)[0]].SR
  )

  await Promise.all([
    srcSnapshot.update_other_config(metadata),
@@ -77,7 +90,10 @@ ${cliName} v${pkg.version}
      'xo:backup:sr': tgtSr.uuid,
      'xo:copy_of': srcSnapshotUuid,
    }),
    tgtVm.update_blocked_operations('start', 'Start operation for this vm is blocked, clone it if you want to use it.'),
    tgtVm.update_blocked_operations(
      'start',
      'Start operation for this vm is blocked, clone it if you want to use it.'
    ),
    Promise.all(
      userDevices.map(userDevice => {
        const srcDisk = srcDisks[userDevice]
@@ -1 +1,3 @@
module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))
module.exports = require('../../@xen-orchestra/babel-config')(
  require('./package.json')
)
@@ -42,7 +42,10 @@ class Job {
      const now = schedule._createDate()
      scheduledDate = +next(schedule._schedule, now)
      const delay = scheduledDate - now
      this._timeout = delay < MAX_DELAY ? setTimeout(wrapper, delay) : setTimeout(scheduleNext, MAX_DELAY)
      this._timeout =
        delay < MAX_DELAY
          ? setTimeout(wrapper, delay)
          : setTimeout(scheduleNext, MAX_DELAY)
    }
  }
@@ -70,7 +73,12 @@ class Job {
class Schedule {
  constructor(pattern, zone = 'utc') {
    this._schedule = parse(pattern)
    this._createDate = zone.toLowerCase() === 'utc' ? moment.utc : zone === 'local' ? moment : () => moment.tz(zone)
    this._createDate =
      zone.toLowerCase() === 'utc'
        ? moment.utc
        : zone === 'local'
        ? moment
        : () => moment.tz(zone)
  }

  createJob(fn) {
@@ -37,7 +37,9 @@ describe('next()', () => {
  })

  it('fails when no solutions has been found', () => {
    expect(() => N('0 0 30 feb *')).toThrow('no solutions found for this schedule')
    expect(() => N('0 0 30 feb *')).toThrow(
      'no solutions found for this schedule'
    )
  })

  it('select the first sunday of the month', () => {
@@ -66,7 +66,9 @@ const createParser = ({ fields: [...fields], presets: { ...presets } }) => {
      aliasesRegExp.lastIndex = i
      const matches = aliasesRegExp.exec(pattern)
      if (matches === null) {
        throw new SyntaxError(`${field.name}: missing alias or integer at character ${i}`)
        throw new SyntaxError(
          `${field.name}: missing alias or integer at character ${i}`
        )
      }
      const [alias] = matches
      i += alias.length
@@ -75,7 +77,9 @@ const createParser = ({ fields: [...fields], presets: { ...presets } }) => {

    const { range } = field
    if (value < range[0] || value > range[1]) {
      throw new SyntaxError(`${field.name}: ${value} is not between ${range[0]} and ${range[1]}`)
      throw new SyntaxError(
        `${field.name}: ${value} is not between ${range[0]} and ${range[1]}`
      )
    }
    return value
  }
@@ -113,7 +117,9 @@ const createParser = ({ fields: [...fields], presets: { ...presets } }) => {
    {
      const schedule = presets[p]
      if (schedule !== undefined) {
        return typeof schedule === 'string' ? (presets[p] = parse(schedule)) : schedule
        return typeof schedule === 'string'
          ? (presets[p] = parse(schedule))
          : schedule
      }
    }

@@ -136,7 +142,9 @@ const createParser = ({ fields: [...fields], presets: { ...presets } }) => {

  consumeWhitespaces()
  if (i !== n) {
    throw new SyntaxError(`unexpected character at offset ${i}, expected end`)
    throw new SyntaxError(
      `unexpected character at offset ${i}, expected end`
    )
  }

  return schedule
@@ -33,7 +33,9 @@ describe('parse()', () => {
  })

  it('reports invalid aliases', () => {
    expect(() => parse('* * * jan-foo *')).toThrow('month: missing alias or integer at character 10')
    expect(() => parse('* * * jan-foo *')).toThrow(
      'month: missing alias or integer at character 10'
    )
  })

  it('dayOfWeek: 0 and 7 bind to sunday', () => {
@@ -1 +1,3 @@
module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))
module.exports = require('../../@xen-orchestra/babel-config')(
  require('./package.json')
)
@@ -60,4 +60,5 @@ export const get = (accessor: (input: ?any) => any, arg: ?any) => {
//   _ => new ProxyAgent(_)
// )
// ```
export const ifDef = (value: ?any, thenFn: (value: any) => any) => (value !== undefined ? thenFn(value) : value)
export const ifDef = (value: ?any, thenFn: (value: any) => any) =>
  value !== undefined ? thenFn(value) : value
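Usage example for `ifDef` above — apply `thenFn` only when the value is defined:

```js
ifDef('8080', Number) // 8080
ifDef(undefined, Number) // undefined — thenFn is not called
```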
@@ -1 +1,3 @@
module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))
module.exports = require('../../@xen-orchestra/babel-config')(
  require('./package.json')
)
@@ -19,11 +19,6 @@ import EE from 'events'
import emitAsync from '@xen-orchestra/emit-async'

const ee = new EE()

// exposing emitAsync on our event emitter
//
// it's not required though and we could have used directly via
// emitAsync.call(ee, event, args...)
ee.emitAsync = emitAsync

ee.on('start', async function () {
@@ -31,7 +26,7 @@ ee.on('start', async function () {
})

// similar to EventEmmiter#emit() but returns a promise which resolves when all
// listeners have settled
// listeners have resolved
await ee.emitAsync('start')

// by default, it will rejects as soon as one listener reject, you can customise
@@ -3,11 +3,6 @@ import EE from 'events'
import emitAsync from '@xen-orchestra/emit-async'

const ee = new EE()

// exposing emitAsync on our event emitter
//
// it's not required though and we could have used directly via
// emitAsync.call(ee, event, args...)
ee.emitAsync = emitAsync

ee.on('start', async function () {
@@ -15,7 +10,7 @@ ee.on('start', async function () {
})

// similar to EventEmmiter#emit() but returns a promise which resolves when all
// listeners have settled
// listeners have resolved
await ee.emitAsync('start')

// by default, it will rejects as soon as one listener reject, you can customise
@@ -1 +1,3 @@
module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))
module.exports = require('../../@xen-orchestra/babel-config')(
  require('./package.json')
)
@@ -1,7 +1,7 @@
{
  "private": false,
  "name": "@xen-orchestra/fs",
  "version": "0.12.1",
  "version": "0.10.4",
  "license": "AGPL-3.0-or-later",
  "description": "The File System for Xen Orchestra backups.",
  "keywords": [],
@@ -22,22 +22,20 @@
    "node": ">=8.10"
  },
  "dependencies": {
    "@marsaud/smb2": "^0.17.2",
    "@marsaud/smb2": "^0.15.0",
    "@sindresorhus/df": "^3.1.1",
    "@sullux/aws-sdk": "^1.0.5",
    "@xen-orchestra/async-map": "^0.0.0",
    "aws-sdk": "^2.686.0",
    "decorator-synchronized": "^0.5.0",
    "execa": "^5.0.0",
    "execa": "^4.0.2",
    "fs-extra": "^9.0.0",
    "get-stream": "^6.0.0",
    "get-stream": "^5.1.0",
    "limit-concurrency-decorator": "^0.4.0",
    "lodash": "^4.17.4",
    "promise-toolbox": "^0.16.0",
    "promise-toolbox": "^0.15.0",
    "readable-stream": "^3.0.6",
    "through2": "^4.0.2",
    "tmp": "^0.2.1",
    "xo-remote-parser": "^0.6.0"
    "through2": "^3.0.0",
    "tmp": "^0.1.0",
    "xo-remote-parser": "^0.5.0"
  },
  "devDependencies": {
    "@babel/cli": "^7.0.0",
@@ -60,7 +58,7 @@
    "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
    "prebuild": "yarn run clean",
    "predev": "yarn run clean",
    "prepublishOnly": "yarn run build",
    "prepare": "yarn run build",
    "postversion": "npm publish"
  },
  "author": {
@@ -6,19 +6,33 @@ import { tmpdir } from 'os'

import LocalHandler from './local'

const sudoExeca = (command, args, opts) => execa('sudo', [command, ...args], opts)
const sudoExeca = (command, args, opts) =>
  execa('sudo', [command, ...args], opts)

export default class MountHandler extends LocalHandler {
  constructor(remote, { mountsDir = join(tmpdir(), 'xo-fs-mounts'), useSudo = false, ...opts } = {}, params) {
  constructor(
    remote,
    {
      mountsDir = join(tmpdir(), 'xo-fs-mounts'),
      useSudo = false,
      ...opts
    } = {},
    params
  ) {
    super(remote, opts)

    this._execa = useSudo ? sudoExeca : execa
    this._keeper = undefined
    this._params = {
      ...params,
      options: [params.options, remote.options ?? params.defaultOptions].filter(_ => _ !== undefined).join(','),
      options: [params.options, remote.options]
        .filter(_ => _ !== undefined)
        .join(','),
    }
    this._realPath = join(mountsDir, remote.id || Math.random().toString(36).slice(2))
    this._realPath = join(
      mountsDir,
      remote.id || Math.random().toString(36).slice(2)
    )
  }

  async _forget() {
@@ -61,12 +75,16 @@ export default class MountHandler extends LocalHandler {

      // Linux mount is more flexible in which order the mount arguments appear.
      // But FreeBSD requires this order of the arguments.
      await this._execa('mount', ['-o', options, '-t', type, device, realPath], {
        env: {
          LANG: 'C',
          ...env,
        },
      })
      await this._execa(
        'mount',
        ['-o', options, '-t', type, device, realPath],
        {
          env: {
            LANG: 'C',
            ...env,
          },
        }
      )
    } catch (error) {
      try {
        // the failure may mean it's already mounted, use `findmnt` to check
@@ -81,7 +99,9 @@ export default class MountHandler extends LocalHandler {

    // keep an open file on the mount to prevent it from being unmounted if used
    // by another handler/process
    const keeperPath = `${realPath}/.keeper_${Math.random().toString(36).slice(2)}`
    const keeperPath = `${realPath}/.keeper_${Math.random()
      .toString(36)
      .slice(2)}`
    this._keeper = await fs.open(keeperPath, 'w')
    ignoreErrors.call(fs.unlink(keeperPath))
  }
@@ -5,7 +5,7 @@ import getStream from 'get-stream'

import asyncMap from '@xen-orchestra/async-map'
import limit from 'limit-concurrency-decorator'
import path, { basename } from 'path'
import path from 'path'
import synchronized from 'decorator-synchronized'
import { fromCallback, fromEvent, ignoreErrors, timeout } from 'promise-toolbox'
import { parse } from 'xo-remote-parser'
@@ -86,7 +86,9 @@ export default class RemoteHandlerAbstract {
    }
    ;({ timeout: this._timeout = DEFAULT_TIMEOUT } = options)

    const sharedLimit = limit(options.maxParallelOperations ?? DEFAULT_MAX_PARALLEL_OPERATIONS)
    const sharedLimit = limit(
      options.maxParallelOperations ?? DEFAULT_MAX_PARALLEL_OPERATIONS
    )
    this.closeFile = sharedLimit(this.closeFile)
    this.getInfo = sharedLimit(this.getInfo)
    this.getSize = sharedLimit(this.getSize)
@@ -119,15 +121,16 @@ export default class RemoteHandlerAbstract {
    await this.__closeFile(fd)
  }

  // TODO: remove method
  async createOutputStream(file: File, { checksum = false, dirMode, ...options }: Object = {}): Promise<LaxWritable> {
  async createOutputStream(
    file: File,
    { checksum = false, ...options }: Object = {}
  ): Promise<LaxWritable> {
    if (typeof file === 'string') {
      file = normalizePath(file)
    }
    const path = typeof file === 'string' ? file : file.path
    const streamP = timeout.call(
      this._createOutputStream(file, {
        dirMode,
        flags: 'wx',
        ...options,
      }),
@@ -149,7 +152,9 @@ export default class RemoteHandlerAbstract {

    // $FlowFixMe
    checksumStream.checksumWritten = checksumStream.checksum
      .then(value => this._outputFile(checksumFile(path), value, { flags: 'wx' }))
      .then(value =>
        this._outputFile(checksumFile(path), value, { flags: 'wx' })
      )
      .catch(forwardError)

    return checksumStream
@@ -163,24 +168,30 @@ export default class RemoteHandlerAbstract {
      file = normalizePath(file)
    }
    const path = typeof file === 'string' ? file : file.path
    const streamP = timeout.call(this._createReadStream(file, options), this._timeout).then(stream => {
      // detect early errors
      let promise = fromEvent(stream, 'readable')
    const streamP = timeout
      .call(this._createReadStream(file, options), this._timeout)
      .then(stream => {
        // detect early errors
        let promise = fromEvent(stream, 'readable')

      // try to add the length prop if missing and not a range stream
      if (stream.length === undefined && options.end === undefined && options.start === undefined) {
        promise = Promise.all([
          promise,
          ignoreErrors.call(
            this._getSize(file).then(size => {
              stream.length = size
            })
          ),
        ])
      }
        // try to add the length prop if missing and not a range stream
        if (
          stream.length === undefined &&
          options.end === undefined &&
          options.start === undefined
        ) {
          promise = Promise.all([
            promise,
            ignoreErrors.call(
              this._getSize(file).then(size => {
                stream.length = size
              })
            ),
          ])
        }

      return promise.then(() => stream)
    })
        return promise.then(() => stream)
      })

    if (!checksum) {
      return streamP
@@ -193,7 +204,10 @@ export default class RemoteHandlerAbstract {
      checksum =>
        streamP.then(stream => {
          const { length } = stream
          stream = (validChecksumOfReadStream(stream, String(checksum).trim()): LaxReadable)
          stream = (validChecksumOfReadStream(
            stream,
            String(checksum).trim()
          ): LaxReadable)
          stream.length = length

          return stream
@@ -207,17 +221,19 @@ export default class RemoteHandlerAbstract {
    )
  }

  // write a stream to a file using a temporary file
  async outputStream(
    input: Readable | Promise<Readable>,
    path: string,
    { checksum = true, dirMode }: { checksum?: boolean, dirMode?: number } = {}
  ): Promise<void> {
    path = normalizePath(path)
    return this._outputStream(await input, normalizePath(path), {
      checksum,
      dirMode,
    })
  createWriteStream(
    file: File,
    options: { end?: number, flags?: string, start?: number } = {}
  ): Promise<LaxWritable> {
    return timeout.call(
      this._createWriteStream(
        typeof file === 'string' ? normalizePath(file) : file,
        {
          flags: 'wx',
          ...options,
        }
      )
    )
  }

  // Free the resources possibly dedicated to put the remote at work, when it
@@ -236,12 +252,18 @@ export default class RemoteHandlerAbstract {
  }

  async getSize(file: File): Promise<number> {
    return timeout.call(this._getSize(typeof file === 'string' ? normalizePath(file) : file), this._timeout)
    return timeout.call(
      this._getSize(typeof file === 'string' ? normalizePath(file) : file),
      this._timeout
    )
  }

  async list(
    dir: string,
    { filter, prependDir = false }: { filter?: (name: string) => boolean, prependDir?: boolean } = {}
    {
      filter,
      prependDir = false,
    }: { filter?: (name: string) => boolean, prependDir?: boolean } = {}
  ): Promise<string[]> {
    const virtualDir = normalizePath(dir)
    dir = normalizePath(dir)
@@ -260,12 +282,12 @@ export default class RemoteHandlerAbstract {
    return entries
  }

  async mkdir(dir: string, { mode }: { mode?: number } = {}): Promise<void> {
    await this.__mkdir(normalizePath(dir), { mode })
  async mkdir(dir: string): Promise<void> {
    await this.__mkdir(normalizePath(dir))
  }

  async mktree(dir: string, { mode }: { mode?: number } = {}): Promise<void> {
    await this._mktree(normalizePath(dir), { mode })
  async mktree(dir: string): Promise<void> {
    await this._mktree(normalizePath(dir))
  }

  openFile(path: string, flags: string): Promise<FileDescriptor> {
@@ -275,32 +297,65 @@ export default class RemoteHandlerAbstract {
  async outputFile(
    file: string,
    data: Data,
    { dirMode, flags = 'wx' }: { dirMode?: number, flags?: string } = {}
    { flags = 'wx' }: { flags?: string } = {}
  ): Promise<void> {
    await this._outputFile(normalizePath(file), data, { dirMode, flags })
    await this._outputFile(normalizePath(file), data, { flags })
  }

  async read(file: File, buffer: Buffer, position?: number): Promise<{| bytesRead: number, buffer: Buffer |}> {
    return this._read(typeof file === 'string' ? normalizePath(file) : file, buffer, position)
  async read(
    file: File,
    buffer: Buffer,
    position?: number
  ): Promise<{| bytesRead: number, buffer: Buffer |}> {
    return this._read(
      typeof file === 'string' ? normalizePath(file) : file,
      buffer,
      position
    )
  }

  async readFile(file: string, { flags = 'r' }: { flags?: string } = {}): Promise<Buffer> {
  async readFile(
    file: string,
    { flags = 'r' }: { flags?: string } = {}
  ): Promise<Buffer> {
    return this._readFile(normalizePath(file), { flags })
  }

  async rename(oldPath: string, newPath: string, { checksum = false }: Object = {}) {
  async refreshChecksum(path: string): Promise<void> {
    path = normalizePath(path)

    const stream = (await this._createReadStream(path, { flags: 'r' })).pipe(
      createChecksumStream()
    )
    stream.resume() // start reading the whole file
    await this._outputFile(checksumFile(path), await stream.checksum, {
      flags: 'wx',
    })
  }

  async rename(
    oldPath: string,
    newPath: string,
    { checksum = false }: Object = {}
  ) {
    oldPath = normalizePath(oldPath)
    newPath = normalizePath(newPath)

    let p = timeout.call(this._rename(oldPath, newPath), this._timeout)
    if (checksum) {
      p = Promise.all([p, this._rename(checksumFile(oldPath), checksumFile(newPath))])
      p = Promise.all([
        p,
        this._rename(checksumFile(oldPath), checksumFile(newPath)),
      ])
    }
    return p
  }

  async rmdir(dir: string): Promise<void> {
    await timeout.call(this._rmdir(normalizePath(dir)).catch(ignoreEnoent), this._timeout)
    await timeout.call(
      this._rmdir(normalizePath(dir)).catch(ignoreEnoent),
      this._timeout
    )
  }

  async rmtree(dir: string): Promise<void> {
@@ -365,11 +420,23 @@ export default class RemoteHandlerAbstract {
    await this._unlink(file).catch(ignoreEnoent)
  }

  async write(file: File, buffer: Buffer, position: number): Promise<{| bytesWritten: number, buffer: Buffer |}> {
    await this._write(typeof file === 'string' ? normalizePath(file) : file, buffer, position)
  async write(
    file: File,
    buffer: Buffer,
    position: number
  ): Promise<{| bytesWritten: number, buffer: Buffer |}> {
    await this._write(
      typeof file === 'string' ? normalizePath(file) : file,
      buffer,
      position
    )
  }

  async writeFile(file: string, data: Data, { flags = 'wx' }: { flags?: string } = {}): Promise<void> {
  async writeFile(
    file: string,
    data: Data,
    { flags = 'wx' }: { flags?: string } = {}
  ): Promise<void> {
    await this._writeFile(normalizePath(file), data, { flags })
  }

@@ -379,9 +446,9 @@ export default class RemoteHandlerAbstract {
    await timeout.call(this._closeFile(fd.fd), this._timeout)
  }

  async __mkdir(dir: string, { mode }: { mode?: number } = {}): Promise<void> {
  async __mkdir(dir: string): Promise<void> {
    try {
      await this._mkdir(dir, { mode })
      await this._mkdir(dir)
    } catch (error) {
      if (error == null || error.code !== 'EEXIST') {
        throw error
@@ -407,7 +474,7 @@ export default class RemoteHandlerAbstract {
    throw new Error('Not implemented')
  }

  async _createOutputStream(file: File, { dirMode, ...options }: Object = {}): Promise<LaxWritable> {
  async _createOutputStream(file: File, options: Object): Promise<LaxWritable> {
    try {
      return await this._createWriteStream(file, options)
    } catch (error) {
@@ -416,7 +483,7 @@ export default class RemoteHandlerAbstract {
      }
    }

    await this._mktree(dirname(file), { mode: dirMode })
    await this._mktree(dirname(file))
    return this._createOutputStream(file, options)
  }
@@ -447,56 +514,45 @@ export default class RemoteHandlerAbstract {
    throw new Error('Not implemented')
  }

  async _mktree(dir: string, { mode }: { mode?: number } = {}): Promise<void> {
  async _mktree(dir: string): Promise<void> {
    try {
      return await this.__mkdir(dir, { mode })
      return await this.__mkdir(dir)
    } catch (error) {
      if (error.code !== 'ENOENT') {
        throw error
      }
    }

    await this._mktree(dirname(dir), { mode })
    return this._mktree(dir, { mode })
    await this._mktree(dirname(dir))
    return this._mktree(dir)
  }

  async _openFile(path: string, flags: string): Promise<mixed> {
    throw new Error('Not implemented')
  }

  async _outputFile(file: string, data: Data, { dirMode, flags }: { dirMode?: number, flags?: string }): Promise<void> {
  async _outputFile(
    file: string,
    data: Data,
    options: { flags?: string }
  ): Promise<void> {
    try {
      return await this._writeFile(file, data, { flags })
      return await this._writeFile(file, data, options)
    } catch (error) {
      if (error.code !== 'ENOENT') {
        throw error
      }
    }

    await this._mktree(dirname(file), { mode: dirMode })
    return this._outputFile(file, data, { flags })
    await this._mktree(dirname(file))
    return this._outputFile(file, data, options)
  }

  async _outputStream(input: Readable, path: string, { checksum, dirMode }: { checksum?: boolean, dirMode?: number }) {
    const tmpPath = `${dirname(path)}/.${basename(path)}`
    const output = await this.createOutputStream(tmpPath, {
      checksum,
      dirMode,
    })
    try {
      input.pipe(output)
      await fromEvent(output, 'finish')
      await output.checksumWritten
      // $FlowFixMe
      await input.task
      await this.rename(tmpPath, path, { checksum })
    } catch (error) {
      await this.unlink(tmpPath, { checksum })
      throw error
    }
  }

  _read(file: File, buffer: Buffer, position?: number): Promise<{| bytesRead: number, buffer: Buffer |}> {
  _read(
    file: File,
    buffer: Buffer,
    position?: number
  ): Promise<{| bytesRead: number, buffer: Buffer |}> {
    throw new Error('Not implemented')
  }

@@ -554,11 +610,19 @@ export default class RemoteHandlerAbstract {
    }
  }

  async _writeFd(fd: FileDescriptor, buffer: Buffer, position: number): Promise<void> {
  async _writeFd(
    fd: FileDescriptor,
    buffer: Buffer,
    position: number
  ): Promise<void> {
    throw new Error('Not implemented')
  }

  async _writeFile(file: string, data: Data, options: { flags?: string }): Promise<void> {
  async _writeFile(
    file: string,
    data: Data,
    options: { flags?: string }
  ): Promise<void> {
    throw new Error('Not implemented')
  }
}
@@ -578,7 +642,8 @@ function createPrefixWrapperMethods() {
    if (
      hasOwnProperty.call(pPw, name) ||
      name[0] === '_' ||
      typeof (value = (descriptor = getOwnPropertyDescriptor(pRha, name)).value) !== 'function'
      typeof (value = (descriptor = getOwnPropertyDescriptor(pRha, name))
        .value) !== 'function'
    ) {
      return
    }
@@ -42,6 +42,18 @@ describe('createOutputStream()', () => {
    })
  })

  describe('createReadStream()', () => {
    it(`throws in case of timeout`, async () => {
      const testHandler = new TestHandler({
        createReadStream: () => new Promise(() => {}),
      })

      const promise = testHandler.createReadStream('file')
      jest.advanceTimersByTime(TIMEOUT)
      await expect(promise).rejects.toThrowError(TimeoutError)
    })
  })

  describe('getInfo()', () => {
    it('throws in case of timeout', async () => {
      const testHandler = new TestHandler({
@@ -27,7 +27,9 @@ const ID_TO_ALGORITHM = invert(ALGORITHM_TO_ID)
// const checksumStream = source.pipe(createChecksumStream())
// checksumStream.resume() // make the data flow without an output
// console.log(await checksumStream.checksum)
export const createChecksumStream = (algorithm: string = 'md5'): Transform & { checksum: Promise<string> } => {
export const createChecksumStream = (
  algorithm: string = 'md5'
): Transform & { checksum: Promise<string> } => {
  const algorithmId = ALGORITHM_TO_ID[algorithm]

  if (!algorithmId) {
@@ -58,7 +60,10 @@ export const validChecksumOfReadStream = (
  stream: Readable,
  expectedChecksum: string
): Readable & { checksumVerified: Promise<void> } => {
  const algorithmId = expectedChecksum.slice(1, expectedChecksum.indexOf('$', 1))
  const algorithmId = expectedChecksum.slice(
    1,
    expectedChecksum.indexOf('$', 1)
  )

  if (!algorithmId) {
    throw new Error(`unknown algorithm: ${algorithmId}`)
@@ -77,7 +82,11 @@
      const checksum = `$${algorithmId}$$${hash.digest('hex')}`

      callback(
        checksum !== expectedChecksum ? new Error(`Bad checksum (${checksum}), expected: ${expectedChecksum}`) : null
        checksum !== expectedChecksum
          ? new Error(
              `Bad checksum (${checksum}), expected: ${expectedChecksum}`
            )
          : null
      )
    }
  )
@@ -2,6 +2,7 @@

import 'dotenv/config'
import asyncIteratorToStream from 'async-iterator-to-stream'
import getStream from 'get-stream'
import { forOwn, random } from 'lodash'
import { fromCallback } from 'promise-toolbox'
import { pipeline } from 'readable-stream'
@@ -90,6 +91,31 @@ handlers.forEach(url => {
    })
  })

  describe('#createReadStream()', () => {
    beforeEach(() => handler.outputFile('file', TEST_DATA))

    testWithFileDescriptor('file', 'r', async ({ file, flags }) => {
      await expect(
        await getStream.buffer(
          await handler.createReadStream(file, { flags })
        )
      ).toEqual(TEST_DATA)
    })
  })

  describe('#createWriteStream()', () => {
    testWithFileDescriptor('file', 'wx', async ({ file, flags }) => {
      const stream = await handler.createWriteStream(file, { flags })
      await fromCallback(pipeline, createTestDataStream(), stream)
      await expect(await handler.readFile('file')).toEqual(TEST_DATA)
    })

    it('fails if parent dir is missing', async () => {
      const error = await rejectionOf(handler.createWriteStream('dir/file'))
      expect(error.code).toBe('ENOENT')
    })
  })

  describe('#getInfo()', () => {
    let info
    beforeAll(async () => {
@@ -126,12 +152,16 @@ handlers.forEach(url => {

    it('can prepend the directory to entries', async () => {
      await handler.outputFile('dir/file', '')
      expect(await handler.list('dir', { prependDir: true })).toEqual(['/dir/file'])
      expect(await handler.list('dir', { prependDir: true })).toEqual([
        '/dir/file',
      ])
    })

    it('can prepend the directory to entries', async () => {
      await handler.outputFile('dir/file', '')
      expect(await handler.list('dir', { prependDir: true })).toEqual(['/dir/file'])
      expect(await handler.list('dir', { prependDir: true })).toEqual([
        '/dir/file',
      ])
    })
  })

@@ -304,7 +334,10 @@ handlers.forEach(url => {
        return { offset, expected }
      })(),
      'increase file size': (() => {
        const offset = random(TEST_DATA_LEN - PATCH_DATA_LEN + 1, TEST_DATA_LEN)
        const offset = random(
          TEST_DATA_LEN - PATCH_DATA_LEN + 1,
          TEST_DATA_LEN
        )

        const expected = Buffer.alloc(offset + PATCH_DATA_LEN)
        TEST_DATA.copy(expected)
@@ -4,7 +4,6 @@ import execa from 'execa'
import type RemoteHandler from './abstract'
import RemoteHandlerLocal from './local'
import RemoteHandlerNfs from './nfs'
import RemoteHandlerS3 from './s3'
import RemoteHandlerSmb from './smb'
import RemoteHandlerSmbMount from './smb-mount'

@@ -14,7 +13,6 @@ export type Remote = { url: string }
const HANDLERS = {
  file: RemoteHandlerLocal,
  nfs: RemoteHandlerNfs,
  s3: RemoteHandlerS3,
}

try {
@@ -63,7 +63,9 @@ export default class LocalHandler extends RemoteHandlerAbstract {
  }

  async _getSize(file) {
    const stats = await fs.stat(this._getFilePath(typeof file === 'string' ? file : file.path))
    const stats = await fs.stat(
      this._getFilePath(typeof file === 'string' ? file : file.path)
    )
    return stats.size
  }

@@ -71,8 +73,8 @@ export default class LocalHandler extends RemoteHandlerAbstract {
    return fs.readdir(this._getFilePath(dir))
  }

  _mkdir(dir, { mode }) {
    return fs.mkdir(this._getFilePath(dir), { mode })
  _mkdir(dir) {
    return fs.mkdir(this._getFilePath(dir))
  }

  async _openFile(path, flags) {
@@ -83,7 +85,13 @@ export default class LocalHandler extends RemoteHandlerAbstract {
    const needsClose = typeof file === 'string'
    file = needsClose ? await fs.open(this._getFilePath(file), 'r') : file.fd
    try {
      return await fs.read(file, buffer, 0, buffer.length, position === undefined ? null : position)
      return await fs.read(
        file,
        buffer,
        0,
        buffer.length,
        position === undefined ? null : position
      )
    } finally {
      if (needsClose) {
        await fs.close(file)
@@ -2,13 +2,15 @@ import { parse } from 'xo-remote-parser'

import MountHandler from './_mount'

const DEFAULT_NFS_OPTIONS = 'vers=3'

export default class NfsHandler extends MountHandler {
  constructor(remote, opts) {
    const { host, port, path } = parse(remote.url)
    super(remote, opts, {
      type: 'nfs',
      device: `${host}${port !== undefined ? ':' + port : ''}:${path}`,
      defaultOptions: 'vers=3',
      options: DEFAULT_NFS_OPTIONS,
    })
  }
@@ -1,283 +0,0 @@
import aws from '@sullux/aws-sdk'
import assert from 'assert'
import http from 'http'
import https from 'https'
import { parse } from 'xo-remote-parser'

import RemoteHandlerAbstract from './abstract'
import { createChecksumStream } from './checksum'

// endpoints https://docs.aws.amazon.com/general/latest/gr/s3.html

// limits: https://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html
const MIN_PART_SIZE = 1024 * 1024 * 5 // 5MB
const MAX_PART_SIZE = 1024 * 1024 * 1024 * 5 // 5GB
const MAX_PARTS_COUNT = 10000
const MAX_OBJECT_SIZE = 1024 * 1024 * 1024 * 1024 * 5 // 5TB
const IDEAL_FRAGMENT_SIZE = Math.ceil(MAX_OBJECT_SIZE / MAX_PARTS_COUNT) // the smallest fragment size that still allows a 5TB upload in 10000 fragments, about 524MB

const USE_SSL = true
export default class S3Handler extends RemoteHandlerAbstract {
  constructor(remote, _opts) {
    super(remote)
    const { host, path, username, password } = parse(remote.url)
    // https://www.zenko.io/blog/first-things-first-getting-started-scality-s3-server/
    const params = {
      accessKeyId: username,
      apiVersion: '2006-03-01',
      endpoint: host,
      s3ForcePathStyle: true,
      secretAccessKey: password,
      signatureVersion: 'v4',
      httpOptions: {
        // long timeout helps big backups
        timeout: 600000,
      },
    }
    if (!USE_SSL) {
      // remove @sullux/aws-sdk ssl agent
      params.httpOptions.agent = new http.Agent()
      params.sslEnabled = false
    } else {
      // this replaces @sullux/aws-sdk agent, whose activation of keepalive seems to provoke resource exhaustion
      params.httpOptions.agent = new https.Agent()
    }
    this._s3 = aws(params).s3

    const splitPath = path.split('/').filter(s => s.length)
    this._bucket = splitPath.shift()
    this._dir = splitPath.join('/')
  }

  get type() {
    return 's3'
  }

  _createParams(file) {
    return { Bucket: this._bucket, Key: this._dir + file }
  }

  async _outputStream(input, path, { checksum }) {
    let inputStream = input
    if (checksum) {
      const checksumStream = createChecksumStream()
      const forwardError = error => {
        checksumStream.emit('error', error)
      }
      input.pipe(checksumStream)
      input.on('error', forwardError)
      inputStream = checksumStream
    }
    await this._s3.upload(
      {
        ...this._createParams(path),
        Body: inputStream,
      },
      { partSize: IDEAL_FRAGMENT_SIZE, queueSize: 1 }
    )
    if (checksum) {
      const checksum = await inputStream.checksum
      const params = {
        ...this._createParams(path + '.checksum'),
        Body: checksum,
      }
      await this._s3.upload(params)
    }
    await input.task
  }

  async _writeFile(file, data, options) {
    return this._s3.putObject({ ...this._createParams(file), Body: data })
  }

  async _createReadStream(file, options) {
    // https://github.com/Sullux/aws-sdk/issues/11
    return this._s3.getObject.raw(this._createParams(file)).createReadStream()
  }

  async _unlink(file) {
    return this._s3.deleteObject(this._createParams(file))
  }

  async _list(dir) {
    function splitPath(path) {
      return path.split('/').filter(d => d.length)
    }

    const prefix = [this._dir, dir].join('/')
    const splitPrefix = splitPath(prefix)
    const result = await this._s3.listObjectsV2({
      Bucket: this._bucket,
      Prefix: splitPrefix.join('/'),
    })
    const uniq = new Set()
    for (const entry of result.Contents) {
      const line = splitPath(entry.Key)
      if (line.length > splitPrefix.length) {
        uniq.add(line[splitPrefix.length])
      }
    }
    return [...uniq]
  }

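  // S3 has no rename operation: the object is copied to the new key with a
  // server-side multipart copy (one uploadPartCopy per MAX_PART_SIZE slice),
  // then the old key is deleted.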
  async _rename(oldPath, newPath) {
    const size = await this._getSize(oldPath)
    const multipartParams = await this._s3.createMultipartUpload({ ...this._createParams(newPath) })
    const param2 = { ...multipartParams, CopySource: `/${this._bucket}/${this._dir}${oldPath}` }
    try {
      const parts = []
      let start = 0
      while (start < size) {
        const range = `bytes=${start}-${Math.min(start + MAX_PART_SIZE, size) - 1}`
        const partParams = { ...param2, PartNumber: parts.length + 1, CopySourceRange: range }
        const upload = await this._s3.uploadPartCopy(partParams)
        parts.push({ ETag: upload.CopyPartResult.ETag, PartNumber: partParams.PartNumber })
        start += MAX_PART_SIZE
      }
      await this._s3.completeMultipartUpload({ ...multipartParams, MultipartUpload: { Parts: parts } })
    } catch (e) {
      await this._s3.abortMultipartUpload(multipartParams)
      throw e
    }
    await this._s3.deleteObject(this._createParams(oldPath))
  }

  async _getSize(file) {
    if (typeof file !== 'string') {
      file = file.fd
    }
    const result = await this._s3.headObject(this._createParams(file))
    return +result.ContentLength
  }

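  // random-access read, implemented as a ranged GET (HTTP Range header)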
  async _read(file, buffer, position = 0) {
    if (typeof file !== 'string') {
      file = file.fd
    }
    const params = this._createParams(file)
    params.Range = `bytes=${position}-${position + buffer.length - 1}`
    const result = await this._s3.getObject(params)
    result.Body.copy(buffer)
    return { bytesRead: result.Body.length, buffer }
  }

  async _write(file, buffer, position) {
    if (typeof file !== 'string') {
      file = file.fd
    }
    const uploadParams = this._createParams(file)
    let fileSize
    try {
      fileSize = +(await this._s3.headObject(uploadParams)).ContentLength
    } catch (e) {
      if (e.code === 'NotFound') {
        fileSize = 0
      } else {
        throw e
      }
    }
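    // small object: simpler to read it whole, patch it in memory and put it back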
    if (fileSize < MIN_PART_SIZE) {
      const resultBuffer = Buffer.alloc(Math.max(fileSize, position + buffer.length))
      const fileContent = fileSize !== 0 ? (await this._s3.getObject(uploadParams)).Body : Buffer.alloc(0)
      fileContent.copy(resultBuffer)
      buffer.copy(resultBuffer, position)
      await this._s3.putObject({ ...uploadParams, Body: resultBuffer })
      return { buffer, bytesWritten: buffer.length }
    } else {
      // using this trick: https://stackoverflow.com/a/38089437/72637
      // multipart fragments have a minimum size of 5 MB and a max of 5 GB unless they are the last one
      // splitting the file in 3 parts: [prefix, edit, suffix]
      // if `prefix` is bigger than 5 MB, it will be sourced from uploadPartCopy()
      // otherwise it will be downloaded and concatenated to `edit`
      // `edit` will always be an uploaded part
      // `suffix` will always be sourced from uploadPartCopy()
      // then everything will be sliced into 5 GB parts before getting uploaded
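      // e.g. a 4 KB write at position 100 MB in a 1 GB object gives:
      //   prefix = [0, 100 MB)           -> big enough, copied server-side
      //   edit   = the 4 KB buffer       -> topped up from the suffix to reach 5 MB, then uploaded
      //   suffix = [100 MB + 4 KB, 1 GB) -> copied server-side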
      const multipartParams = await this._s3.createMultipartUpload(uploadParams)
      const copyMultipartParams = {
        ...multipartParams,
        CopySource: `/${this._bucket}/${this._dir + file}`,
      }
      try {
        const parts = []
        const prefixSize = position
        let suffixOffset = prefixSize + buffer.length
        let suffixSize = Math.max(0, fileSize - suffixOffset)
        let hasSuffix = suffixSize > 0
        let editBuffer = buffer
        let editBufferOffset = position
        let partNumber = 1
        let prefixPosition = 0
        // use floor() so that the last fragment is handled in the if below
        let fragmentsCount = Math.floor(prefixSize / MAX_PART_SIZE)
        const prefixFragmentSize = MAX_PART_SIZE
        let prefixLastFragmentSize = prefixSize - prefixFragmentSize * fragmentsCount
        if (prefixLastFragmentSize >= MIN_PART_SIZE) {
          // the last fragment of the prefix is smaller than MAX_PART_SIZE, but bigger than the minimum
          // so we can copy it too
          fragmentsCount++
          prefixLastFragmentSize = 0
        }
        for (let i = 0; i < fragmentsCount; i++) {
          const fragmentEnd = Math.min(prefixPosition + prefixFragmentSize, prefixSize)
          assert.strictEqual(fragmentEnd - prefixPosition <= MAX_PART_SIZE, true)
          const range = `bytes=${prefixPosition}-${fragmentEnd - 1}`
          const copyPrefixParams = { ...copyMultipartParams, PartNumber: partNumber++, CopySourceRange: range }
          const part = await this._s3.uploadPartCopy(copyPrefixParams)
          parts.push({ ETag: part.CopyPartResult.ETag, PartNumber: copyPrefixParams.PartNumber })
          prefixPosition += prefixFragmentSize
        }
        if (prefixLastFragmentSize) {
          // grab everything from the prefix that was too small to be copied, download and merge to the edit buffer.
          const downloadParams = { ...uploadParams, Range: `bytes=${prefixPosition}-${prefixSize - 1}` }
          const prefixBuffer = prefixSize > 0 ? (await this._s3.getObject(downloadParams)).Body : Buffer.alloc(0)
          editBuffer = Buffer.concat([prefixBuffer, buffer])
          editBufferOffset -= prefixLastFragmentSize
        }
        if (hasSuffix && editBuffer.length < MIN_PART_SIZE) {
          // the edit fragment is too short and is not the last fragment
          // let's steal from the suffix fragment to reach the minimum size
          // the suffix might be too short and itself entirely absorbed in the edit fragment, making it the last one.
          const complementSize = Math.min(MIN_PART_SIZE - editBuffer.length, suffixSize)
          const complementOffset = editBufferOffset + editBuffer.length
          suffixOffset += complementSize
          suffixSize -= complementSize
          hasSuffix = suffixSize > 0
          const prefixRange = `bytes=${complementOffset}-${complementOffset + complementSize - 1}`
          const downloadParams = { ...uploadParams, Range: prefixRange }
          const complementBuffer = (await this._s3.getObject(downloadParams)).Body
          editBuffer = Buffer.concat([editBuffer, complementBuffer])
        }
        const editParams = { ...multipartParams, Body: editBuffer, PartNumber: partNumber++ }
        const editPart = await this._s3.uploadPart(editParams)
        parts.push({ ETag: editPart.ETag, PartNumber: editParams.PartNumber })
        if (hasSuffix) {
          // use ceil because the last fragment can be arbitrarily small.
          const suffixFragments = Math.ceil(suffixSize / MAX_PART_SIZE)
          let suffixFragmentOffset = suffixOffset
          for (let i = 0; i < suffixFragments; i++) {
            const fragmentEnd = suffixFragmentOffset + MAX_PART_SIZE
            assert.strictEqual(Math.min(fileSize, fragmentEnd) - suffixFragmentOffset <= MAX_PART_SIZE, true)
            const suffixRange = `bytes=${suffixFragmentOffset}-${Math.min(fileSize, fragmentEnd) - 1}`
            const copySuffixParams = { ...copyMultipartParams, PartNumber: partNumber++, CopySourceRange: suffixRange }
            const suffixPart = (await this._s3.uploadPartCopy(copySuffixParams)).CopyPartResult
            parts.push({ ETag: suffixPart.ETag, PartNumber: copySuffixParams.PartNumber })
            suffixFragmentOffset = fragmentEnd
          }
        }
        await this._s3.completeMultipartUpload({
          ...multipartParams,
          MultipartUpload: { Parts: parts },
        })
      } catch (e) {
        await this._s3.abortMultipartUpload(multipartParams)
        throw e
      }
    }
  }

  async _openFile(path, flags) {
    return path
  }

  async _closeFile(fd) {}
}

@@ -5,7 +5,9 @@ import normalizePath from './_normalizePath'

export default class SmbMountHandler extends MountHandler {
  constructor(remote, opts) {
    const { domain = 'WORKGROUP', host, password, path, username } = parse(remote.url)
    const { domain = 'WORKGROUP', host, password, path, username } = parse(
      remote.url
    )
    super(remote, opts, {
      type: 'cifs',
      device: '//' + host + normalizePath(path),

@@ -17,7 +17,8 @@ const normalizeError = (error, shouldBeDirectory) => {
    ? wrapError(error, 'EISDIR')
    : code === 'STATUS_NOT_A_DIRECTORY'
    ? wrapError(error, 'ENOTDIR')
    : code === 'STATUS_OBJECT_NAME_NOT_FOUND' || code === 'STATUS_OBJECT_PATH_NOT_FOUND'
    : code === 'STATUS_OBJECT_NAME_NOT_FOUND' ||
      code === 'STATUS_OBJECT_PATH_NOT_FOUND'
    ? wrapError(error, 'ENOENT')
    : code === 'STATUS_OBJECT_NAME_COLLISION'
    ? wrapError(error, 'EEXIST')
@@ -43,7 +44,12 @@ export default class SmbHandler extends RemoteHandlerAbstract {
  }

  _getFilePath(file) {
    return this._prefix + (typeof file === 'string' ? file : file.path).slice(1).replace(/\//g, '\\')
    return (
      this._prefix +
      (typeof file === 'string' ? file : file.path)
        .slice(1)
        .replace(/\//g, '\\')
    )
  }

  _dirname(file) {
@@ -90,13 +96,15 @@ export default class SmbHandler extends RemoteHandlerAbstract {
    return this._client.readdir(this._getFilePath(dir)).catch(normalizeDirError)
  }

  _mkdir(dir, { mode }) {
    return this._client.mkdir(this._getFilePath(dir), mode).catch(normalizeDirError)
  _mkdir(dir) {
    return this._client.mkdir(this._getFilePath(dir)).catch(normalizeDirError)
  }

  // TODO: add flags
  _openFile(path, flags) {
    return this._client.open(this._getFilePath(path), flags).catch(normalizeError)
    return this._client
      .open(this._getFilePath(path), flags)
      .catch(normalizeError)
  }

  async _read(file, buffer, position) {
@@ -115,7 +123,9 @@ export default class SmbHandler extends RemoteHandlerAbstract {
  }

  _readFile(file, options) {
    return this._client.readFile(this._getFilePath(file), options).catch(normalizeError)
    return this._client
      .readFile(this._getFilePath(file), options)
      .catch(normalizeError)
  }

  _rename(oldPath, newPath) {
@@ -146,7 +156,9 @@ export default class SmbHandler extends RemoteHandlerAbstract {
  }

  _truncate(file, len) {
    return this._client.truncate(this._getFilePath(file), len).catch(normalizeError)
    return this._client
      .truncate(this._getFilePath(file), len)
      .catch(normalizeError)
  }

  _unlink(file) {
@@ -158,6 +170,8 @@ export default class SmbHandler extends RemoteHandlerAbstract {
  }

  _writeFile(file, data, options) {
    return this._client.writeFile(this._getFilePath(file), data, options).catch(normalizeError)
    return this._client
      .writeFile(this._getFilePath(file), data, options)
      .catch(normalizeError)
  }
}

@@ -1 +1,3 @@
module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))
module.exports = require('../../@xen-orchestra/babel-config')(
  require('./package.json')
)

@@ -32,7 +32,7 @@
  },
  "dependencies": {
    "lodash": "^4.17.4",
    "promise-toolbox": "^0.16.0"
    "promise-toolbox": "^0.15.0"
  },
  "devDependencies": {
    "@babel/cli": "^7.0.0",
@@ -49,7 +49,7 @@
    "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
    "prebuild": "yarn run clean",
    "predev": "yarn run prebuild",
    "prepublishOnly": "yarn run build",
    "prepare": "yarn run build",
    "postversion": "npm publish"
  }
}

@@ -47,7 +47,10 @@ const createTransport = config => {
  return transport
}

const symbol = typeof Symbol !== 'undefined' ? Symbol.for('@xen-orchestra/log') : '@@@xen-orchestra/log'
const symbol =
  typeof Symbol !== 'undefined'
    ? Symbol.for('@xen-orchestra/log')
    : '@@@xen-orchestra/log'

const { env } = process
global[symbol] = createTransport({

@@ -1,7 +1,10 @@
import createTransport from './transports/console'
import LEVELS, { resolve } from './levels'

const symbol = typeof Symbol !== 'undefined' ? Symbol.for('@xen-orchestra/log') : '@@@xen-orchestra/log'
const symbol =
  typeof Symbol !== 'undefined'
    ? Symbol.for('@xen-orchestra/log')
    : '@@@xen-orchestra/log'
if (!(symbol in global)) {
  // the default behavior, without requiring `configure` is to avoid
  // logging anything unless it's a real error
@@ -61,7 +64,9 @@ prototype.wrap = function (message, fn) {
  try {
    const result = fn.apply(this, arguments)
    const then = result != null && result.then
    return typeof then === 'function' ? then.call(result, warnAndRethrow) : result
    return typeof then === 'function'
      ? then.call(result, warnAndRethrow)
      : result
  } catch (error) {
    warnAndRethrow(error)
  }

@@ -3,7 +3,12 @@ import LEVELS, { NAMES } from '../levels'
const { DEBUG, ERROR, FATAL, INFO, WARN } = LEVELS

let formatLevel, formatNamespace
if (process.stdout !== undefined && process.stdout.isTTY && process.stderr !== undefined && process.stderr.isTTY) {
if (
  process.stdout !== undefined &&
  process.stdout.isTTY &&
  process.stderr !== undefined &&
  process.stderr.isTTY
) {
  const ansi = (style, str) => `\x1b[${style}m${str}\x1b[0m`

  const LEVEL_STYLES = {
@@ -66,7 +71,10 @@ if (process.stdout !== undefined && process.stdout.isTTY && process.stderr !== u
    // const g = f(3)
    // const b = f(1)
    // return ansi(`38;2;${r};${g};${b}`, namespace)
    return ansi(`1;38;5;${NAMESPACE_COLORS[Math.abs(hash) % NAMESPACE_COLORS.length]}`, namespace)
    return ansi(
      `1;38;5;${NAMESPACE_COLORS[Math.abs(hash) % NAMESPACE_COLORS.length]}`,
      namespace
    )
  }
} else {
  formatLevel = str => NAMES[str]
@@ -76,10 +84,21 @@ if (process.stdout !== undefined && process.stdout.isTTY && process.stderr !== u
const consoleTransport = ({ data, level, namespace, message, time }) => {
  const fn =
    /* eslint-disable no-console */
    level < INFO ? console.log : level < WARN ? console.info : level < ERROR ? console.warn : console.error
    level < INFO
      ? console.log
      : level < WARN
      ? console.info
      : level < ERROR
      ? console.warn
      : console.error
  /* eslint-enable no-console */

  const args = [time.toISOString(), formatNamespace(namespace), formatLevel(level), message]
  const args = [
    time.toISOString(),
    formatNamespace(namespace),
    formatLevel(level),
    message,
  ]
  if (data != null) {
    args.push(data)
  }

@@ -54,7 +54,11 @@ export default ({
    transporter.sendMail(
      {
        subject: evalTemplate(subject, key =>
          key === 'level' ? NAMES[log.level] : key === 'time' ? log.time.toISOString() : log[key]
          key === 'level'
            ? NAMES[log.level]
            : key === 'time'
            ? log.time.toISOString()
            : log[key]
        ),
        text: prettyFormat(log.data),
      },

@@ -4,14 +4,16 @@ import escapeRegExp from 'lodash/escapeRegExp'

const TPL_RE = /\{\{(.+?)\}\}/g
export const evalTemplate = (tpl, data) => {
  const getData = typeof data === 'function' ? (_, key) => data(key) : (_, key) => data[key]
  const getData =
    typeof data === 'function' ? (_, key) => data(key) : (_, key) => data[key]

  return tpl.replace(TPL_RE, getData)
}
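// e.g. evalTemplate('[{{level}}] {{message}}', { level: 'warn', message: 'oops' })
// returns '[warn] oops'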

// -------------------------------------------------------------------

const compileGlobPatternFragment = pattern => pattern.split('*').map(escapeRegExp).join('.*')
const compileGlobPatternFragment = pattern =>
  pattern.split('*').map(escapeRegExp).join('.*')

export const compileGlobPattern = pattern => {
  const no = []

@@ -1 +1,3 @@
module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))
module.exports = require('../../@xen-orchestra/babel-config')(
  require('./package.json')
)

@@ -20,7 +20,9 @@ const isIgnoredStaticProperty = name => name in IGNORED_STATIC_PROPERTIES
const ownKeys =
  (typeof Reflect !== 'undefined' && Reflect.ownKeys) ||
  (({ getOwnPropertyNames: names, getOwnPropertySymbols: symbols }) =>
    symbols !== undefined ? obj => names(obj).concat(symbols(obj)) : names)(Object)
    symbols !== undefined ? obj => names(obj).concat(symbols(obj)) : names)(
    Object
  )

// -------------------------------------------------------------------

@@ -48,7 +50,10 @@ const mixin = Mixins => Class => {
        throw new Error(`${name}#${prop} is already defined`)
      }

      ;(descriptors[prop] = getOwnPropertyDescriptor(Mixin, prop)).enumerable = false // Object methods are enumerable but class methods are not.
      ;(descriptors[prop] = getOwnPropertyDescriptor(
        Mixin,
        prop
      )).enumerable = false // Object methods are enumerable but class methods are not.
    }
  })
  defineProperties(prototype, descriptors)
@@ -76,7 +81,11 @@ const mixin = Mixins => Class => {
        throw new Error(`${name}#${prop} is already defined`)
      }

      descriptors[prop] = getBoundPropertyDescriptor(prototype, prop, mixinInstance)
      descriptors[prop] = getBoundPropertyDescriptor(
        prototype,
        prop,
        mixinInstance
      )
    }
    defineProperties(instance, descriptors)
  }
@@ -92,7 +101,8 @@ const mixin = Mixins => Class => {
      !(
        isIgnoredStaticProperty(prop) &&
        // if they already exist...
        (descriptor = getOwnPropertyDescriptor(DecoratedClass, prop)) !== undefined &&
        (descriptor = getOwnPropertyDescriptor(DecoratedClass, prop)) !==
          undefined &&
        // and are not configurable.
        !descriptor.configurable
      )

@@ -1 +0,0 @@
module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))
@@ -1,24 +0,0 @@
/benchmark/
/benchmarks/
*.bench.js
*.bench.js.map

/examples/
example.js
example.js.map
*.example.js
*.example.js.map

/fixture/
/fixtures/
*.fixture.js
*.fixture.js.map
*.fixtures.js
*.fixtures.js.map

/test/
/tests/
*.spec.js
*.spec.js.map

__snapshots__/
@@ -1,141 +0,0 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->

# @xen-orchestra/openflow

[](https://npmjs.org/package/@xen-orchestra/openflow)  [](https://bundlephobia.com/result?p=@xen-orchestra/openflow) [](https://npmjs.org/package/@xen-orchestra/openflow)

> Pack and unpack OpenFlow messages

## Install

Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/openflow):

```
> npm install --save @xen-orchestra/openflow
```

## Usage

Unpacking a received OpenFlow message from a socket:

```js
import openflow from '@xen-orchestra/openflow'
import parse from '@xen-orchestra/openflow/parse-socket'

const version = openflow.versions.openFlow11
const ofProtocol = openflow.protocols[version]

async function parseOpenFlowMessages(socket) {
  for await (const msg of parse(socket)) {
    if (msg.header !== undefined) {
      const ofType = msg.header.type
      switch (ofType) {
        case ofProtocol.type.hello:
          // Handle OFPT_HELLO
          break
        case ofProtocol.type.error:
          // Handle OFPT_ERROR
          break
        case ofProtocol.type.echoRequest:
          // Handle OFPT_ECHO_REQUEST
          break
        case ofProtocol.type.packetIn:
          // Handle OFPT_PACKET_IN
          break
        case ofProtocol.type.featuresReply:
          // Handle OFPT_FEATURES_REPLY
          break
        case ofProtocol.type.getConfigReply:
          // Handle OFPT_GET_CONFIG_REPLY
          break
        case ofProtocol.type.portStatus:
          // Handle OFPT_PORT_STATUS
          break
        case ofProtocol.type.flowRemoved:
          // Handle OFPT_FLOW_REMOVED
          break
        default:
          // Error: Invalid type
          break
      }
    } else {
      // Error: Message is unparseable
    }
  }
}
```

Unpacking an OpenFlow message from a buffer:

```js
import openflow from '@xen-orchestra/openflow'

const version = openflow.versions.openFlow11
const ofProtocol = openflow.protocols[version]

function processOpenFlowMessage(buf) {
  const unpacked = openflow.unpack(buf)
  const ofType = unpacked.header.type
  switch (ofType) {
    case ofProtocol.type.hello:
      // Handle OFPT_HELLO
      break
    case ofProtocol.type.error:
      // Handle OFPT_ERROR
      break
    case ofProtocol.type.echoRequest:
      // Handle OFPT_ECHO_REQUEST
      break
    case ofProtocol.type.packetIn:
      // Handle OFPT_PACKET_IN
      break
    case ofProtocol.type.featuresReply:
      // Handle OFPT_FEATURES_REPLY
      break
    case ofProtocol.type.getConfigReply:
      // Handle OFPT_GET_CONFIG_REPLY
      break
    case ofProtocol.type.portStatus:
      // Handle OFPT_PORT_STATUS
      break
    case ofProtocol.type.flowRemoved:
      // Handle OFPT_FLOW_REMOVED
      break
    default:
      // Error: Invalid type
      break
  }
}
```

Packing an OpenFlow OFPT_HELLO message:

```js
import openflow from '@xen-orchestra/openflow'

const version = openflow.versions.openFlow11
const ofProtocol = openflow.protocols[version]

const buf = openflow.pack({
  header: {
    version,
    type: ofProtocol.type.hello,
    xid: 1,
  },
})
```
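
The two can be combined into a small handshake. This is only a minimal sketch: the controller address, port and the absence of reply handling are assumptions for illustration, not part of this package:

```js
import net from 'net'
import openflow from '@xen-orchestra/openflow'

const version = openflow.versions.openFlow11
const ofProtocol = openflow.protocols[version]

// hypothetical switch endpoint
const socket = net.connect(6633, 'localhost')
socket.on('connect', () => {
  // announce the OpenFlow version we support
  socket.write(openflow.pack({ header: { version, type: ofProtocol.type.hello, xid: 1 } }))
})
```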

## Contributions

Contributions are _very_ welcomed, either on the documentation or on
the code.

You may:

- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
  you've encountered;
- fork and create a pull request.

## License

[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)
@@ -1,108 +0,0 @@
Unpacking a received OpenFlow message from a socket:

```js
import openflow from '@xen-orchestra/openflow'
import parse from '@xen-orchestra/openflow/parse-socket'

const version = openflow.versions.openFlow11
const ofProtocol = openflow.protocols[version]

async function parseOpenFlowMessages(socket) {
  for await (const msg of parse(socket)) {
    if (msg.header !== undefined) {
      const ofType = msg.header.type
      switch (ofType) {
        case ofProtocol.type.hello:
          // Handle OFPT_HELLO
          break
        case ofProtocol.type.error:
          // Handle OFPT_ERROR
          break
        case ofProtocol.type.echoRequest:
          // Handle OFPT_ECHO_REQUEST
          break
        case ofProtocol.type.packetIn:
          // Handle OFPT_PACKET_IN
          break
        case ofProtocol.type.featuresReply:
          // Handle OFPT_FEATURES_REPLY
          break
        case ofProtocol.type.getConfigReply:
          // Handle OFPT_GET_CONFIG_REPLY
          break
        case ofProtocol.type.portStatus:
          // Handle OFPT_PORT_STATUS
          break
        case ofProtocol.type.flowRemoved:
          // Handle OFPT_FLOW_REMOVED
          break
        default:
          // Error: Invalid type
          break
      }
    } else {
      // Error: Message is unparseable
    }
  }
}
```

Unpacking an OpenFlow message from a buffer:

```js
import openflow from '@xen-orchestra/openflow'

const version = openflow.versions.openFlow11
const ofProtocol = openflow.protocols[version]

function processOpenFlowMessage(buf) {
  const unpacked = openflow.unpack(buf)
  const ofType = unpacked.header.type
  switch (ofType) {
    case ofProtocol.type.hello:
      // Handle OFPT_HELLO
      break
    case ofProtocol.type.error:
      // Handle OFPT_ERROR
      break
    case ofProtocol.type.echoRequest:
      // Handle OFPT_ECHO_REQUEST
      break
    case ofProtocol.type.packetIn:
      // Handle OFPT_PACKET_IN
      break
    case ofProtocol.type.featuresReply:
      // Handle OFPT_FEATURES_REPLY
      break
    case ofProtocol.type.getConfigReply:
      // Handle OFPT_GET_CONFIG_REPLY
      break
    case ofProtocol.type.portStatus:
      // Handle OFPT_PORT_STATUS
      break
    case ofProtocol.type.flowRemoved:
      // Handle OFPT_FLOW_REMOVED
      break
    default:
      // Error: Invalid type
      break
  }
}
```

Packing an OpenFlow OFPT_HELLO message:

```js
import openflow from '@xen-orchestra/openflow'

const version = openflow.versions.openFlow11
const ofProtocol = openflow.protocols[version]

const buf = openflow.pack({
  header: {
    version,
    type: ofProtocol.type.hello,
    xid: 1,
  },
})
```
@@ -1,40 +0,0 @@
{
  "description": "Pack and unpack OpenFlow messages",
  "private": false,
  "name": "@xen-orchestra/openflow",
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/openflow",
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "repository": {
    "directory": "@xen-orchestra/openflow",
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "version": "0.1.1",
  "engines": {
    "node": ">=8.10"
  },
  "main": "dist/",
  "scripts": {
    "build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
    "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
    "postversion": "npm publish --access public",
    "prebuild": "rimraf dist/",
    "predev": "yarn run prebuild",
    "prepublishOnly": "yarn run build"
  },
  "devDependencies": {
    "@babel/cli": "^7.7.4",
    "@babel/core": "^7.7.4",
    "@babel/preset-env": "^7.7.4",
    "cross": "^1.0.0",
    "rimraf": "^3.0.0"
  },
  "dependencies": {
    "@vates/read-chunk": "^0.1.0"
  },
  "author": {
    "name": "Vates SAS",
    "url": "https://vates.fr"
  },
  "license": "ISC"
}
@@ -1 +0,0 @@
module.exports = require('./dist/parse-socket')
@@ -1,9 +0,0 @@
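// fixed 8-byte header shared by every OpenFlow version:
// version (byte 0), type (byte 1), length (bytes 2-3), xid (bytes 4-7)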
export default {
  size: 8,
  offsets: {
    version: 0,
    type: 1,
    length: 2,
    xid: 4,
  },
}
@@ -1,30 +0,0 @@
import get from './util/get-from-map'
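// get(map, key, message) is assumed to return map[key] and to throw with
// `message` when the key is missing (the helper itself is not shown in this diff)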
import ofVersion from './version'
// TODO: More openflow versions
import of11 from './openflow-11/index'
import scheme from './default-header-scheme'

// =============================================================================

const OPENFLOW = {
  [ofVersion.openFlow11]: of11,
}

// =============================================================================

export default {
  versions: ofVersion,
  protocols: { [ofVersion.openFlow11]: of11.protocol },

  // ---------------------------------------------------------------------------

  pack: object => {
    const version = object.header.version
    return get(OPENFLOW, version, `Unsupported OpenFlow version: ${version}`).pack(object)
  },

  unpack: (buffer, offset = 0) => {
    const version = buffer.readUInt8(offset + scheme.offsets.version)
    return get(OPENFLOW, version, `Unsupported OpenFlow version: ${version}`).unpack(buffer, offset)
  },
}
@@ -1,51 +0,0 @@
import get from '../../util/get-from-map'

import ofOutput from './output'
import of from '../openflow-11'

// =============================================================================

const ACTION = {
  [of.actionType.output]: ofOutput,
  /* TODO:
  [of.actionType.group]: ,
  [of.actionType.setVlanId]: ,
  [of.actionType.setVlanPcp]: ,
  [of.actionType.setDlSrc]: ,
  [of.actionType.setDlDst]: ,
  [of.actionType.setNwSrc]: ,
  [of.actionType.setNwDst]: ,
  [of.actionType.setNwTos]: ,
  [of.actionType.setNwEcn]: ,
  [of.actionType.setTpSrc]: ,
  [of.actionType.setTpDst]: ,
  [of.actionType.copyTtlOut]: ,
  [of.actionType.copyTtlIn]: ,
  [of.actionType.setMplsLabel]: ,
  [of.actionType.setMplsTc]: ,
  [of.actionType.setMplsTtl]: ,
  [of.actionType.decMplsTtl]: ,
  [of.actionType.pushVlan]: ,
  [of.actionType.popVlan]: ,
  [of.actionType.pushMpls]: ,
  [of.actionType.popMpls]: ,
  [of.actionType.setQueue]: ,
  [of.actionType.setNwTtl]: ,
  [of.actionType.decNwTtl]: ,
  [of.actionType.experimenter]:
  */
}

// =============================================================================

export default {
  pack: (object, buffer = undefined, offset = 0) => {
    const { type } = object
    return get(ACTION, type, `Invalid action type: ${type}`).pack(object, buffer, offset)
  },

  unpack: (buffer, offset = 0) => {
    const type = buffer.readUInt16BE(offset + of.offsets.actionHeader.type)
    return get(ACTION, type, `Invalid action type: ${type}`).unpack(buffer, offset)
  },
}
@@ -1,45 +0,0 @@
import assert from 'assert'

import of from '../openflow-11'

// =============================================================================

const OFFSETS = of.offsets.actionOutput

const PAD_LENGTH = 6

// =============================================================================

export default {
  pack: (object, buffer = undefined, offset = 0) => {
    assert(object.type === of.actionType.output)
    object.len = of.sizes.actionOutput

    buffer = buffer !== undefined ? buffer : Buffer.alloc(object.len)

    buffer.writeUInt16BE(object.type, offset + OFFSETS.type)
    buffer.writeUInt16BE(object.len, offset + OFFSETS.len)

    buffer.writeUInt32BE(object.port, offset + OFFSETS.port)
    buffer.writeUInt16BE(object.max_len, offset + OFFSETS.maxLen)

    buffer.fill(0, offset + OFFSETS.pad, offset + OFFSETS.pad + PAD_LENGTH)

    return buffer
  },

  unpack: (buffer, offset = 0) => {
    const object = {}

    object.type = buffer.readUInt16BE(offset + OFFSETS.type)
    assert(object.type === of.actionType.output)

    object.len = buffer.readUInt16BE(offset + OFFSETS.len)
    assert(object.len === of.sizes.actionOutput)

    object.port = buffer.readUInt32BE(offset + OFFSETS.port)
    object.max_len = buffer.readUInt16BE(offset + OFFSETS.maxLen)

    return object
  },
}
@@ -1,44 +0,0 @@
import get from '../util/get-from-map'

import echo from './message/echo'
import error from './message/error'
import hello from './message/hello'
import featuresRequest from './message/features-request'
import featuresReply from './message/features-reply'
import getConfigRequest from './message/get-config-request'
import switchConfig from './message/switch-config'
import flowMod from './message/flow-mod'
import of from './openflow-11'

// =============================================================================

const MESSAGE = {
  [of.type.hello]: hello,
  [of.type.error]: error,
  [of.type.featuresRequest]: featuresRequest,
  [of.type.featuresReply]: featuresReply,
  [of.type.echoRequest]: echo,
  [of.type.echoReply]: echo,
  [of.type.getConfigRequest]: getConfigRequest,
  [of.type.getConfigReply]: switchConfig,
  [of.type.setConfig]: switchConfig,
  [of.type.flowMod]: flowMod,
}

// =============================================================================

export default {
  protocol: of,

  // ---------------------------------------------------------------------------

  pack: object => {
    const type = object.header.type
    return get(MESSAGE, type, `Invalid OpenFlow message type: ${type}`).pack(object)
  },

  unpack: (buffer, offset = 0) => {
    const type = buffer.readUInt8(offset + of.offsets.header.type)
    return get(MESSAGE, type, `Invalid OpenFlow message type: ${type}`).unpack(buffer, offset)
  },
}
@@ -1,94 +0,0 @@
import assert from 'assert'

import get from '../../util/get-from-map'

import ofAction from '../action/action'
import of from '../openflow-11'

// =============================================================================

const SIZES = {
  [of.actionType.output]: of.sizes.actionOutput,
  [of.actionType.group]: of.sizes.actionGroup,
  [of.actionType.setVlanId]: of.sizes.actionVlanId,
  [of.actionType.setVlanPcp]: of.sizes.actionVlanPcp,
  [of.actionType.setDlSrc]: of.sizes.actionDlAddr,
  [of.actionType.setDlDst]: of.sizes.actionDlAddr,
  [of.actionType.setNwSrc]: of.sizes.actionNwAddr,
  [of.actionType.setNwDst]: of.sizes.actionNwAddr,
  [of.actionType.setNwTos]: of.sizes.actionNwTos,
  [of.actionType.setNwEcn]: of.sizes.actionNwEcn,
  [of.actionType.setTpSrc]: of.sizes.actionTpPort,
  [of.actionType.setTpDst]: of.sizes.actionTpPort,
  [of.actionType.copyTtlOut]: of.sizes.actionHeader,
  [of.actionType.copyTtlIn]: of.sizes.actionHeader,
  [of.actionType.setMplsLabel]: of.sizes.actionMplsLabel,
  [of.actionType.setMplsTc]: of.sizes.actionMplsTc,
  [of.actionType.setMplsTtl]: of.sizes.actionMplsTtl,
  [of.actionType.decMplsTtl]: of.sizes.actionMplsTtl,
  [of.actionType.pushVlan]: of.sizes.actionPush,
  [of.actionType.popVlan]: of.sizes.actionHeader,
  [of.actionType.pushMpls]: of.sizes.actionPush,
  [of.actionType.popMpls]: of.sizes.actionPopMpls,
  [of.actionType.setQueue]: of.sizes.actionSetQueue,
  [of.actionType.setNwTtl]: of.sizes.actionNwTtl,
  [of.actionType.decNwTtl]: of.sizes.actionNwTtl,
}

// -----------------------------------------------------------------------------

const TYPES = [of.instructionType.clearActions, of.instructionType.writeActions, of.instructionType.applyActions]
const OFFSETS = of.offsets.instructionActions

const PAD_LENGTH = 4

// =============================================================================

export default {
  pack: (object, buffer = undefined, offset = 0) => {
    const { type } = object
    assert(TYPES.includes(type))
    object.len = of.sizes.instructionActions
    const { actions = [] } = object
    actions.forEach(action => {
      assert(Object.values(of.actionType).includes(action.type))
      // TODO: manage experimenter
      object.len += get(SIZES, action.type, `Invalid action type: ${action.type}`)
    })

    buffer = buffer !== undefined ? buffer : Buffer.alloc(object.len)

    buffer.writeUInt16BE(type, offset + OFFSETS.type)
    buffer.writeUInt16BE(object.len, offset + OFFSETS.len)
    buffer.fill(0, offset + OFFSETS.pad, offset + OFFSETS.pad + PAD_LENGTH)

    let actionOffset = offset + OFFSETS.actions
    actions.forEach(action => {
      ofAction.pack(action, buffer, actionOffset)
      actionOffset += SIZES[action.type]
    })
  },

  unpack: (buffer = undefined, offset = 0) => {
    const type = buffer.readUInt16BE(offset + OFFSETS.type)
    assert(TYPES.includes(type))

    const object = { type }
    object.len = buffer.readUInt16BE(offset + OFFSETS.len)

    if (type === of.instructionType.clearActions) {
      // No actions for this type
      return object
    }

    object.actions = []
    let actionOffset = offset + OFFSETS.actions
    while (actionOffset < offset + object.len) {
      const action = ofAction.unpack(buffer, actionOffset)
      actionOffset += action.len
      object.actions.push(action)
    }

    return object
  },
}
@@ -1,36 +0,0 @@
import get from '../../util/get-from-map'

import actions from './actions'
// import goToTable from './goToTable'
import of from '../openflow-11'
// import writeMetadata from './writeMetadata'

// =============================================================================

const INSTRUCTION = {
  /* TODO:
  [of.instructionType.goToTable]: goToTable,
  [of.instructionType.writeMetadata]: writeMetadata,
  */
  [of.instructionType.writeActions]: actions,
  [of.instructionType.applyActions]: actions,
  [of.instructionType.clearActions]: actions,
}

// -----------------------------------------------------------------------------

const OFFSETS = of.offsets.instruction

// =============================================================================

export default {
  pack: (object, buffer = undefined, offset = 0) => {
    const { type } = object
    return get(INSTRUCTION, type, `Invalid instruction type: ${type}`).pack(object, buffer, offset)
  },

  unpack: (buffer = undefined, offset = 0) => {
    const type = buffer.readUInt16BE(offset + OFFSETS.type)
    return get(INSTRUCTION, type, `Invalid instruction type: ${type}`).unpack(buffer, offset)
  },
}
@@ -1,41 +0,0 @@
import assert from 'assert'
import ofHeader from './header'
import of from '../openflow-11'

// =============================================================================

const OFFSETS = of.offsets.echo
const TYPES = [of.type.echoRequest, of.type.echoReply]

// =============================================================================
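// echo requests and replies share one codec: a bare header plus an optional
// opaque payload (a reply is expected to carry the request's payload back)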
export default {
  pack: object => {
    const { header, data } = object
    assert(TYPES.includes(header.type))
    const dataSize = data !== undefined ? data.length : 0
    header.length = of.sizes.header + dataSize

    const buffer = Buffer.alloc(header.length)
    ofHeader.pack(header, buffer, OFFSETS.header)
    if (dataSize > 0) {
      data.copy(buffer, OFFSETS.data, 0, dataSize)
    }

    return buffer
  },

  unpack: (buffer, offset = 0) => {
    const header = ofHeader.unpack(buffer, offset + OFFSETS.header)
    assert(TYPES.includes(header.type))

    const object = { header }
    const dataSize = header.length - of.sizes.header
    if (dataSize > 0) {
      object.data = Buffer.alloc(dataSize)
      buffer.copy(object.data, 0, offset + OFFSETS.data, offset + OFFSETS.data + dataSize)
    }

    return object
  },
}
@@ -1,74 +0,0 @@
import assert from 'assert'

import get from '../../util/get-from-map'

import ofHeader from './header'
import of from '../openflow-11'

// =============================================================================

const ERROR_CODE = {
  [of.errorType.helloFailed]: of.helloFailedCode,
  [of.errorType.badRequest]: of.badRequestCode,
  [of.errorType.badAction]: of.badActionCode,
  [of.errorType.badInstruction]: of.badInstructionCode,
  [of.errorType.badMatch]: of.badMatchCode,
  [of.errorType.flowModFailed]: of.flowModFailedCode,
  [of.errorType.groupModFailed]: of.groupModFailedCode,
  [of.errorType.portModFailed]: of.portModFailedCode,
  [of.errorType.tableModFailed]: of.tableModFailedCode,
  [of.errorType.queueOpFailed]: of.queueOpFailedCode,
  [of.errorType.switchConfigFailed]: of.switchConfigFailedCode,
}

// -----------------------------------------------------------------------------

const OFFSETS = of.offsets.errorMsg

// =============================================================================

export default {
  pack: (object, buffer = undefined, offset = 0) => {
    const { header, type, code, data } = object
    assert(header.type === of.type.error)
    const errorCodes = get(ERROR_CODE, type, `Invalid error type: ${type}`)
    assert(Object.values(errorCodes).includes(code))

    header.length = of.sizes.errorMsg
    if (data !== undefined) {
      header.length += data.length
    }

    buffer = buffer !== undefined ? buffer : Buffer.alloc(header.length)

    ofHeader.pack(header, buffer, offset + OFFSETS.header)
    buffer.writeUInt16BE(type, offset + OFFSETS.type)
    buffer.writeUInt16BE(code, offset + OFFSETS.code)

    if (data !== undefined) {
      data.copy(buffer, offset + OFFSETS.data, 0, data.length)
    }

    return buffer
  },

  unpack: (buffer, offset = 0) => {
    const header = ofHeader.unpack(buffer, offset + OFFSETS.header)
    assert(header.type === of.type.error)

    const type = buffer.readUInt16BE(offset + OFFSETS.type)
    const errorCodes = get(ERROR_CODE, type, `Invalid error type: ${type}`)

    const code = buffer.readUInt16BE(offset + OFFSETS.code)
    assert(Object.values(errorCodes).includes(code))

    const object = { header, type, code }
    const dataSize = header.length - of.sizes.errorMsg
    if (dataSize > 0) {
      object.data = Buffer.alloc(dataSize)
      buffer.copy(object.data, 0, offset + OFFSETS.data, offset + OFFSETS.data + dataSize)
    }

    return object
  },
}
@@ -1,59 +0,0 @@
import assert from 'assert'

import ofHeader from './header'
import ofPort from '../struct/port'
import of from '../openflow-11'

// =============================================================================

const OFFSETS = of.offsets.switchFeatures
const PAD_LENGTH = 3

// =============================================================================

export default {
  pack: object => {
    const { header, datapath_id: did, n_buffers: nBufs, n_tables: nTables, capabilities, reserved, ports } = object
    assert(header.type === of.type.featuresReply)

    header.length = of.sizes.switchFeatures + ports.length * of.sizes.port

    const buffer = Buffer.alloc(header.length)
    ofHeader.pack(header, buffer, OFFSETS.header)

    buffer.writeBigUInt64BE(did, OFFSETS.datapathId)
    buffer.writeUInt32BE(nBufs, OFFSETS.nBuffers)
    buffer.writeUInt8(nTables, OFFSETS.nTables)
    buffer.fill(0, OFFSETS.pad, OFFSETS.pad + PAD_LENGTH)
    buffer.writeUInt32BE(capabilities, OFFSETS.capabilities)
    buffer.writeUInt32BE(reserved, OFFSETS.reserved)

    let portsOffset = 0
    ports.forEach(port => {
      ofPort.pack(port, buffer, OFFSETS.ports + portsOffset++ * of.sizes.port)
    })

    return buffer
  },

  unpack: (buffer, offset = 0) => {
    const header = ofHeader.unpack(buffer, offset + OFFSETS.header)
    assert(header.type === of.type.featuresReply)

    const object = { header }
    object.datapath_id = buffer.toString('hex', offset + OFFSETS.datapathId, offset + OFFSETS.datapathId + 8)
    object.n_buffers = buffer.readUInt32BE(offset + OFFSETS.nBuffers)
    object.n_tables = buffer.readUInt8(offset + OFFSETS.nTables)

    object.capabilities = buffer.readUInt32BE(offset + OFFSETS.capabilities)
    object.reserved = buffer.readUInt32BE(offset + OFFSETS.reserved)

    object.ports = []
    const nPorts = (header.length - of.sizes.switchFeatures) / of.sizes.port
    for (let i = 0; i < nPorts; ++i) {
      object.ports.push(ofPort.unpack(buffer, offset + OFFSETS.ports + i * of.sizes.port))
    }

    return object
  },
}
@@ -1,24 +0,0 @@
import assert from 'assert'

import ofHeader from './header'
import of from '../openflow-11'

// =============================================================================

export default {
  pack: object => {
    const { header } = object
    assert(header.type === of.type.featuresRequest)
    header.length = of.sizes.featuresRequest

    return ofHeader.pack(header)
  },

  unpack: (buffer, offset = 0) => {
    const header = ofHeader.unpack(buffer, offset)
    assert(header.type === of.type.featuresRequest)
    assert(header.length === of.sizes.featuresRequest)

    return { header }
  },
}
@@ -1,167 +0,0 @@
import assert from 'assert'

import get from '../../util/get-from-map'
import ofInstruction from '../instruction/instruction'
import uIntHelper from '../../util/uint-helper'

import ofHeader from './header'
import of from '../openflow-11'
import ofMatch from '../struct/match/match'

// =============================================================================

const INSTRUCTION_SIZE = {
  [of.instructionType.goToTable]: of.sizes.instructionGotoTable,
  [of.instructionType.writeMetadata]: of.sizes.instructionWriteMetadata,
  [of.instructionType.clearActions]: of.sizes.instructionActions,
  [of.instructionType.writeActions]: of.sizes.instructionActions,
  [of.instructionType.applyActions]: of.sizes.instructionActions,
}

const ACTION_SIZE = {
  [of.actionType.output]: of.sizes.actionOutput,
  [of.actionType.group]: of.sizes.actionGroup,
  [of.actionType.setVlanId]: of.sizes.actionVlanId,
  [of.actionType.setVlanPcp]: of.sizes.actionVlanPcp,
  [of.actionType.setDlSrc]: of.sizes.actionDlAddr,
  [of.actionType.setDlDst]: of.sizes.actionDlAddr,
  [of.actionType.setNwSrc]: of.sizes.actionNwAddr,
  [of.actionType.setNwDst]: of.sizes.actionNwAddr,
  [of.actionType.setNwTos]: of.sizes.actionNwTos,
  [of.actionType.setNwEcn]: of.sizes.actionNwEcn,
  [of.actionType.setTpSrc]: of.sizes.actionTpPort,
  [of.actionType.setTpDst]: of.sizes.actionTpPort,
  [of.actionType.copyTtlOut]: of.sizes.actionHeader,
  [of.actionType.copyTtlIn]: of.sizes.actionHeader,
  [of.actionType.setMplsLabel]: of.sizes.actionMplsLabel,
  [of.actionType.setMplsTc]: of.sizes.actionMplsTc,
  [of.actionType.setMplsTtl]: of.sizes.actionMplsTtl,
  [of.actionType.decMplsTtl]: of.sizes.actionMplsTtl,
  [of.actionType.pushVlan]: of.sizes.actionPush,
  [of.actionType.popVlan]: of.sizes.actionHeader,
  [of.actionType.pushMpls]: of.sizes.actionPush,
  [of.actionType.popMpls]: of.sizes.actionPopMpls,
  [of.actionType.setQueue]: of.sizes.actionSetQueue,
  [of.actionType.setNwTtl]: of.sizes.actionNwTtl,
  [of.actionType.decNwTtl]: of.sizes.actionNwTtl,
}

// -----------------------------------------------------------------------------

const OFFSETS = of.offsets.flowMod

const COOKIE_LENGTH = 8
const PAD_LENGTH = 2

// =============================================================================

export default {
  pack: (object, buffer = undefined, offset = 0) => {
    const {
      header,
      cookie,
      cookie_mask,
      table_id = 0,
      command,
      idle_timeout = 0,
      hard_timeout = 0,
      priority = of.defaultPriority,
      buffer_id = 0xffffffff,
      out_port = of.port.any,
      out_group = of.group.any,
      flags = 0,
      match,
      instructions = [],
    } = object
    // fill header length
    header.length = of.sizes.flowMod
    instructions.forEach(instruction => {
      header.length += get(INSTRUCTION_SIZE, instruction.type, `Invalid instruction type: ${instruction.type}`)
      const { actions = [] } = instruction
      actions.forEach(action => {
        header.length += get(ACTION_SIZE, action.type, `Invalid action type: ${action.type}`)
      })
    })

    buffer = buffer !== undefined ? buffer : Buffer.alloc(header.length)

    ofHeader.pack(header, buffer, offset + OFFSETS.header)

    if (cookie !== undefined) {
      if (cookie_mask !== undefined) {
        cookie_mask.copy(buffer, offset + OFFSETS.cookieMask)
      } else {
        buffer.fill(0x00, offset + OFFSETS.cookieMask, offset + OFFSETS.cookieMask + COOKIE_LENGTH)
      }
      cookie.copy(buffer, offset + OFFSETS.cookie)
    } else {
      buffer.fill(0x00, offset + OFFSETS.cookie, offset + OFFSETS.cookie + COOKIE_LENGTH)
      buffer.fill(0xff, offset + OFFSETS.cookieMask, offset + OFFSETS.cookieMask + COOKIE_LENGTH)
    }

    buffer.writeUInt8(table_id, offset + OFFSETS.tableId)
    assert(Object.values(of.flowModCommand).includes(command))
    buffer.writeUInt8(command, offset + OFFSETS.command)
    buffer.writeUInt16BE(idle_timeout, offset + OFFSETS.idleTimeout)
    buffer.writeUInt16BE(hard_timeout, offset + OFFSETS.hardTimeout)
    buffer.writeUInt16BE(priority, offset + OFFSETS.priority)
    buffer.writeUInt32BE(buffer_id, offset + OFFSETS.bufferId)
    buffer.writeUInt32BE(out_port, offset + OFFSETS.outPort)
    buffer.writeUInt32BE(out_group, offset + OFFSETS.outGroup)
    buffer.writeUInt16BE(flags, offset + OFFSETS.flags)
    buffer.fill(0, offset + OFFSETS.pad, offset + OFFSETS.pad + PAD_LENGTH)

    ofMatch.pack(match, buffer, offset + OFFSETS.match)

    let instructionOffset = offset + OFFSETS.instructions
    instructions.forEach(instruction => {
      ofInstruction.pack(instruction, buffer, instructionOffset)
      instructionOffset += instruction.len
    })

    return buffer
  },

  unpack: (buffer, offset = 0) => {
    const header = ofHeader.unpack(buffer, offset + OFFSETS.header)
    assert(header.type === of.type.flowMod)

    const object = { header }

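    // isUInt64None() presumably detects the all-ones marker that pack() writes
    // when no mask is given; a cookie_mask is only reported when a real one is set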
    object.cookie = Buffer.alloc(COOKIE_LENGTH)
    buffer.copy(object.cookie, 0, offset + OFFSETS.cookie, offset + OFFSETS.cookie + COOKIE_LENGTH)
    if (
      !uIntHelper.isUInt64None([
        buffer.readUInt32BE(offset + OFFSETS.cookieMask),
        buffer.readUInt32BE(offset + OFFSETS.cookieMask + COOKIE_LENGTH / 2),
      ])
    ) {
      object.cookie_mask = Buffer.alloc(COOKIE_LENGTH)
      buffer.copy(object.cookie_mask, 0, offset + OFFSETS.cookieMask, offset + OFFSETS.cookieMask + COOKIE_LENGTH)
    }

    object.table_id = buffer.readUInt8(offset + OFFSETS.tableId)
    object.command = buffer.readUInt8(offset + OFFSETS.command)
    assert(Object.values(of.flowModCommand).includes(object.command))

    object.idle_timeout = buffer.readUInt16BE(offset + OFFSETS.idleTimeout)
    object.hard_timeout = buffer.readUInt16BE(offset + OFFSETS.hardTimeout)
    object.priority = buffer.readUInt16BE(offset + OFFSETS.priority)
    object.buffer_id = buffer.readUInt32BE(offset + OFFSETS.bufferId)
    object.out_port = buffer.readUInt32BE(offset + OFFSETS.outPort)
    object.out_group = buffer.readUInt32BE(offset + OFFSETS.outGroup)
    object.flags = buffer.readUInt16BE(offset + OFFSETS.flags)

    object.match = ofMatch.unpack(buffer, offset + OFFSETS.match)

    object.instructions = []
    let instructionOffset = offset + OFFSETS.instructions
    while (instructionOffset < offset + header.length) {
      const instruction = ofInstruction.unpack(buffer, instructionOffset)
      object.instructions.push(instruction)
      instructionOffset += instruction.len
    }

    return object
  },
}
@@ -1,24 +0,0 @@
import assert from 'assert'

import ofHeader from './header'
import of from '../openflow-11'

// =============================================================================

export default {
  pack: object => {
    const { header } = object
    assert(header.type === of.type.getConfigRequest)
    header.length = of.sizes.header

    return ofHeader.pack(header)
  },

  unpack: (buffer, offset = 0) => {
    const header = ofHeader.unpack(buffer, offset)
    assert(header.type === of.type.getConfigRequest)
    assert(header.length === of.sizes.header)

    return { header }
  },
}
@@ -1,39 +0,0 @@
import assert from 'assert'

import of from '../openflow-11'

// =============================================================================

const OFFSETS = of.offsets.header

// =============================================================================

export default {
  pack: (object, buffer = undefined, offset = 0) => {
    buffer = buffer !== undefined ? buffer : Buffer.alloc(of.sizes.header)
    const { version, type, length, xid } = object

    assert(version === of.version)
    assert(Object.values(of.type).includes(type))

    buffer.writeUInt8(version, offset + OFFSETS.version)
    buffer.writeUInt8(type, offset + OFFSETS.type)
    buffer.writeUInt16BE(length, offset + OFFSETS.length)
    buffer.writeUInt32BE(xid, offset + OFFSETS.xid)

    return buffer
  },

  unpack: (buffer, offset = 0) => {
    const version = buffer.readUInt8(offset + OFFSETS.version)
    assert(version === of.version)

    const type = buffer.readUInt8(offset + OFFSETS.type)
    assert(Object.values(of.type).includes(type))

    const length = buffer.readUInt16BE(offset + OFFSETS.length)
    const xid = buffer.readUInt32BE(offset + OFFSETS.xid)

    return { version, type, length, xid }
  },
}

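For context, here is a minimal round-trip sketch through the header codec above. The relative import paths and the `xid` value are illustrative; `of.sizes.header` is the fixed 8-byte OpenFlow header size.

```js
// Hypothetical round trip through the header codec above.
import ofHeader from './header'
import of from '../openflow-11'

const buf = ofHeader.pack({
  version: of.version, // 0x02: OpenFlow 1.1
  type: of.type.hello,
  length: of.sizes.header, // 8-byte fixed-size header
  xid: 42, // transaction id, echoed back by the peer
})
ofHeader.unpack(buf) // => { version: 2, type: of.type.hello, length: 8, xid: 42 }
```
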
@@ -1,27 +0,0 @@
import assert from 'assert'

import ofHeader from './header'
import of from '../openflow-11'

// =============================================================================

const OFFSETS = of.offsets.hello

// =============================================================================

export default {
  pack: object => {
    const { header } = object
    assert(header.type === of.type.hello)
    header.length = of.sizes.hello

    return ofHeader.pack(header)
  },

  unpack: (buffer, offset = 0) => {
    const header = ofHeader.unpack(buffer, offset + OFFSETS.header)
    assert(header.type === of.type.hello)

    return { header }
  },
}

@@ -1,38 +0,0 @@
import assert from 'assert'

import ofHeader from './header'
import of from '../openflow-11'

// =============================================================================

const OFFSETS = of.offsets.switchConfig
const TYPES = [of.type.getConfigReply, of.type.setConfig]

// =============================================================================

export default {
  pack: object => {
    const { header, flags, miss_send_len } = object
    assert(TYPES.includes(header.type))
    header.length = of.sizes.switchConfig

    const buffer = Buffer.alloc(header.length)
    ofHeader.pack(header, buffer, OFFSETS.header)

    buffer.writeUInt16BE(flags, OFFSETS.flags)
    buffer.writeUInt16BE(miss_send_len, OFFSETS.missSendLen)

    return buffer
  },

  unpack: (buffer, offset = 0) => {
    const header = ofHeader.unpack(buffer, offset + OFFSETS.header)
    assert(TYPES.includes(header.type))
    assert(header.length === of.sizes.switchConfig)

    const flags = buffer.readUInt16BE(offset + OFFSETS.flags)
    const miss_send_len = buffer.readUInt16BE(offset + OFFSETS.missSendLen)

    return { header, flags, miss_send_len }
  },
}

File diff suppressed because it is too large

@@ -1,274 +0,0 @@
import assert from 'assert'
import addressParser from '../../../util/addrress-parser'
import uIntHelper from '../../../util/uint-helper'
import of from '../../openflow-11'

// =============================================================================

const OFFSETS = of.offsets.match
const WILDCARDS = of.flowWildcards

const IP4_ADDR_LEN = 4
const METADATA_LENGTH = 8
const PAD_LENGTH = 1
const PAD2_LENGTH = 3

// =============================================================================

export default {
  pack: (object, buffer = undefined, offset = 0) => {
    assert(object.type === of.matchType.standard)
    object.length = of.sizes.match
    buffer = buffer !== undefined ? buffer : Buffer.alloc(object.length)

    buffer.writeUInt16BE(object.type, offset + OFFSETS.type)
    buffer.writeUInt16BE(object.length, offset + OFFSETS.length)

    let wildcards = 0
    let inPort = 0
    if (object.in_port !== undefined) {
      inPort = object.in_port
    } else {
      wildcards |= WILDCARDS.inPort
    }
    buffer.writeUInt32BE(inPort, offset + OFFSETS.inPort)

    if (object.dl_src !== undefined) {
      if (object.dl_src_mask !== undefined) {
        addressParser.stringToEth(object.dl_src_mask, buffer, offset + OFFSETS.dlSrcMask)
      } else {
        buffer.fill(0x00, offset + OFFSETS.dlSrcMask, offset + OFFSETS.dlSrcMask + of.ethAddrLen)
      }
      addressParser.stringToEth(object.dl_src, buffer, offset + OFFSETS.dlSrc)
    } else {
      buffer.fill(0x00, offset + OFFSETS.dlSrc, offset + OFFSETS.dlSrc + of.ethAddrLen)
      buffer.fill(0xff, offset + OFFSETS.dlSrcMask, offset + OFFSETS.dlSrcMask + of.ethAddrLen)
    }

    if (object.dl_dst !== undefined) {
      if (object.dl_dst_mask !== undefined) {
        addressParser.stringToEth(object.dl_dst_mask, buffer, offset + OFFSETS.dlDstMask)
      } else {
        buffer.fill(0x00, offset + OFFSETS.dlDstMask, offset + OFFSETS.dlDstMask + of.ethAddrLen)
      }
      addressParser.stringToEth(object.dl_dst, buffer, offset + OFFSETS.dlDst)
    } else {
      buffer.fill(0x00, offset + OFFSETS.dlDst, offset + OFFSETS.dlDst + of.ethAddrLen)
      buffer.fill(0xff, offset + OFFSETS.dlDstMask, offset + OFFSETS.dlDstMask + of.ethAddrLen)
    }

    let dlVlan = 0
    if (object.dl_vlan !== undefined) {
      dlVlan = object.dl_vlan
    } else {
      wildcards |= WILDCARDS.dlVlan
    }
    buffer.writeUInt16BE(dlVlan, offset + OFFSETS.dlVlan)

    let dlVlanPcp = 0
    if (object.dl_vlan_pcp !== undefined) {
      dlVlanPcp = object.dl_vlan_pcp
    } else {
      wildcards |= WILDCARDS.dlVlanPcp
    }
    buffer.writeUInt8(dlVlanPcp, offset + OFFSETS.dlVlanPcp)

    buffer.fill(0, offset + OFFSETS.pad1, offset + OFFSETS.pad1 + PAD_LENGTH)

    let dlType = 0
    if (object.dl_type !== undefined) {
      dlType = object.dl_type
    } else {
      wildcards |= WILDCARDS.dlType
    }
    buffer.writeUInt16BE(dlType, offset + OFFSETS.dlType)

    let nwTos = 0
    if (object.nw_tos !== undefined) {
      nwTos = object.nw_tos
    } else {
      wildcards |= WILDCARDS.nwTos
    }
    buffer.writeUInt8(nwTos, offset + OFFSETS.nwTos)

    let nwProto = 0
    if (object.nw_proto !== undefined) {
      nwProto = object.nw_proto
    } else {
      wildcards |= WILDCARDS.nwProto
    }
    buffer.writeUInt8(nwProto, offset + OFFSETS.nwProto)

    if (object.nw_src !== undefined) {
      if (object.nw_src_mask !== undefined) {
        addressParser.stringToip4(object.nw_src_mask, buffer, offset + OFFSETS.nwSrcMask)
      } else {
        buffer.fill(0x00, offset + OFFSETS.nwSrcMask, offset + OFFSETS.nwSrcMask + IP4_ADDR_LEN)
      }
      addressParser.stringToip4(object.nw_src, buffer, offset + OFFSETS.nwSrc)
    } else {
      buffer.fill(0x00, offset + OFFSETS.nwSrc, offset + OFFSETS.nwSrc + IP4_ADDR_LEN)
      buffer.fill(0xff, offset + OFFSETS.nwSrcMask, offset + OFFSETS.nwSrcMask + IP4_ADDR_LEN)
    }

    if (object.nw_dst !== undefined) {
      if (object.nw_dst_mask !== undefined) {
        addressParser.stringToip4(object.nw_dst_mask, buffer, offset + OFFSETS.nwDstMask)
      } else {
        buffer.fill(0x00, offset + OFFSETS.nwDstMask, offset + OFFSETS.nwDstMask + IP4_ADDR_LEN)
      }
      addressParser.stringToip4(object.nw_dst, buffer, offset + OFFSETS.nwDst)
    } else {
      buffer.fill(0x00, offset + OFFSETS.nwDst, offset + OFFSETS.nwDst + IP4_ADDR_LEN)
      buffer.fill(0xff, offset + OFFSETS.nwDstMask, offset + OFFSETS.nwDstMask + IP4_ADDR_LEN)
    }

    let tpSrc = 0
    if (object.tp_src !== undefined) {
      tpSrc = object.tp_src
    } else {
      wildcards |= WILDCARDS.tpSrc
    }
    buffer.writeUInt16BE(tpSrc, offset + OFFSETS.tpSrc)

    let tpDst = 0
    if (object.tp_dst !== undefined) {
      tpDst = object.tp_dst
    } else {
      wildcards |= WILDCARDS.tpDst
    }
    buffer.writeUInt16BE(tpDst, offset + OFFSETS.tpDst)

    let mplsLabel = 0
    if (object.mpls_label !== undefined) {
      mplsLabel = object.mpls_label
    } else {
      wildcards |= WILDCARDS.mplsLabel
    }
    buffer.writeUInt32BE(mplsLabel, offset + OFFSETS.mplsLabel)

    let mplsTc = 0
    if (object.mpls_tc !== undefined) {
      mplsTc = object.mpls_tc
    } else {
      wildcards |= WILDCARDS.mplsTc
    }
    buffer.writeUInt8(mplsTc, offset + OFFSETS.mplsTc)

    buffer.fill(0, offset + OFFSETS.pad2, offset + OFFSETS.pad2 + PAD2_LENGTH)

    if (object.metadata !== undefined) {
      if (object.metadata_mask !== undefined) {
        buffer.copy(
          object.metadata_mask,
          0,
          offset + OFFSETS.metadataMask,
          offset + OFFSETS.metadataMask + METADATA_LENGTH
        )
      } else {
        buffer.fill(0x00, offset + OFFSETS.metadataMask, offset + OFFSETS.metadataMask + METADATA_LENGTH)
      }
      buffer.copy(object.metadata, 0, offset + OFFSETS.metadata, offset + OFFSETS.metadata + METADATA_LENGTH)
    } else {
      buffer.fill(0x00, offset + OFFSETS.metadata, offset + OFFSETS.metadata + METADATA_LENGTH)
      buffer.fill(0xff, offset + OFFSETS.metadataMask, offset + OFFSETS.metadataMask + METADATA_LENGTH)
    }

    buffer.writeUInt32BE(wildcards, offset + OFFSETS.wildcards)

    return buffer
  },

  unpack: (buffer, offset = 0) => {
    const object = {}
    object.type = buffer.readUInt16BE(offset + OFFSETS.type)
    assert(object.type === of.matchType.standard)

    object.length = buffer.readUInt16BE(offset + OFFSETS.length)
    assert(object.length === of.sizes.match)

    // Wildcards indicate which value to use for the match.
    // if `wildcards & of.wildcards.<value>` === 0 then `value` is not wildcarded and must be used.
    const wildcards = (object.wildcards = buffer.readUInt32BE(offset + OFFSETS.wildcards))
    if ((wildcards & WILDCARDS.inPort) === 0) {
      object.in_port = buffer.readUInt32BE(offset + OFFSETS.inPort)
    }

    if (!addressParser.isEthMaskAll(buffer, offset + OFFSETS.dlSrcMask)) {
      if (!addressParser.isEthMaskNone(buffer, offset + OFFSETS.dlSrcMask)) {
        object.dl_src_mask = addressParser.ethToString(buffer, offset + OFFSETS.dlSrcMask)
      }
      object.dl_src = addressParser.ethToString(buffer, offset + OFFSETS.dlSrc)
    }
    if (!addressParser.isEthMaskAll(buffer, offset + OFFSETS.dlDstMask)) {
      if (!addressParser.isEthMaskNone(buffer, offset + OFFSETS.dlDstMask)) {
        object.dl_dst_mask = addressParser.ethToString(buffer, offset + OFFSETS.dlDstMask)
      }
      object.dl_dst = addressParser.ethToString(buffer, offset + OFFSETS.dlDst)
    }

    if ((wildcards & WILDCARDS.dlVlan) === 0) {
      object.dl_vlan = buffer.readUInt16BE(offset + OFFSETS.dlVlan)
    }
    if ((wildcards & WILDCARDS.dlVlanPcp) === 0) {
      object.dl_vlan_pcp = buffer.readUInt16BE(offset + OFFSETS.dlVlanPcp)
    }
    if ((wildcards & WILDCARDS.dlType) === 0) {
      object.dl_type = buffer.readUInt16BE(offset + OFFSETS.dlType)
    }

    if ((wildcards & WILDCARDS.nwTos) === 0) {
      object.nw_tos = buffer.readUInt8(offset + OFFSETS.nwTos)
    }
    if ((wildcards & WILDCARDS.nwProto) === 0) {
      object.nw_proto = buffer.readUInt8(offset + OFFSETS.nwProto)
    }

    if (!addressParser.isIp4MaskAll(buffer, offset + OFFSETS.nwSrcMask)) {
      if (!addressParser.isIp4MaskNone(buffer, offset + OFFSETS.nwSrcMask)) {
        object.nw_src_mask = addressParser.ip4ToString(buffer, offset + OFFSETS.nwSrcMask)
      }
      object.nw_src = addressParser.ip4ToString(buffer, offset + OFFSETS.nwSrc)
    }
    if (!addressParser.isIp4MaskAll(buffer, offset + OFFSETS.nwDstMask)) {
      if (!addressParser.isIp4MaskNone(buffer, offset + OFFSETS.nwDstMask)) {
        object.nw_dst_mask = addressParser.ip4ToString(buffer, offset + OFFSETS.nwDstMask)
      }
      object.nw_dst = addressParser.ip4ToString(buffer, offset + OFFSETS.nwDst)
    }

    if ((wildcards & WILDCARDS.tpSrc) === 0) {
      object.tp_src = buffer.readUInt16BE(offset + OFFSETS.tpSrc)
    }
    if ((wildcards & WILDCARDS.tpDst) === 0) {
      object.tp_dst = buffer.readUInt16BE(offset + OFFSETS.tpDst)
    }

    if ((wildcards & WILDCARDS.mplsLabel) === 0) {
      object.mpls_label = buffer.readUInt32BE(offset + OFFSETS.mplsLabel)
    }
    if ((wildcards & WILDCARDS.mplsTc) === 0) {
      object.mpls_tc = buffer.readUInt32BE(offset + OFFSETS.mplsTc)
    }

    const metadataMask = [
      buffer.readUInt32BE(offset + OFFSETS.metadataMask),
      buffer.readUInt32BE(offset + OFFSETS.metadataMask + METADATA_LENGTH / 2),
    ]
    if (!uIntHelper.isUInt64All(metadataMask)) {
      if (!uIntHelper.isUInt64None(metadataMask)) {
        object.metadata_mask = Buffer.alloc(METADATA_LENGTH)
        buffer.copy(
          object.metadata_mask,
          0,
          offset + OFFSETS.metadataMask,
          offset + OFFSETS.metadataMask + METADATA_LENGTH
        )
      }
      object.metadata = Buffer.alloc(METADATA_LENGTH)
      buffer.copy(object.metadata, 0, offset + OFFSETS.metadata, offset + OFFSETS.metadata + METADATA_LENGTH)
    }

    return object
  },
}

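As the wildcard comment in `unpack` above notes, any field left `undefined` on the match object is wildcarded when packed. A minimal sketch of that convention, using only names defined by the codec above:

```js
// Only in_port is matched; every other field gets its wildcard bit set.
const packed = ofMatch.pack({ type: of.matchType.standard, in_port: 1 })

const match = ofMatch.unpack(packed)
// match.in_port === 1, while dl_vlan, dl_type, nw_tos… are absent because
// their wildcard bits are set in match.wildcards.
```
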
@@ -1,71 +0,0 @@
import of from '../openflow-11'
import addressParser from '../../util/addrress-parser'

// =============================================================================

const OFFSETS = of.offsets.port
const PAD_LENGTH = 4
const PAD2_LENGTH = 2

// =============================================================================

export default {
  pack: (object, buffer = undefined, offset = 0) => {
    buffer = buffer !== undefined ? buffer : Buffer.alloc(of.sizes.port)
    const {
      port_no: portNo,
      hw_addr: hwAddr,
      name,
      config,
      state,
      curr,
      advertised,
      supported,
      peer,
      curr_speed: currSpeed,
      max_speed: maxSpeed,
    } = object

    buffer.writeUInt32BE(portNo, offset + OFFSETS.portNo)
    buffer.fill(0, offset + OFFSETS.pad, offset + OFFSETS.pad + PAD_LENGTH)
    addressParser.stringToEth(hwAddr, buffer, offset + OFFSETS.hwAddr)
    buffer.fill(0, offset + OFFSETS.pad, offset + OFFSETS.pad + PAD2_LENGTH)
    buffer.write(name, offset + OFFSETS.name, of.maxPortNameLen)
    if (name.length < of.maxPortNameLen) {
      buffer.fill(0, offset + OFFSETS.name + name.length, offset + OFFSETS.name + of.maxPortNameLen)
    }

    buffer.writeUInt32BE(config, offset + OFFSETS.config)
    buffer.writeUInt32BE(state, offset + OFFSETS.state)
    buffer.writeUInt32BE(curr, offset + OFFSETS.curr)
    buffer.writeUInt32BE(advertised, offset + OFFSETS.advertised)
    buffer.writeUInt32BE(supported, offset + OFFSETS.supported)
    buffer.writeUInt32BE(peer, offset + OFFSETS.peer)
    buffer.writeUInt32BE(currSpeed, offset + OFFSETS.currSpeed)
    buffer.writeUInt32BE(maxSpeed, offset + OFFSETS.maxSpeed)
    return buffer
  },

  unpack: (buffer, offset = 0) => {
    const body = {}

    body.port_no = buffer.readUInt32BE(offset + OFFSETS.portNo)
    body.hw_addr = addressParser.ethToString(buffer, offset + OFFSETS.hwAddr)

    const name = buffer.toString('utf8', offset + OFFSETS.name, offset + OFFSETS.name + of.maxPortNameLen)
    body.name = name.substr(0, name.indexOf('\0')) // Remove useless 0 if name.length < of.maxPortNameLen

    body.config = buffer.readUInt32BE(offset + OFFSETS.config)
    body.state = buffer.readUInt32BE(offset + OFFSETS.state)

    body.curr = buffer.readUInt32BE(offset + OFFSETS.curr)
    body.advertised = buffer.readUInt32BE(offset + OFFSETS.advertised)
    body.supported = buffer.readUInt32BE(offset + OFFSETS.supported)
    body.peer = buffer.readUInt32BE(offset + OFFSETS.peer)

    body.curr_speed = buffer.readUInt32BE(offset + OFFSETS.currSpeed)
    body.max_speed = buffer.readUInt32BE(offset + OFFSETS.maxSpeed)

    return body
  },
}

@@ -1,45 +0,0 @@
import assert from 'assert'

import of from './index'
import scheme from './default-header-scheme'
import { readChunk } from '@vates/read-chunk'

// =============================================================================

export default async function* parse(socket) {
  let buffer = Buffer.alloc(1024)
  let data

  // Read the header
  while ((data = await readChunk(socket, scheme.size)) !== null) {
    // Read OpenFlow message size from its header
    const msgSize = data.readUInt16BE(scheme.offsets.length)
    data.copy(buffer, 0, 0, scheme.size)

    if (buffer.length < msgSize) {
      buffer = resize(buffer, msgSize)
    }

    // Read the rest of the openflow message
    if (msgSize > scheme.size) {
      data = await readChunk(socket, msgSize - scheme.size)
      assert.notStrictEqual(data, null)
      data.copy(buffer, scheme.size, 0, msgSize - scheme.size)
    }

    yield of.unpack(buffer)
  }
}

// -----------------------------------------------------------------------------

function resize(buffer, size) {
  let newLength = buffer.length
  do {
    newLength *= 2
  } while (newLength < size)

  const newBuffer = Buffer.alloc(newLength)
  buffer.copy(newBuffer)
  return newBuffer
}

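A sketch of how this generator could be consumed. The module path, host and port are illustrative, and it assumes `of.unpack` returns objects carrying a `header`, as the codecs above do:

```js
import net from 'net'
import parse from './parse' // hypothetical path to the generator above

async function main() {
  const socket = net.connect(6633, 'switch.example') // illustrative endpoint
  await new Promise(resolve => socket.once('connect', resolve))

  // Each iteration yields one fully unpacked OpenFlow message.
  for await (const message of parse(socket)) {
    console.log('received message type', message.header.type)
  }
}

main().catch(console.error)
```
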
@@ -1,62 +0,0 @@
import assert from 'assert'
import util from 'util'

// =============================================================================

export default {
  isEthMaskNone: (buffer, offset) =>
    buffer.readUInt32BE(offset) === 0x00000000 && buffer.readUInt16BE(offset + 4) === 0x0000,

  isEthMaskAll: (buffer, offset) =>
    buffer.readUInt32BE(offset) === 0xffffffff && buffer.readUInt16BE(offset + 4) === 0xffff,

  isIp4MaskNone: (buffer, offset) => buffer.readUInt32BE(offset) === 0x00000000,

  isIp4MaskAll: (buffer, offset) => buffer.readUInt32BE(offset) === 0xffffffff,

  ethToString: (buffer, offset) =>
    buffer.toString('hex', offset, offset + 1) +
    ':' +
    buffer.toString('hex', offset + 1, offset + 2) +
    ':' +
    buffer.toString('hex', offset + 2, offset + 3) +
    ':' +
    buffer.toString('hex', offset + 3, offset + 4) +
    ':' +
    buffer.toString('hex', offset + 4, offset + 5) +
    ':' +
    buffer.toString('hex', offset + 5, offset + 6),

  stringToEth: (string, buffer, offset) => {
    const eth = /^([0-9A-Fa-f]{2}):([0-9A-Fa-f]{2}):([0-9A-Fa-f]{2}):([0-9A-Fa-f]{2}):([0-9A-Fa-f]{2}):([0-9A-Fa-f]{2})$/.exec(
      string
    )
    assert(eth !== null)
    buffer.writeUInt8(parseInt(eth[1], 16), offset)
    buffer.writeUInt8(parseInt(eth[2], 16), offset + 1)
    buffer.writeUInt8(parseInt(eth[3], 16), offset + 2)
    buffer.writeUInt8(parseInt(eth[4], 16), offset + 3)
    buffer.writeUInt8(parseInt(eth[5], 16), offset + 4)
    buffer.writeUInt8(parseInt(eth[6], 16), offset + 5)
  },

  ip4ToString: (buffer, offset) =>
    util.format(
      '%d.%d.%d.%d',
      buffer.readUInt8(offset),
      buffer.readUInt8(offset + 1),
      buffer.readUInt8(offset + 2),
      buffer.readUInt8(offset + 3)
    ),

  stringToip4: (string, buffer, offset) => {
    const ip = /^([1-9]?\d|1\d\d|2[0-4]\d|25[0-5])\.([1-9]?\d|1\d\d|2[0-4]\d|25[0-5])\.([1-9]?\d|1\d\d|2[0-4]\d|25[0-5])\.([1-9]?\d|1\d\d|2[0-4]\d|25[0-5])$/.exec(
      string
    )
    assert(ip !== null)
    buffer.writeUInt8(parseInt(ip[1], 10), offset)
    buffer.writeUInt8(parseInt(ip[2], 10), offset + 1)
    buffer.writeUInt8(parseInt(ip[3], 10), offset + 2)
    buffer.writeUInt8(parseInt(ip[4], 10), offset + 3)
  },
}

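A quick round-trip sketch for the helpers above; the buffer sizes follow the 6-byte Ethernet and 4-byte IPv4 address lengths used by the codecs, and the addresses are illustrative:

```js
const mac = Buffer.alloc(6)
addressParser.stringToEth('aa:bb:cc:dd:ee:ff', mac, 0)
addressParser.ethToString(mac, 0) // => 'aa:bb:cc:dd:ee:ff'

const ip = Buffer.alloc(4)
addressParser.stringToip4('192.168.0.1', ip, 0)
addressParser.ip4ToString(ip, 0) // => '192.168.0.1'
```
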
@@ -1,7 +0,0 @@
import assert from 'assert'

export default function get(map, key, errorMsg = undefined) {
  const value = map[String(key)]
  assert.notStrictEqual(value, undefined, errorMsg !== undefined ? errorMsg : `${key} is invalid`)
  return value
}

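Usage sketch for this helper; the lookup table is hypothetical:

```js
const typeCodes = { hello: 0, error: 1 } // hypothetical name → code table

get(typeCodes, 'hello') // => 0
get(typeCodes, 'unknown') // throws AssertionError: unknown is invalid
```
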
@@ -1,10 +0,0 @@
const ZERO = 0x00000000
const ALL = 0xffffffff

// =============================================================================

export default {
  isUInt64None: n => n[0] === ZERO && n[1] === ZERO,

  isUInt64All: n => n[0] === ALL && n[1] === ALL,
}

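These predicates treat a 64-bit field as a pair of big-endian 32-bit halves, matching how callers above read it with two consecutive `readUInt32BE` calls. For instance:

```js
const buf = Buffer.alloc(8) // e.g. a cookie_mask field, all zeroes
uIntHelper.isUInt64None([buf.readUInt32BE(0), buf.readUInt32BE(4)]) // => true

buf.fill(0xff)
uIntHelper.isUInt64All([buf.readUInt32BE(0), buf.readUInt32BE(4)]) // => true
```
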
@@ -1,9 +0,0 @@
export default {
  // TODO: more version
  // openFlow10: 0x01,
  openFlow11: 0x02,
  // openFlow12: 0x03,
  // openFlow13: 0x04,
  // openFlow14: 0x05,
  // openFlow15: 0x06,
}

@@ -19,14 +19,7 @@ Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/self-
```js
import { genSelfSigned } from '@xen-orchestra/self-signed'

console.log(
  await genSelfSigned({
    // Number of days this certificate will be valid.
    //
    // Default: 360
    days: 600,
  })
)
console.log(await genSelfSigned())
// {
//   cert: '-----BEGIN CERTIFICATE-----\n' +
//   // content…

@@ -1,14 +1,7 @@
```js
import { genSelfSigned } from '@xen-orchestra/self-signed'

console.log(
  await genSelfSigned({
    // Number of days this certificate will be valid.
    //
    // Default: 360
    days: 600,
  })
)
console.log(await genSelfSigned())
// {
//   cert: '-----BEGIN CERTIFICATE-----\n' +
//   // content…

@@ -10,12 +10,16 @@ const openssl = (cmd, args, { input, ...opts } = {}) =>
    }
  })

exports.genSelfSignedCert = async ({ days = 360 } = {}) => {
exports.genSelfSignedCert = async () => {
  const key = await openssl('genrsa', ['2048'])
  return {
    cert: await openssl('req', ['-batch', '-new', '-key', '-', '-x509', '-days', String(days), '-nodes'], {
      input: key,
    }),
    cert: await openssl(
      'req',
      ['-batch', '-new', '-key', '-', '-x509', '-days', '360', '-nodes'],
      {
        input: key,
      }
    ),
    key,
  }
}

@@ -1 +1,3 @@
module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))
module.exports = require('../../@xen-orchestra/babel-config')(
  require('./package.json')
)

@@ -3,7 +3,10 @@ import escapeRegExp from 'lodash/escapeRegExp'
const compareLengthDesc = (a, b) => b.length - a.length

export function compileTemplate(pattern, rules) {
  const matches = Object.keys(rules).sort(compareLengthDesc).map(escapeRegExp).join('|')
  const matches = Object.keys(rules)
    .sort(compareLengthDesc)
    .map(escapeRegExp)
    .join('|')
  const regExp = new RegExp(`\\\\(?:\\\\|${matches})|${matches}`, 'g')
  return (...params) =>
    pattern.replace(regExp, match => {

@@ -2,10 +2,13 @@
import { compileTemplate } from '.'

it("correctly replaces the template's variables", () => {
  const replacer = compileTemplate('{property}_\\{property}_\\\\{property}_{constant}_%_FOO', {
    '{property}': obj => obj.name,
    '{constant}': 1235,
    '%': (_, i) => i,
  })
  const replacer = compileTemplate(
    '{property}_\\{property}_\\\\{property}_{constant}_%_FOO',
    {
      '{property}': obj => obj.name,
      '{constant}': 1235,
      '%': (_, i) => i,
    }
  )
  expect(replacer({ name: 'bar' }, 5)).toBe('bar_{property}_\\bar_1235_5_FOO')
})

@@ -1 +1,3 @@
module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))
module.exports = require('../../@xen-orchestra/babel-config')(
  require('./package.json')
)

@@ -1,6 +1,6 @@
{
  "name": "@xen-orchestra/upload-ova",
  "version": "0.1.4",
  "version": "0.1.3",
  "license": "AGPL-3.0-or-later",
  "description": "Basic CLI to upload ova files to Xen-Orchestra",
  "keywords": [
@@ -33,24 +33,23 @@
    "node": ">=8.10"
  },
  "dependencies": {
    "chalk": "^4.1.0",
    "chalk": "^2.2.0",
    "exec-promise": "^0.7.0",
    "form-data": "^3.0.0",
    "fs-extra": "^9.0.0",
    "fs-promise": "^2.0.3",
    "get-stream": "^6.0.0",
    "get-stream": "^4.1.0",
    "http-request-plus": "^0.8.0",
    "human-format": "^0.11.0",
    "human-format": "^0.10.0",
    "l33teral": "^3.0.3",
    "lodash": "^4.17.4",
    "nice-pipe": "0.0.0",
    "pretty-ms": "^7.0.0",
    "pretty-ms": "^4.0.0",
    "progress-stream": "^2.0.0",
    "pw": "^0.0.4",
    "strip-indent": "^3.0.0",
    "xdg-basedir": "^4.0.0",
    "xo-lib": "^0.10.1",
    "xo-vmdk-to-vhd": "^2.0.0"
    "strip-indent": "^2.0.0",
    "xdg-basedir": "^3.0.0",
    "xo-lib": "^0.9.0",
    "xo-vmdk-to-vhd": "^1.2.1"
  },
  "devDependencies": {
    "@babel/cli": "^7.0.0",

@@ -4,7 +4,6 @@

import chalk from 'chalk'
import execPromise from 'exec-promise'
import FormData from 'form-data'
import { createReadStream } from 'fs'
import { stat } from 'fs-promise'
import getStream from 'get-stream'
@@ -24,7 +23,11 @@ import Xo from 'xo-lib'
import { parseOVAFile } from 'xo-vmdk-to-vhd'

import pkg from '../package'
import { load as loadConfig, set as setConfig, unset as unsetConfig } from './config'
import {
  load as loadConfig,
  set as setConfig,
  unset as unsetConfig,
} from './config'

function help() {
  return stripIndent(
@@ -118,7 +121,11 @@ function nodeStringDecoder(buffer, encoder) {

export async function inspect(args) {
  const file = args[0]
  const data = await parseOVAFile(new NodeParsableFile(file, (await stat(file)).size), nodeStringDecoder, true)
  const data = await parseOVAFile(
    new NodeParsableFile(file, (await stat(file)).size),
    nodeStringDecoder,
    true
  )
  console.log('file metadata:', data)
}

@@ -152,10 +159,14 @@ export async function upload(args) {
    overrides = parseOverride(args)
  }

  const data = await parseOVAFile(new NodeParsableFile(file, (await stat(file)).size), nodeStringDecoder)
  const data = await parseOVAFile(
    new NodeParsableFile(file, (await stat(file)).size),
    nodeStringDecoder
  )
  const params = { sr: srId }
  const xo = await connect()
  const getXoObject = async filter => Object.values(await xo.call('xo.getAllObjects', { filter }))[0]
  const getXoObject = async filter =>
    Object.values(await xo.call('xo.getAllObjects', { filter }))[0]
  const sr = await getXoObject({ id: srId })
  const pool = await getXoObject({ id: sr.$poolId })
  const master = await getXoObject({ id: pool.master })
@@ -188,17 +199,8 @@ export async function upload(args) {
  let keys, key, url
  if (isObject(result) && (keys = getKeys(result)).length === 1) {
    key = keys[0]

    if (key === '$sendTo') {
      const formData = new FormData()
      if (data.tables !== undefined) {
        for (const k in data.tables) {
          const tables = await data.tables[k]
          delete data.tables[k]
          for (const l in tables) {
            formData.append(l, Buffer.from(tables[l]), k)
          }
        }
      }
      if (typeof file !== 'string') {
        // eslint-disable-next-line no-throw-literal
        throw 'file parameter should be a path'
@@ -216,9 +218,16 @@ export async function upload(args) {
          printProgress
        ),
      ])
      formData.append('file', input, { filename: 'file', knownLength: length })

      try {
        return await hrp.post(url.toString(), { body: formData, headers: formData.getHeaders() }).readAll('utf-8')
        return await hrp
          .post(url.toString(), {
            body: input,
            headers: {
              'content-length': length,
            },
          })
          .readAll('utf-8')
      } catch (e) {
        console.log('ERROR', e)
        console.log('ERROR content', await e.response.readAll('utf-8'))
@@ -251,7 +260,10 @@ export class NodeParsableFile {
    )
    // crazy stuff to get a browser-compatible ArrayBuffer from a node buffer
    // https://stackoverflow.com/a/31394257/72637
    return result.buffer.slice(result.byteOffset, result.byteOffset + result.byteLength)
    return result.buffer.slice(
      result.byteOffset,
      result.byteOffset + result.byteLength
    )
  }
}

@@ -282,7 +294,9 @@ export default async function main(args) {
  if (!args || !args.length || args[0] === '-h' || args[0] === '--help') {
    return help()
  }
  const fnName = args[0].replace(/^--|-\w/g, match => (match === '--' ? '' : match[1].toUpperCase()))
  const fnName = args[0].replace(/^--|-\w/g, match =>
    match === '--' ? '' : match[1].toUpperCase()
  )
  if (fnName in exports) {
    return exports[fnName](args.slice(1))
  }

CHANGELOG.md
@@ -1,370 +1,9 @@
|
||||
# ChangeLog
|
||||
|
||||
## **5.55.1** (2021-02-05)
|
||||
|
||||
<img id="latest" src="https://badgen.net/badge/channel/latest/yellow" alt="Channel: latest" />
|
||||
|
||||
### Bug fixes
|
||||
|
||||
- [LDAP] "Synchronize LDAP groups" button: fix imported LDAP users not being correctly added or removed from groups in some cases (PR [#5545](https://github.com/vatesfr/xen-orchestra/pull/5545))
|
||||
- [VM migration] Fix `VIF_NOT_IN_MAP` error (PR [5544](https://github.com/vatesfr/xen-orchestra/pull/5544))
|
||||
|
||||
### Released packages
|
||||
|
||||
- xo-server-auth-ldap 0.10.2
|
||||
- xo-server 5.74.1
|
||||
|
||||
## **5.55.0** (2021-01-29)
|
||||
|
||||
### Highlights
|
||||
|
||||
- [Web hooks] Possibility to wait a response from the server before continuing [#4948](https://github.com/vatesfr/xen-orchestra/issues/4948) (PR [#5420](https://github.com/vatesfr/xen-orchestra/pull/5420))
|
||||
- [XOA/update] Add a link to the channel's changelog (PR [#5494](https://github.com/vatesfr/xen-orchestra/pull/5494))
|
||||
- Assign custom date-time fields on pools, hosts, SRs, and VMs in advanced tab [#4730](https://github.com/vatesfr/xen-orchestra/issues/4730) (PR [#5473](https://github.com/vatesfr/xen-orchestra/pull/5473))
|
||||
- [Health] Show duplicated MAC addresses with their VIFs, VMs and networks [#5448](https://github.com/vatesfr/xen-orchestra/issues/5448) (PR [#5468](https://github.com/vatesfr/xen-orchestra/pull/5468))
|
||||
- [Pool/advanced] Ability to define default migration network [#3788](https://github.com/vatesfr/xen-orchestra/issues/3788#issuecomment-743207834) (PR [#5465](https://github.com/vatesfr/xen-orchestra/pull/5465))
|
||||
- [Proxy] Support metadata backups (PRs [#5499](https://github.com/vatesfr/xen-orchestra/pull/5499) [#5517](https://github.com/vatesfr/xen-orchestra/pull/5517) [#5519](https://github.com/vatesfr/xen-orchestra/pull/5519) [#5520](https://github.com/vatesfr/xen-orchestra/pull/5520))
|
||||
- [VM/console] Add button to connect to the VM via the local RDP client [#5495](https://github.com/vatesfr/xen-orchestra/issues/5495) (PR [#5523](https://github.com/vatesfr/xen-orchestra/pull/5523))
|
||||
|
||||
### Enhancements
|
||||
|
||||
- [Host/stats] Show interfaces' names in graph "Network throughput" instead of PIFs' indices (PR [#5483](https://github.com/vatesfr/xen-orchestra/pull/5483))
|
||||
- [Metadata backups] Ability to link a backup to a proxy (PR [#4206](https://github.com/vatesfr/xen-orchestra/pull/4206))
|
||||
- [VM] Ability to set guest secure boot (guest secure boot is available soon in XCP-ng) [#5502](https://github.com/vatesfr/xen-orchestra/issues/5502) (PR [#5527](https://github.com/vatesfr/xen-orchestra/pull/5527))
|
||||
- [Proxy] Improve upgrade feedback (PR [#5525](https://github.com/vatesfr/xen-orchestra/pull/5525))
|
||||
|
||||
### Bug fixes
|
||||
|
||||
- [VM/network] Change VIF's locking mode automatically to `locked` when adding allowed IPs (PR [#5472](https://github.com/vatesfr/xen-orchestra/pull/5472))
|
||||
- [Backup Reports] Don't hide errors during plugin test [#5486](https://github.com/vatesfr/xen-orchestra/issues/5486) (PR [#5491](https://github.com/vatesfr/xen-orchestra/pull/5491))
|
||||
- [Backup reports] Fix malformed sent email in case of multiple VMs (PR [#5479](https://github.com/vatesfr/xen-orchestra/pull/5479))
|
||||
- [Restore/metadata] Ignore disabled remotes on listing backups (PR [#5504](https://github.com/vatesfr/xen-orchestra/pull/5504))
|
||||
- [VM/network] Change VIF's locking mode automatically to `network_default` when changing network (PR [#5500](https://github.com/vatesfr/xen-orchestra/pull/5500))
|
||||
- [Backup/S3] Fix `TimeoutError: Connection timed out after 120000ms` (PR [#5456](https://github.com/vatesfr/xen-orchestra/pull/5456))
|
||||
- [New SR/reattach SR] Fix SR not being properly reattached to hosts [#4546](https://github.com/vatesfr/xen-orchestra/issues/4546) (PR [#5488](https://github.com/vatesfr/xen-orchestra/pull/5488))
|
||||
- [Home/pool] Missing patches warning: fix 1 patch showing as missing in case of error [#4922](https://github.com/vatesfr/xen-orchestra/issues/4922)
|
||||
- [Proxy/remote] Fix error not updated on remote test (PR [#5514](https://github.com/vatesfr/xen-orchestra/pull/5514))
|
||||
- [Home/SR] Sort SR usage in % instead of bytes [#5463](https://github.com/vatesfr/xen-orchestra/issues/5463) (PR [#5513](https://github.com/vatesfr/xen-orchestra/pull/5513))
|
||||
- [VM migration] Fix `SR_NOT_ATTACHED` error when migration network is selected (PR [#5516](https://github.com/vatesfr/xen-orchestra/pull/5516))
|
||||
|
||||
### Released packages
|
||||
|
||||
- @xen-orchestra/fs 0.12.1
|
||||
- xo-server-backup-reports 0.16.8
|
||||
- xo-server 5.74.0
|
||||
- xo-web 5.77.0
|
||||
- xo-server-web-hooks 0.3.0
|
||||
|
||||
## **5.54.0** (2020-12-29)
|
||||
|
||||
<img id="stable" src="https://badgen.net/badge/channel/stable/green" alt="Channel: stable" />
|
||||
|
||||
### Highlights
|
||||
|
||||
- [Home] Ability to sort VMs by total disks physical usage (PR [#5418](https://github.com/vatesfr/xen-orchestra/pull/5418))
|
||||
- [Home/VM] Ability to choose network for bulk migration within a pool (PR [#5427](https://github.com/vatesfr/xen-orchestra/pull/5427))
|
||||
- [Host] Ability to set host control domain memory [#2218](https://github.com/vatesfr/xen-orchestra/issues/2218) (PR [#5437](https://github.com/vatesfr/xen-orchestra/pull/5437))
|
||||
- [Patches] Rolling pool update: automatically patch and restart a whole pool by live migrating running VMs back and forth as needed [#5286](https://github.com/vatesfr/xen-orchestra/issues/5286) (PR [#5430](https://github.com/vatesfr/xen-orchestra/pull/5430))
|
||||
- [Host] Replace `disabled/enabled state` by `maintenance mode` (PR [#5421](https://github.com/vatesfr/xen-orchestra/pull/5421))
|
||||
- [Dashboard/Overview] Filter out `udev` SRs [#5423](https://github.com/vatesfr/xen-orchestra/issues/5423) (PR [#5453](https://github.com/vatesfr/xen-orchestra/pull/5453))
|
||||
|
||||
### Enhancements
|
||||
|
||||
- [Plugins] Add user feedback when a plugin test finishes successfully (PR [#5409](https://github.com/vatesfr/xen-orchestra/pull/5409))
|
||||
- [New HBA SR] Show LUN serial and id in LUN selector (PR [#5422](https://github.com/vatesfr/xen-orchestra/pull/5422))
|
||||
- [Proxy] Ability to delete VM backups (PR [#5428](https://github.com/vatesfr/xen-orchestra/pull/5428))
|
||||
- [VM/disks, SR/disks] Destroy/forget VDIs: improve tooltip messages (PR [#5435](https://github.com/vatesfr/xen-orchestra/pull/5435))
|
||||
|
||||
### Bug fixes
|
||||
|
||||
- [Host] Fix `an error has occurred` on accessing a host's page (PR [#5417](https://github.com/vatesfr/xen-orchestra/pull/5417))
|
||||
|
||||
### Released packages
|
||||
|
||||
- xo-web 5.76.0
|
||||
- xo-server 5.73.0
|
||||
|
||||
## **5.53.1** (2020-12-10)
|
||||
|
||||
### Bug fixes
|
||||
|
||||
- [OVA/import] Fix OVA CLI import tool (PR [#5432](https://github.com/vatesfr/xen-orchestra/pull/5432))
|
||||
- [Jobs] Fix `Cannot read property id of undefined` error when running a job without a schedule [#5425](https://github.com/vatesfr/xen-orchestra/issues/5425) (PR [#5426](https://github.com/vatesfr/xen-orchestra/pull/5426))
|
||||
|
||||
### Released packages
|
||||
|
||||
- @xen-orchestra/upload-ova 0.1.4
|
||||
- xo-server 5.72.0
|
||||
|
||||
## **5.53.0** (2020-11-30)
|
||||
|
||||
### Enhancements
|
||||
|
||||
- [LDAP] Prevent LDAP-provided groups from being edited from XO [#1884](https://github.com/vatesfr/xen-orchestra/issues/1884) (PR [#5351](https://github.com/vatesfr/xen-orchestra/pull/5351))
|
||||
- [Licensing] Allow Free and Starter users to copy VMs and create a VM from snapshot on the same pool [#4890](https://github.com/vatesfr/xen-orchestra/issues/4890) (PR [5333](https://github.com/vatesfr/xen-orchestra/pull/5333))
|
||||
- [SR] Use SR type `zfs` instead of `file` for ZFS storage repositories (PR [5302](https://github.com/vatesfr/xen-orchestra/pull/5330))
|
||||
- [Dashboard/Health] List VMs with missing or outdated guest tools (PR [#5376](https://github.com/vatesfr/xen-orchestra/pull/5376))
|
||||
- [VIF] Ability for admins to set any allowed IPs, including IPv6 and IPs that are not in an IP pool [#2535](https://github.com/vatesfr/xen-orchestra/issues/2535) [#1872](https://github.com/vatesfr/xen-orchestra/issues/1872) (PR [#5367](https://github.com/vatesfr/xen-orchestra/pull/5367))
|
||||
- [Proxy] Ability to restore a file from VM backup (PR [#5359](https://github.com/vatesfr/xen-orchestra/pull/5359))
|
||||
- [Web Hooks] `backupNg.runJob` is now triggered by scheduled runs [#5205](https://github.com/vatesfr/xen-orchestra/issues/5205) (PR [#5360](https://github.com/vatesfr/xen-orchestra/pull/5360))
|
||||
- [Licensing] Add trial end information banner (PR [#5374](https://github.com/vatesfr/xen-orchestra/pull/5374))
|
||||
- Assign custom fields on pools, hosts, SRs, and VMs in advanced tab [#4730](https://github.com/vatesfr/xen-orchestra/issues/4730) (PR [#5387](https://github.com/vatesfr/xen-orchestra/pull/5387))
|
||||
- Ability to change the number of items displayed per table or page (PR [#5355](https://github.com/vatesfr/xen-orchestra/pull/5355))
|
||||
- [VM] Handle setting memory when DMC is disabled [#4978](https://github.com/vatesfr/xen-orchestra/issues/4978) & [#5326](https://github.com/vatesfr/xen-orchestra/issues/5326) (PR [#5412](https://github.com/vatesfr/xen-orchestra/pull/5412))
|
||||
|
||||
### Bug fixes
|
||||
|
||||
- [Remotes/NFS] Only mount with `vers=3` when no other options [#4940](https://github.com/vatesfr/xen-orchestra/issues/4940) (PR [#5354](https://github.com/vatesfr/xen-orchestra/pull/5354))
|
||||
- [VM/network] Don't change VIF's locking mode automatically (PR [#5357](https://github.com/vatesfr/xen-orchestra/pull/5357))
|
||||
- [Import OVA] Fix 'Max payload size exceeded' error when importing huge OVAs (PR [#5372](https://github.com/vatesfr/xen-orchestra/pull/5372))
|
||||
- [Backup] Make backup directories only accessible by root users (PR [#5378](https://github.com/vatesfr/xen-orchestra/pull/5378))
|
||||
|
||||
### Released packages
|
||||
|
||||
- xo-server-auth-ldap 0.10.1
|
||||
- @vates/multi-key-map 0.1.0
|
||||
- @xen-orchestra/fs 0.12.0
|
||||
- vhd-lib 1.0.0
|
||||
- xo-vmdk-to-vhd 2.0.0
|
||||
- xo-server-web-hooks 0.2.0
|
||||
- xo-server 5.71.2
|
||||
- xo-web 5.75.0
|
||||
|
||||
## **5.52.0** (2020-10-30)
|
||||
|
||||
### Highlights
|
||||
|
||||
- [Host/Advanced] Display installed certificates with ability to install a new certificate [#5134](https://github.com/vatesfr/xen-orchestra/issues/5134) (PRs [#5319](https://github.com/vatesfr/xen-orchestra/pull/5319) [#5332](https://github.com/vatesfr/xen-orchestra/pull/5332))
|
||||
- [VM/network] Allow Self Service users to change a VIF's network [#5020](https://github.com/vatesfr/xen-orchestra/issues/5020) (PR [#5203](https://github.com/vatesfr/xen-orchestra/pull/5203))
|
||||
- [Host/Advanced] Ability to change the scheduler granularity. Only available on XCP-ng >= 8.2 [#5291](https://github.com/vatesfr/xen-orchestra/issues/5291) (PR [#5320](https://github.com/vatesfr/xen-orchestra/pull/5320))
|
||||
|
||||
### Enhancements
|
||||
|
||||
- [New SSH key] Show warning when the SSH key already exists (PR [#5329](https://github.com/vatesfr/xen-orchestra/pull/5329))
|
||||
- [Pool/Network] Add a tooltip to the `Automatic` column (PR [#5345](https://github.com/vatesfr/xen-orchestra/pull/5345))
|
||||
- [LDAP] Ability to force group synchronization [#1884](https://github.com/vatesfr/xen-orchestra/issues/1884) (PR [#5343](https://github.com/vatesfr/xen-orchestra/pull/5343))
|
||||
|
||||
### Bug fixes
|
||||
|
||||
- [Host] Fix power state stuck on busy after power off [#4919](https://github.com/vatesfr/xen-orchestra/issues/4919) (PR [#5288](https://github.com/vatesfr/xen-orchestra/pull/5288))
|
||||
- [VM/Network] Don't allow users to change a VIF's locking mode if they don't have permissions on the network (PR [#5283](https://github.com/vatesfr/xen-orchestra/pull/5283))
|
||||
- [Backup/overview] Add tooltip on the running backup job button (PR [#5325 ](https://github.com/vatesfr/xen-orchestra/pull/5325))
|
||||
- [VM] Show snapshot button in toolbar for Self Service users (PR [#5324](https://github.com/vatesfr/xen-orchestra/pull/5324))
|
||||
- [User] Fallback to default filter on resetting customized filter (PR [#5321](https://github.com/vatesfr/xen-orchestra/pull/5321))
|
||||
- [Home] Show error notification when bulk VM snapshot fails (PR [#5323](https://github.com/vatesfr/xen-orchestra/pull/5323))
|
||||
- [Backup] Skip VMs currently migrating
|
||||
|
||||
### Released packages
|
||||
|
||||
- xo-server-auth-ldap 0.10.0
|
||||
- vhd-lib 0.8.0
|
||||
- @xen-orchestra/audit-core 0.2.0
|
||||
- xo-server-audit 0.9.0
|
||||
- xo-web 5.74.0
|
||||
- xo-server 5.70.0
|
||||
|
||||
## **5.51.1** (2020-10-14)
|
||||
|
||||
### Enhancements
|
||||
|
||||
- [Host/Advanced] Add the field `IOMMU` if it is defined (PR [#5294](https://github.com/vatesfr/xen-orchestra/pull/5294))
|
||||
- [Backup logs/report] Hide merge task when no merge is done (PR [#5263](https://github.com/vatesfr/xen-orchestra/pull/5263))
|
||||
- [New backup] Enable created schedules by default (PR [#5280](https://github.com/vatesfr/xen-orchestra/pull/5280))
|
||||
- [Backup/overview] Link backup jobs/schedules to their corresponding logs [#4564](https://github.com/vatesfr/xen-orchestra/issues/4564) (PR [#5260](https://github.com/vatesfr/xen-orchestra/pull/5260))
|
||||
- [VM] Hide backup tab for non-admin users [#5309](https://github.com/vatesfr/xen-orchestra/issues/5309) (PR [#5317](https://github.com/vatesfr/xen-orchestra/pull/5317))
|
||||
- [VM/Bulk migrate] Sort hosts in the select so that the hosts on the same pool are shown first [#4462](https://github.com/vatesfr/xen-orchestra/issues/4462) (PR [#5308](https://github.com/vatesfr/xen-orchestra/pull/5308))
|
||||
- [Proxy] Ability to update HTTP proxy configuration on XOA proxy (PR [#5148](https://github.com/vatesfr/xen-orchestra/pull/5148))
|
||||
|
||||
### Bug fixes
|
||||
|
||||
- [XOA/Notifications] Don't show expired notifications (PR [#5304](https://github.com/vatesfr/xen-orchestra/pull/5304))
|
||||
- [Backup/S3] Fix secret key edit form [#5233](https://github.com/vatesfr/xen-orchestra/issues/5233) (PR[#5305](https://github.com/vatesfr/xen-orchestra/pull/5305))
|
||||
- [New network] Remove the possibility of creating a network on a bond member interface (PR [#5262](https://github.com/vatesfr/xen-orchestra/pull/5262))
|
||||
- [User] Fix custom filters not showing up when selecting a default filter for templates (PR [#5298](https://github.com/vatesfr/xen-orchestra/pull/5298))
|
||||
- [Self/VDI migration] Fix hidden VDI after migration (PR [#5296](https://github.com/vatesfr/xen-orchestra/pull/5296))
|
||||
- [Self/VDI migration] Fix `not enough permissions` error (PR [#5299](https://github.com/vatesfr/xen-orchestra/pull/5299))
|
||||
- [Home] Hide backup filter for non-admin users [#5285](https://github.com/vatesfr/xen-orchestra/issues/5285) (PR [#5264](https://github.com/vatesfr/xen-orchestra/pull/5264))
|
||||
- [Backup/S3] Fix request signature error [#5253](https://github.com/vatesfr/xen-orchestra/issues/5253) (PR[#5315](https://github.com/vatesfr/xen-orchestra/pull/5315))
|
||||
- [SDN Controller] Fix tunnel traffic going on the wrong NIC: see https://xcp-ng.org/forum/topic/3544/mtu-problems-with-vxlan. (PR [#5281](https://github.com/vatesfr/xen-orchestra/pull/5281))
|
||||
- [Settings/IP Pools] Fix some IP ranges being split into multiple ranges in the UI [#3170](https://github.com/vatesfr/xen-orchestra/issues/3170) (PR [#5314](https://github.com/vatesfr/xen-orchestra/pull/5314))
|
||||
- [Self/Delete] Detach VMs and remove their ACLs on removing a resource set [#4797](https://github.com/vatesfr/xen-orchestra/issues/4797) (PR [#5312](https://github.com/vatesfr/xen-orchestra/pull/5312))
|
||||
- Fix `not enough permissions` error when accessing some pages as a Self Service user (PR [#5303](https://github.com/vatesfr/xen-orchestra/pull/5303))
|
||||
- [VM] Explicit error when VM migration failed due to unset default SR on destination pool [#5282](https://github.com/vatesfr/xen-orchestra/issues/5282) (PR [#5306](https://github.com/vatesfr/xen-orchestra/pull/5306))
|
||||
|
||||
### Released packages
|
||||
|
||||
- xo-server-sdn-controller 1.0.4
|
||||
- xo-server-backup-reports 0.16.7
|
||||
- xo-server 5.68.0
|
||||
- xo-web 5.72.0
|
||||
|
||||
## **5.51.0** (2020-09-30)
|
||||
|
||||
### Highlights
|
||||
|
||||
- [Self/VDI migration] Ability to migrate VDIs to other SRs within a resource set [#5020](https://github.com/vatesfr/xen-orchestra/issues/5020) (PR [#5201](https://github.com/vatesfr/xen-orchestra/pull/5201))
|
||||
- [LDAP] Ability to import LDAP groups to XO [#1884](https://github.com/vatesfr/xen-orchestra/issues/1884) (PR [#5279](https://github.com/vatesfr/xen-orchestra/pull/5279))
|
||||
- [Tasks] Show XO objects linked to pending/finished tasks [#4275](https://github.com/vatesfr/xen-orchestra/issues/4275) (PR [#5267](https://github.com/vatesfr/xen-orchestra/pull/5267))
|
||||
- [Backup logs] Ability to filter by VM/pool name [#4406](https://github.com/vatesfr/xen-orchestra/issues/4406) (PR [#5208](https://github.com/vatesfr/xen-orchestra/pull/5208))
|
||||
- [Backup/logs] Log's tasks pagination [#4406](https://github.com/vatesfr/xen-orchestra/issues/4406) (PR [#5209](https://github.com/vatesfr/xen-orchestra/pull/5209))
|
||||
|
||||
### Enhancements
|
||||
|
||||
- [VM Import] Make the `Description` field optional (PR [#5258](https://github.com/vatesfr/xen-orchestra/pull/5258))
|
||||
- [New VM] Hide missing ISOs in selector [#5222](https://github.com/vatesfr/xen-orchestra/issues/5222)
|
||||
- [Dashboard/Health] Show VMs that have too many snapshots [#5238](https://github.com/vatesfr/xen-orchestra/pull/5238)
|
||||
- [Groups] Ability to delete multiple groups at once (PR [#5264](https://github.com/vatesfr/xen-orchestra/pull/5264))
|
||||
|
||||
### Bug fixes
|
||||
|
||||
- [Import VMDK] Fix `No position specified for vmdisk1` error (PR [#5255](https://github.com/vatesfr/xen-orchestra/pull/5255))
|
||||
- [API] Fix `this.removeSubjectFromResourceSet is not a function` error on calling `resourceSet.removeSubject` via `xo-cli` [#5265](https://github.com/vatesfr/xen-orchestra/issues/5265) (PR [#5266](https://github.com/vatesfr/xen-orchestra/pull/5266))
|
||||
- [Import OVA] Fix frozen UI when dropping a big OVA on the page (PR [#5274](https://github.com/vatesfr/xen-orchestra/pull/5274))
|
||||
- [Remotes/S3] Fix S3 backup of 50GB+ files [#5197](https://github.com/vatesfr/xen-orchestra/issues/5197) (PR[ #5242](https://github.com/vatesfr/xen-orchestra/pull/5242) )
|
||||
- [Import OVA] Improve import speed of embedded gzipped VMDK disks (PR [#5275](https://github.com/vatesfr/xen-orchestra/pull/5275))
|
||||
- [Remotes] Fix editing bucket and directory for S3 remotes [#5233](https://github.com/vatesfr/xen-orchestra/issues/5233) (PR [5276](https://github.com/vatesfr/xen-orchestra/pull/5276))
|
||||
|
||||
### Released packages
|
||||
|
||||
- xo-server-auth-ldap 0.9.0
|
||||
- @xen-orchestra/fs 0.11.1
|
||||
- xo-vmdk-to-vhd 1.3.1
|
||||
- xo-server 5.67.0
|
||||
- xo-web 5.71.0
|
||||
|
||||
## **5.50.3** (2020-09-17)
|
||||
|
||||
### Released packages
|
||||
|
||||
- xo-server-audit 0.8.0
|
||||
|
||||
## **5.50.2** (2020-09-10)
|
||||
|
||||
### Enhancements
|
||||
|
||||
- [VM/network] VIF's locking mode: improve tooltip messages [#4713](https://github.com/vatesfr/xen-orchestra/issues/4713) (PR [#5227](https://github.com/vatesfr/xen-orchestra/pull/5227))
|
||||
- [Backup/overview] Link log entry to its job [#4564](https://github.com/vatesfr/xen-orchestra/issues/4564) (PR [#5202](https://github.com/vatesfr/xen-orchestra/pull/5202))
|
||||
|
||||
### Bug fixes
|
||||
|
||||
- [New SR] Fix `Cannot read property 'trim' of undefined` error (PR [#5212](https://github.com/vatesfr/xen-orchestra/pull/5212))
|
||||
- [Dashboard/Health] Fix suspended VDIs considered as orphans [#5248](https://github.com/vatesfr/xen-orchestra/issues/5248) (PR [#5249](https://github.com/vatesfr/xen-orchestra/pull/5249))
|
||||
|
||||
### Released packages
|
||||
|
||||
- xo-server-audit 0.7.2
|
||||
- xo-web 5.70.0
|
||||
- xo-server 5.66.2
|
||||
|
||||
## **5.50.1** (2020-09-04)
|
||||
|
||||
### Enhancements
|
||||
|
||||
- [Usage report] Exclude replicated VMs from the VMs evolution [#4778](https://github.com/vatesfr/xen-orchestra/issues/4778) (PR [#5241](https://github.com/vatesfr/xen-orchestra/pull/5241))
|
||||
|
||||
### Bug fixes
|
||||
|
||||
- [VM/Network] Fix TX checksumming [#5234](https://github.com/vatesfr/xen-orchestra/issues/5234)
|
||||
|
||||
### Released packages
|
||||
|
||||
- xo-server-usage-report 0.9.0
|
||||
- xo-server-audit 0.7.1
|
||||
- xo-server 5.66.1
|
||||
|
||||
## **5.50.0** (2020-08-27)
|
||||
|
||||
### Highlights
|
||||
|
||||
- [Health/Orphan VDIs] Improve heuristic and list both VDI snapshots and normal VDIs (PR [#5228](https://github.com/vatesfr/xen-orchestra/pull/5228))
|
||||
- [[Audit] Regularly save fingerprints on remote server for better tempering detection](https://xen-orchestra.com/blog/xo-audit/) [#4844](https://github.com/vatesfr/xen-orchestra/issues/4844) (PR [#5077](https://github.com/vatesfr/xen-orchestra/pull/5077))
|
||||
- [VM/Network] Ability to change a VIF's locking mode [#4713](https://github.com/vatesfr/xen-orchestra/issues/4713) (PR [#5188](https://github.com/vatesfr/xen-orchestra/pull/5188))
|
||||
- [VM/Network] Ability to set VIF TX checksumming [#5095](https://github.com/vatesfr/xen-orchestra/issues/5095) (PR [#5182](https://github.com/vatesfr/xen-orchestra/pull/5182))
|
||||
- [Host/Network] Button to refresh the list of physical interfaces [#5230](https://github.com/vatesfr/xen-orchestra/issues/5230)
|
||||
- [VM] Ability to protect VM from accidental shutdown [#4773](https://github.com/vatesfr/xen-orchestra/issues/4773)
|
||||
|
||||
### Enhancements
|
||||
|
||||
- [Proxy] Improve health check error messages [#5161](https://github.com/vatesfr/xen-orchestra/issues/5161) (PR [#5191](https://github.com/vatesfr/xen-orchestra/pull/5191))
|
||||
- [VM/Console] Hide missing ISOs in selector [#5222](https://github.com/vatesfr/xen-orchestra/issues/5222)
|
||||
|
||||
### Bug fixes
|
||||
|
||||
- [Proxy/deploy] Fix `no such proxy ok` error on a failure trial start (PR [#5196](https://github.com/vatesfr/xen-orchestra/pull/5196))
|
||||
- [VM/snapshots] Fix redirection when creating a VM from a snapshot (PR [#5213](https://github.com/vatesfr/xen-orchestra/pull/5213))
|
||||
- [User] Fix `Incorrect password` error when changing password [#5218](https://github.com/vatesfr/xen-orchestra/issues/5218) (PR [#5221](https://github.com/vatesfr/xen-orchestra/pull/5221))
|
||||
- [Audit] Obfuscate sensitive data in `user.changePassword` action's records [#5219](https://github.com/vatesfr/xen-orchestra/issues/5219) (PR [#5220](https://github.com/vatesfr/xen-orchestra/pull/5220))
|
||||
- [SDN Controller] Fix `Cannot read property '$network' of undefined` error at the network creation (PR [#5217](https://github.com/vatesfr/xen-orchestra/pull/5217))
|
||||
|
||||
### Released packages
|
||||
|
||||
- xo-server-audit 0.7.0
|
||||
- xo-server-sdn-controller 1.0.3
|
||||
- xo-server 5.66.0
|
||||
- xo-web 5.69.0
|
||||
|
||||
## **5.49.1** (2020-08-05)
|
||||
|
||||
### Enhancements
|
||||
|
||||
- [SR/advanced] Show thin/thick provisioning for missing SR types (PR [#5204](https://github.com/vatesfr/xen-orchestra/pull/5204))
|
||||
|
||||
### Bug fixes
|
||||
|
||||
- [Patches] Don't log errors related to missing patches listing (Previous fix in 5.48.3 was not working)
|
||||
|
||||
### Released packages
|
||||
|
||||
- xo-server 5.64.1
|
||||
- xo-server-sdn-controller 1.0.2
|
||||
- xo-web 5.67.0
|
||||
|
||||
## **5.49.0** (2020-07-31)
|
||||
|
||||
### Highlights
|
||||
|
||||
- [Home/VM, host] Ability to filter by power state (PR [#5118](https://github.com/vatesfr/xen-orchestra/pull/5118))
|
||||
- [Proxy/deploy] Ability to set HTTP proxy configuration (PR [#5145](https://github.com/vatesfr/xen-orchestra/pull/5145))
|
||||
- [Import/OVA] Allow for VMDK disks inside .ova files to be gzipped (PR [#5085](https://github.com/vatesfr/xen-orchestra/pull/5085))
|
||||
- [Proxy] Show pending upgrades (PR [#5167](https://github.com/vatesfr/xen-orchestra/pull/5167))
|
||||
- [SDN Controller] Add/Remove netork traffic rules for a VM's VIFs (PR [#5135](https://github.com/vatesfr/xen-orchestra/pull/5135))
|
||||
- [Backup/health] Show VM snapshots with missing jobs, schedules or VMs [#5086](https://github.com/vatesfr/xen-orchestra/issues/5086) (PR [#5125](https://github.com/vatesfr/xen-orchestra/pull/5125))
|
||||
- [New delta backup] Show a warning icon when the advanced full backup interval setting and the backup retention are higher than 50 (PR (https://github.com/vatesfr/xen-orchestra/pull/5144))
|
||||
- [VM/network] Improve the network locking mode feedback [#4713](https://github.com/vatesfr/xen-orchestra/issues/4713) (PR [#5170](https://github.com/vatesfr/xen-orchestra/pull/5170))
|
||||
- [Remotes] Add AWS S3 as a backup storage
|
||||
- [New VM] Only make network boot option first when the VM has no disks or when the network installation is chosen [#4980](https://github.com/vatesfr/xen-orchestra/issues/4980) (PR [#5119](https://github.com/vatesfr/xen-orchestra/pull/5119))

### Enhancements

- Log the `Invalid XML-RPC message` error as an unexpected response (PR [#5138](https://github.com/vatesfr/xen-orchestra/pull/5138))
- [VM/disks] By default, sort disks by their device position instead of their name (see the comparator sketch after this list) [#5163](https://github.com/vatesfr/xen-orchestra/issues/5163) (PR [#5165](https://github.com/vatesfr/xen-orchestra/pull/5165))
- [Schedule/edit] Ability to enable/disable an ordinary job's schedule [#5026](https://github.com/vatesfr/xen-orchestra/issues/5026) (PR [#5111](https://github.com/vatesfr/xen-orchestra/pull/5111))
- [New schedule] Enable 'Enable immediately after creation' by default (PR [#5111](https://github.com/vatesfr/xen-orchestra/pull/5111))
- [Self Service] Ability to globally ignore snapshots in resource set quotas (PR [#5164](https://github.com/vatesfr/xen-orchestra/pull/5164))
- [Self] Ability to cancel the edit of a resource set without saving it (PR [#5174](https://github.com/vatesfr/xen-orchestra/pull/5174))
- [VIF] Ability to click an IP address to copy it to the clipboard [#5185](https://github.com/vatesfr/xen-orchestra/issues/5185) (PR [#5186](https://github.com/vatesfr/xen-orchestra/pull/5186))
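The comparator sketch referenced in the disk-sorting entry above; it assumes, for illustration only, that each disk object exposes a numeric-like `position` (device slot) property.

```js
// Illustrative comparator, not the actual xo-web implementation.
const byDevicePosition = (a, b) => Number(a.position) - Number(b.position)

const disks = [
  { name_label: 'zz-data', position: '2' },
  { name_label: 'aa-swap', position: '1' },
  { name_label: 'root', position: '0' },
]
disks.sort(byDevicePosition) // → root, aa-swap, zz-data (names no longer matter)
```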

### Bug fixes

- [Backup/Restore] Fix `an error has occurred` when all backups for a specific VM have been deleted (PR [#5156](https://github.com/vatesfr/xen-orchestra/pull/5156))
- [OVA Import] Fix import of Red Hat generated .ova files (PR [#5159](https://github.com/vatesfr/xen-orchestra/pull/5159))
- [Fast clone] Fix a bug where the created VM would be named "undefined_clone" (see the sketch after this list) (PR [#5173](https://github.com/vatesfr/xen-orchestra/pull/5173))
- [Audit] Fix unreadable exported records format (PR [#5179](https://github.com/vatesfr/xen-orchestra/pull/5179))
- [SDN Controller] Fix TLS error `dh key too small` [#5074](https://github.com/vatesfr/xen-orchestra/issues/5074) (PR [#5187](https://github.com/vatesfr/xen-orchestra/pull/5187))
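For the fast-clone entry above, one way to rule out an `undefined_clone` name is to fall back to a default label when the source VM has none. This is purely illustrative, not the actual fix.

```js
// Illustrative guard, not the actual xo-web fix: fall back to a
// default label when the source VM has no name.
const cloneName = vm => `${vm.name_label ?? 'VM'}_clone`

cloneName({ name_label: 'web01' }) // → 'web01_clone'
cloneName({}) // → 'VM_clone', never 'undefined_clone'
```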

### Released packages

- xo-server-audit 0.6.1
- @xen-orchestra/openflow 0.1.1
- xo-server-sdn-controller 1.0.1
- xo-vmdk-to-vhd 1.3.0
- xo-remote-parser 0.6.0
- @xen-orchestra/fs 0.11.0
- xo-server 5.64.0
- xo-web 5.66.0

## **5.48.3** (2020-07-10)



### Enhancements

- [Audit] Logging user actions is now opt-in (PR [#5151](https://github.com/vatesfr/xen-orchestra/pull/5151))

@@ -463,6 +102,8 @@

## **5.47.1** (2020-06-02)



### Bug fixes

- [auth-ldap] Sign in was broken in XO 5.47.0 (PR [#5039](https://github.com/vatesfr/xen-orchestra/pull/5039))

@@ -7,20 +7,10 @@

> Users must be able to say: “Nice enhancement, I'm eager to test it”

- [Task] Display age and estimated duration (see the sketch after this list) (PR [#5530](https://github.com/vatesfr/xen-orchestra/pull/5530))
- [Proxy] Ask for confirmation before upgrading a proxy with running backups (PR [#5533](https://github.com/vatesfr/xen-orchestra/pull/5533))
- [Backup/restore] Allow restoring a backup with any license, even if XOA isn't registered (PR [#5547](https://github.com/vatesfr/xen-orchestra/pull/5547))
- [Import] Ignore case when detecting the file type (PR [#5574](https://github.com/vatesfr/xen-orchestra/pull/5574))
- [Backup] Ability to set a specific schedule to always run full backups [#5541](https://github.com/vatesfr/xen-orchestra/issues/5541) (PR [#5546](https://github.com/vatesfr/xen-orchestra/pull/5546))
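The sketch for the task entry above: age and estimated duration come from a simple extrapolation over progress. Field names here are assumptions for illustration (`created` as a Unix timestamp in seconds, `progress` as a ratio in [0, 1]), not the actual xo-web code.

```js
// Illustrative arithmetic with assumed field names, not actual xo-web code.
const taskTimes = (task, now = Date.now() / 1e3) => {
  const age = now - task.created
  // Extrapolate: if 25% of the work took `age` seconds, 100% takes 4 × age.
  const estimatedDuration = task.progress > 0 ? age / task.progress : undefined
  return { age, estimatedDuration }
}

taskTimes({ created: 1000, progress: 0.25 }, 1100) // → { age: 100, estimatedDuration: 400 }
```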

### Bug fixes

> Users must be able to say: “I had this issue, happy to know it's fixed”

- [VM/Snapshot export] Fix `Error: no available place in queue` when canceling an export in the browser and then starting a new one after the concurrency threshold has been reached [#5535](https://github.com/vatesfr/xen-orchestra/issues/5535) (PR [#5538](https://github.com/vatesfr/xen-orchestra/pull/5538))
- [Servers] Hide a pool's objects if its master is unreachable [#5475](https://github.com/vatesfr/xen-orchestra/issues/5475) (PR [#5526](https://github.com/vatesfr/xen-orchestra/pull/5526))
- [Host] Restart toolstack: fix `ECONNREFUSED` error (PR [#5553](https://github.com/vatesfr/xen-orchestra/pull/5553))

### Packages to release

> Packages will be released in the order they are here, therefore, they should

@@ -37,9 +27,3 @@
> - major: if the change breaks compatibility
>
> In case of conflict, the highest (lowest in previous list) `$version` wins.

- @xen-orchestra/fs minor
- xen-api patch
- xo-common minor
- xo-server minor
- xo-web minor

@@ -35,7 +35,6 @@ module.exports = {
      ['/supported_hosts', 'Host Compatibility List'],
      ['/installation', 'Installation'],
      ['/configuration', 'Configuration'],
      ['/migrate_to_new_xoa', 'Migrate to new XOA'],
      ['/updater', 'Updates'],
      ['/architecture', 'Architecture'],
      ['/troubleshooting', 'Troubleshooting'],

@@ -93,7 +92,10 @@
      collapsable: false, // optional, defaults to true
      sidebarDepth: 1, // optional, defaults to 1
      children: [
        ['https://github.com/vatesfr/xen-orchestra/blob/master/CHANGELOG.md#changelog', 'Changelog'],
        [
          'https://github.com/vatesfr/xen-orchestra/blob/master/CHANGELOG.md#changelog',
          'Changelog',
        ],
        ['/code_of_conduct', 'Code of Conduct'],
        ['/contributing', 'Contributing'],
        ['/licenses', 'Licenses'],
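For context on the fragment above: it is a VuePress 1.x sidebar configuration, where a plain entry is a `[path or URL, title]` pair and a group wraps entries with a title and display options. A minimal self-contained sketch mirroring that structure:

```js
// Minimal VuePress 1.x sidebar group, mirroring the structure above.
module.exports = {
  themeConfig: {
    sidebar: [
      {
        title: 'Project',
        collapsable: false, // optional, defaults to true
        sidebarDepth: 1, // optional, defaults to 1
        children: [
          // Each child is a [path or URL, display title] pair.
          ['https://github.com/vatesfr/xen-orchestra/blob/master/CHANGELOG.md#changelog', 'Changelog'],
          ['/code_of_conduct', 'Code of Conduct'],
        ],
      },
    ],
  },
}
```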

@@ -172,7 +172,11 @@ const handleHook = data => {
  const { method, params, type, result, error, timestamp } = JSON.parse(data)

  // Log it
  console.log(`${new Date(timestamp).toISOString()} [${method}|${type}] ${params} → ${result || error}`)
  console.log(
    `${new Date(timestamp).toISOString()} [${method}|${type}] ${params} → ${
      result || error
    }`
  )

  // Run scripts
  exec(`./hook-scripts/${method}-${type}.sh`)
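For readers who want to see the hunk above in one piece, here is a self-contained sketch of such a hook handler: it parses the hook payload, logs one line per call, and runs a matching script. The HTTP wiring and port are assumptions for illustration; only the `handleHook` body comes from the fragment above.

```js
// Hedged sketch: the HTTP wiring and port are assumptions; only the
// handleHook body comes from the fragment above.
const { exec } = require('child_process')
const http = require('http')

const handleHook = data => {
  const { method, params, type, result, error, timestamp } = JSON.parse(data)

  // Log one line per hook call
  console.log(
    `${new Date(timestamp).toISOString()} [${method}|${type}] ${params} → ${
      result || error
    }`
  )

  // Run the matching script, e.g. ./hook-scripts/vm.start-pre.sh
  exec(`./hook-scripts/${method}-${type}.sh`)
}

http
  .createServer((req, res) => {
    let body = ''
    req.on('data', chunk => {
      body += chunk
    })
    req.on('end', () => {
      handleHook(body)
      res.end()
    })
  })
  .listen(9100)
```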