Compare commits
1 commit
complex-ma...icinga2-te

| Author | SHA1 | Date |
|---|---|---|
|  | aa92f0fc93 |  |
@@ -17,7 +17,7 @@ Installation of the [npm package](https://npmjs.org/package/@vates/coalesce-call

## Usage

```js
import { coalesceCalls } from '@vates/coalesce-calls'
import { coalesceCalls } from 'coalesce-calls'

const connect = coalesceCalls(async function () {
  // async operation
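The hunk above only renames the import in the README example; for context, a minimal sketch of what coalesced calls provide, assuming the behaviour the package describes (concurrent calls share one in-flight promise):

```js
import { coalesceCalls } from '@vates/coalesce-calls'

const connect = coalesceCalls(async function () {
  // async operation, e.g. opening a connection
})

// while the first call is in flight, the second one reuses its promise
const p1 = connect()
const p2 = connect()
// assumption, per the package's description: p1 and p2 settle together
```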
@@ -46,6 +46,7 @@
    "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
    "prebuild": "yarn run clean",
    "predev": "yarn run prebuild",
    "prepare": "yarn run build",
    "prepublishOnly": "yarn run build",
    "postversion": "npm publish"
  }

@@ -30,7 +30,6 @@
    "rimraf": "^3.0.0"
  },
  "dependencies": {
    "@xen-orchestra/log": "^0.2.0",
    "core-js": "^3.6.4",
    "golike-defer": "^0.4.1",
    "lodash": "^4.17.15",

@@ -2,12 +2,9 @@
import 'core-js/features/symbol/async-iterator'

import assert from 'assert'
import createLogger from '@xen-orchestra/log'
import defer from 'golike-defer'
import hash from 'object-hash'

const log = createLogger('xo:audit-core')

export class Storage {
  constructor() {
    this._lock = Promise.resolve()

@@ -28,7 +25,7 @@ export class Storage {
//
// http://man7.org/linux/man-pages/man3/crypt.3.html#NOTES
const ID_TO_ALGORITHM = {
  5: 'sha256',
  '5': 'sha256',
}
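The quoting change above matters once crypt(3) ids stop being plain integers (the man page also defines ids such as `2a` or `6`); a sketch of how such a table is typically consulted:

```js
// sketch: pick the hash algorithm from a crypt(3)-style string such as
// '$5$salt$hash' — the id between the first two '$' selects the algorithm
const ID_TO_ALGORITHM = { '5': 'sha256' }

function algorithmOf(hashString) {
  const id = hashString.split('$')[1] // '5' for '$5$salt$hash'
  return ID_TO_ALGORITHM[id]
}
```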
export class AlteredRecordError extends Error {

@@ -68,17 +65,8 @@ export class AuditCore {
  @defer
  async add($defer, subject, event, data) {
    const time = Date.now()
    $defer(await this._storage.acquireLock())
    return this._addUnsafe({
      data,
      event,
      subject,
      time,
    })
  }

  async _addUnsafe({ data, event, subject, time }) {
    const storage = this._storage
    $defer(await storage.acquireLock())

    // delete "undefined" properties and normalize data with JSON.stringify
    const record = JSON.parse(

@@ -151,45 +139,4 @@ export class AuditCore {
      await this._storage.del(id)
    }
  }

  @defer
  async deleteRangeAndRewrite($defer, newest, oldest) {
    assert.notStrictEqual(newest, undefined)
    assert.notStrictEqual(oldest, undefined)

    const storage = this._storage
    $defer(await storage.acquireLock())

    assert.notStrictEqual(await storage.get(newest), undefined)
    const oldestRecord = await storage.get(oldest)
    assert.notStrictEqual(oldestRecord, undefined)

    const lastId = await storage.getLastId()
    const recentRecords = []
    for await (const record of this.getFrom(lastId)) {
      if (record.id === newest) {
        break
      }

      recentRecords.push(record)
    }

    for await (const record of this.getFrom(newest)) {
      await storage.del(record.id)
      if (record.id === oldest) {
        break
      }
    }

    await storage.setLastId(oldestRecord.previousId)

    for (const record of recentRecords) {
      try {
        await this._addUnsafe(record)
        await storage.del(record.id)
      } catch (error) {
        log.error(error)
      }
    }
  }
}

@@ -17,10 +17,9 @@ interface Record {
}

export class AuditCore {
  constructor(storage: Storage) { }
  public add(subject: any, event: string, data: any): Promise<Record> { }
  public checkIntegrity(oldest: string, newest: string): Promise<number> { }
  public getFrom(newest?: string): AsyncIterator { }
  public deleteFrom(newest: string): Promise<void> { }
  public deleteRangeAndRewrite(newest: string, oldest: string): Promise<void> { }
  constructor(storage: Storage) {}
  public add(subject: any, event: string, data: any): Promise<Record> {}
  public checkIntegrity(oldest: string, newest: string): Promise<number> {}
  public getFrom(newest?: string): AsyncIterator {}
  public deleteFrom(newest: string): Promise<void> {}
}
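A usage sketch based on the interface above; the `storage` instance and the record identifiers are hypothetical:

```js
const auditCore = new AuditCore(storage)

async function demo() {
  await auditCore.add({ type: 'user' }, 'user.login', { ip: '…' })

  // iterate from the most recent record backwards
  for await (const record of auditCore.getFrom()) {
    console.log(record.id, record.event)
  }

  // verify the chain between two known record ids, per the signature above
  const n = await auditCore.checkIntegrity(oldestId, newestId)
  console.log(`${n} records checked`)
}
```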
@@ -3,17 +3,6 @@ const { dirname } = require('path')
const fs = require('promise-toolbox/promisifyAll')(require('fs'))
module.exports = fs

fs.getSize = path =>
  fs.stat(path).then(
    _ => _.size,
    error => {
      if (error.code === 'ENOENT') {
        return 0
      }
      throw error
    }
  )

fs.mktree = async function mkdirp(path) {
  try {
    await fs.mkdir(path)
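A usage sketch for the helpers above (the path is illustrative): `getSize` resolves to the size in bytes, or 0 when the file is missing, since ENOENT is swallowed.

```js
const fs = require('./_fs')

fs.getSize('metadata.json').then(size => {
  // 0 when the file does not exist, its size in bytes otherwise
  console.log(size)
})
```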
@@ -8,10 +8,9 @@ let force
const assert = require('assert')
const flatten = require('lodash/flatten')
const getopts = require('getopts')
const limitConcurrency = require('limit-concurrency-decorator').default
const lockfile = require('proper-lockfile')
const pipe = require('promise-toolbox/pipe')
const { default: Vhd, mergeVhd } = require('vhd-lib')
const { default: Vhd } = require('vhd-lib')
const { dirname, resolve } = require('path')
const { DISK_TYPE_DIFFERENCING } = require('vhd-lib/dist/_constants')
const { isValidXva } = require('@xen-orchestra/backups/isValidXva')

@@ -27,10 +26,10 @@ const handler = require('@xen-orchestra/fs').getHandler({ url: 'file://' })
//
// the whole chain will be merged into parent, parent will be renamed to child
// and all the others will be deleted
const mergeVhdChain = limitConcurrency(1)(async function mergeVhdChain(chain) {
async function mergeVhdChain(chain) {
  assert(chain.length >= 2)

  let child = chain[0]
  const child = chain[0]
  const parent = chain[chain.length - 1]
  const children = chain.slice(0, -1).reverse()
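The `limitConcurrency(1)` wrapper above serializes merges; a minimal sketch of the same pattern (the chains are illustrative):

```js
const limitConcurrency = require('limit-concurrency-decorator').default

// at most one merge runs at a time, later calls queue until the previous
// one settles
const merge = limitConcurrency(1)(async function mergeVhdChain(chain) {
  // heavy VHD merge work here
})

// the second merge only starts once the first has finished
Promise.all([merge(['child.vhd', 'parent.vhd']), merge(['a.vhd', 'b.vhd'])])
```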
@@ -47,36 +46,15 @@ const mergeVhdChain = limitConcurrency(1)(async function mergeVhdChain(chain) {
  // `mergeVhd` does not work with a stream, either
  // - make it accept a stream
  // - or create synthetic VHD which is not a stream
  if (children.length !== 1) {
    console.warn('TODO: implement merging multiple children')
    children.length = 1
    child = children[0]
  }

  let done, total
  const handle = setInterval(() => {
    if (done !== undefined) {
      console.log('merging %s: %s/%s', child, done, total)
    }
  }, 10e3)

  await mergeVhd(
    handler,
    parent,
    handler,
    child,
    // children.length === 1
    //   ? child
    //   : await createSyntheticStream(handler, children),
    {
      onProgress({ done: d, total: t }) {
        done = d
        total = t
      },
    }
  )

  clearInterval(handle)
  return console.warn('TODO: implement merge')
  // await mergeVhd(
  //   handler,
  //   parent,
  //   handler,
  //   children.length === 1
  //     ? child
  //     : await createSyntheticStream(handler, children)
  // )
  }

  await Promise.all([

@@ -88,7 +66,7 @@ const mergeVhdChain = limitConcurrency(1)(async function mergeVhdChain(chain) {
      return force && handler.unlink(child)
    }),
  ])
})
}

const listVhds = pipe([
  vmDir => vmDir + '/vdis',

@@ -1,58 +0,0 @@
const groupBy = require('lodash/groupBy')
const { createHash } = require('crypto')
const { dirname, resolve } = require('path')

const asyncMap = require('../_asyncMap')
const { readdir2, readFile, getSize } = require('../_fs')

const sha512 = str => createHash('sha512').update(str).digest('hex')
const sum = values => values.reduce((a, b) => a + b)

module.exports = async function info(vmDirs) {
  const jsonFiles = (
    await asyncMap(vmDirs, async vmDir =>
      (await readdir2(vmDir)).filter(_ => _.endsWith('.json'))
    )
  ).flat()

  const hashes = { __proto__: null }

  const info = (
    await asyncMap(jsonFiles, async jsonFile => {
      try {
        const jsonDir = dirname(jsonFile)
        const json = await readFile(jsonFile)

        const hash = sha512(json)
        if (hash in hashes) {
          console.log(jsonFile, 'duplicate of', hashes[hash])
          return
        }
        hashes[hash] = jsonFile

        const metadata = JSON.parse(json)

        return {
          jsonDir,
          jsonFile,
          metadata,
          size:
            json.length +
            (await (metadata.mode === 'delta'
              ? asyncMap(Object.values(metadata.vhds), _ =>
                  getSize(resolve(jsonDir, _))
                ).then(sum)
              : getSize(resolve(jsonDir, metadata.xva)))),
        }
      } catch (error) {
        console.error(jsonFile, error)
      }
    })
  ).filter(_ => _ !== undefined)
  const byJobs = groupBy(info, 'metadata.jobId')
  Object.keys(byJobs)
    .sort()
    .forEach(jobId => {
      console.log(jobId, sum(byJobs[jobId].map(_ => _.size)))
    })
}

@@ -13,12 +13,6 @@ require('./_composeCommands')({
    },
    usage: 'xo-vm-backups <field path>',
  },
  info: {
    get main() {
      return require('./commands/info')
    },
    usage: 'xo-vm-backups/*',
  },
})(process.argv.slice(2), 'xo-backups').catch(error => {
  console.error('main', error)
  process.exitCode = 1

@@ -7,10 +7,9 @@
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "dependencies": {
    "@xen-orchestra/backups": "^0.1.1",
    "@xen-orchestra/fs": "^0.11.1",
    "@xen-orchestra/fs": "^0.10.4",
    "filenamify": "^4.1.0",
    "getopts": "^2.2.5",
    "limit-concurrency-decorator": "^0.4.0",
    "lodash": "^4.17.15",
    "promise-toolbox": "^0.15.0",
    "proper-lockfile": "^4.1.1",

@@ -33,7 +32,7 @@
  "scripts": {
    "postversion": "npm publish --access public"
  },
  "version": "0.2.1",
  "version": "0.0.0",
  "license": "AGPL-3.0-or-later",
  "author": {
    "name": "Vates SAS",

@@ -16,7 +16,7 @@
    "postversion": "npm publish --access public"
  },
  "dependencies": {
    "d3-time-format": "^3.0.0",
    "d3-time-format": "^2.2.3",
    "fs-extra": "^9.0.0"
  },
  "license": "AGPL-3.0-or-later",

@@ -1,7 +1,7 @@
{
  "private": false,
  "name": "@xen-orchestra/fs",
  "version": "0.11.1",
  "version": "0.10.4",
  "license": "AGPL-3.0-or-later",
  "description": "The File System for Xen Orchestra backups.",
  "keywords": [],

@@ -25,18 +25,17 @@
    "@marsaud/smb2": "^0.15.0",
    "@sindresorhus/df": "^3.1.1",
    "@xen-orchestra/async-map": "^0.0.0",
    "aws-sdk": "^2.686.0",
    "decorator-synchronized": "^0.5.0",
    "execa": "^4.0.2",
    "fs-extra": "^9.0.0",
    "get-stream": "^6.0.0",
    "get-stream": "^5.1.0",
    "limit-concurrency-decorator": "^0.4.0",
    "lodash": "^4.17.4",
    "promise-toolbox": "^0.15.0",
    "readable-stream": "^3.0.6",
    "through2": "^4.0.2",
    "tmp": "^0.2.1",
    "xo-remote-parser": "^0.6.0"
    "through2": "^3.0.0",
    "tmp": "^0.1.0",
    "xo-remote-parser": "^0.5.0"
  },
  "devDependencies": {
    "@babel/cli": "^7.0.0",

@@ -59,7 +58,7 @@
    "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
    "prebuild": "yarn run clean",
    "predev": "yarn run clean",
    "prepublishOnly": "yarn run build",
    "prepare": "yarn run build",
    "postversion": "npm publish"
  },
  "author": {

@@ -5,7 +5,7 @@ import getStream from 'get-stream'

import asyncMap from '@xen-orchestra/async-map'
import limit from 'limit-concurrency-decorator'
import path, { basename } from 'path'
import path from 'path'
import synchronized from 'decorator-synchronized'
import { fromCallback, fromEvent, ignoreErrors, timeout } from 'promise-toolbox'
import { parse } from 'xo-remote-parser'

@@ -121,7 +121,6 @@ export default class RemoteHandlerAbstract {
    await this.__closeFile(fd)
  }

  // TODO: remove method
  async createOutputStream(
    file: File,
    { checksum = false, ...options }: Object = {}

@@ -222,15 +221,19 @@ export default class RemoteHandlerAbstract {
    )
  }

  // write a stream to a file using a temporary file
  async outputStream(
    input: Readable | Promise<Readable>,
    path: string,
    { checksum = true }: { checksum?: boolean } = {}
  ): Promise<void> {
    path = normalizePath(path)
    input = await input
    return this._outputStream(await input, normalizePath(path), { checksum })
  createWriteStream(
    file: File,
    options: { end?: number, flags?: string, start?: number } = {}
  ): Promise<LaxWritable> {
    return timeout.call(
      this._createWriteStream(
        typeof file === 'string' ? normalizePath(file) : file,
        {
          flags: 'wx',
          ...options,
        }
      )
    )
  }
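A usage sketch for the new `outputStream()` above, assuming `handler` is a concrete RemoteHandler and the file names are illustrative; the data is written to a hidden temporary file first, then renamed into place (see `_outputStream` further down):

```js
import { createReadStream } from 'fs'

await handler.outputStream(createReadStream('./backup.xva'), 'backups/backup.xva', {
  checksum: true,
})
```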
  // Free the resources possibly dedicated to put the remote at work, when it

@@ -318,6 +321,18 @@ export default class RemoteHandlerAbstract {
    return this._readFile(normalizePath(file), { flags })
  }

  async refreshChecksum(path: string): Promise<void> {
    path = normalizePath(path)

    const stream = (await this._createReadStream(path, { flags: 'r' })).pipe(
      createChecksumStream()
    )
    stream.resume() // start reading the whole file
    await this._outputFile(checksumFile(path), await stream.checksum, {
      flags: 'wx',
    })
  }

  async rename(
    oldPath: string,
    newPath: string,

@@ -533,22 +548,6 @@ export default class RemoteHandlerAbstract {
    return this._outputFile(file, data, options)
  }

  async _outputStream(input, path, { checksum }) {
    const tmpPath = `${dirname(path)}/.${basename(path)}`
    const output = await this.createOutputStream(tmpPath, { checksum })
    try {
      input.pipe(output)
      await fromEvent(output, 'finish')
      await output.checksumWritten
      // $FlowFixMe
      await input.task
      await this.rename(tmpPath, path, { checksum })
    } catch (error) {
      await this.unlink(tmpPath, { checksum })
      throw error
    }
  }

  _read(
    file: File,
    buffer: Buffer,

@@ -42,6 +42,18 @@ describe('createOutputStream()', () => {
  })
})

describe('createReadStream()', () => {
  it(`throws in case of timeout`, async () => {
    const testHandler = new TestHandler({
      createReadStream: () => new Promise(() => {}),
    })

    const promise = testHandler.createReadStream('file')
    jest.advanceTimersByTime(TIMEOUT)
    await expect(promise).rejects.toThrowError(TimeoutError)
  })
})

describe('getInfo()', () => {
  it('throws in case of timeout', async () => {
    const testHandler = new TestHandler({

@@ -2,6 +2,7 @@

import 'dotenv/config'
import asyncIteratorToStream from 'async-iterator-to-stream'
import getStream from 'get-stream'
import { forOwn, random } from 'lodash'
import { fromCallback } from 'promise-toolbox'
import { pipeline } from 'readable-stream'

@@ -90,6 +91,31 @@ handlers.forEach(url => {
    })
  })

  describe('#createReadStream()', () => {
    beforeEach(() => handler.outputFile('file', TEST_DATA))

    testWithFileDescriptor('file', 'r', async ({ file, flags }) => {
      await expect(
        await getStream.buffer(
          await handler.createReadStream(file, { flags })
        )
      ).toEqual(TEST_DATA)
    })
  })

  describe('#createWriteStream()', () => {
    testWithFileDescriptor('file', 'wx', async ({ file, flags }) => {
      const stream = await handler.createWriteStream(file, { flags })
      await fromCallback(pipeline, createTestDataStream(), stream)
      await expect(await handler.readFile('file')).toEqual(TEST_DATA)
    })

    it('fails if parent dir is missing', async () => {
      const error = await rejectionOf(handler.createWriteStream('dir/file'))
      expect(error.code).toBe('ENOENT')
    })
  })

  describe('#getInfo()', () => {
    let info
    beforeAll(async () => {

@@ -4,7 +4,6 @@ import execa from 'execa'
import type RemoteHandler from './abstract'
import RemoteHandlerLocal from './local'
import RemoteHandlerNfs from './nfs'
import RemoteHandlerS3 from './s3'
import RemoteHandlerSmb from './smb'
import RemoteHandlerSmbMount from './smb-mount'

@@ -14,7 +13,6 @@ export type Remote = { url: string }
const HANDLERS = {
  file: RemoteHandlerLocal,
  nfs: RemoteHandlerNfs,
  s3: RemoteHandlerS3,
}

try {

@@ -1,284 +0,0 @@
import AWS from 'aws-sdk'
import { parse } from 'xo-remote-parser'

import RemoteHandlerAbstract from './abstract'
import { createChecksumStream } from './checksum'

// endpoints https://docs.aws.amazon.com/general/latest/gr/s3.html

// limits: https://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html
const MIN_PART_SIZE = 1024 * 1024 * 5 // 5MB
const MAX_PART_SIZE = 1024 * 1024 * 1024 * 5 // 5GB
const MAX_PARTS_COUNT = 10000
const MAX_OBJECT_SIZE = 1024 * 1024 * 1024 * 1024 * 5 // 5TB
const IDEAL_FRAGMENT_SIZE = Math.ceil(MAX_OBJECT_SIZE / MAX_PARTS_COUNT) // the smallest fragment size that still allows a 5TB upload in 10000 fragments, about 524MB

export default class S3Handler extends RemoteHandlerAbstract {
  constructor(remote, _opts) {
    super(remote)
    const { host, path, username, password } = parse(remote.url)

    // https://www.zenko.io/blog/first-things-first-getting-started-scality-s3-server/
    this._s3 = new AWS.S3({
      accessKeyId: username,
      apiVersion: '2006-03-01',
      endpoint: host,
      s3ForcePathStyle: true,
      secretAccessKey: password,
      signatureVersion: 'v4',
    })
    const splitPath = path.split('/').filter(s => s.length)
    this._bucket = splitPath.shift()
    this._dir = splitPath.join('/')
  }

  get type() {
    return 's3'
  }

  _createParams(file) {
    return { Bucket: this._bucket, Key: this._dir + file }
  }

  async _outputStream(input, path, { checksum }) {
    let inputStream = input
    if (checksum) {
      const checksumStream = createChecksumStream()
      const forwardError = error => {
        checksumStream.emit('error', error)
      }
      input.pipe(checksumStream)
      input.on('error', forwardError)
      inputStream = checksumStream
    }
    const upload = this._s3.upload(
      {
        ...this._createParams(path),
        Body: inputStream,
      },
      { partSize: IDEAL_FRAGMENT_SIZE }
    )
    await upload.promise()
    if (checksum) {
      const checksum = await inputStream.checksum
      const params = {
        ...this._createParams(path + '.checksum'),
        Body: checksum,
      }
      await this._s3.upload(params).promise()
    }
    await input.task
  }

  async _writeFile(file, data, options) {
    return this._s3
      .putObject({ ...this._createParams(file), Body: data })
      .promise()
  }

  async _createReadStream(file, options) {
    return this._s3.getObject(this._createParams(file)).createReadStream()
  }

  async _unlink(file) {
    return this._s3.deleteObject(this._createParams(file)).promise()
  }

  async _list(dir) {
    function splitPath(path) {
      return path.split('/').filter(d => d.length)
    }

    const prefix = [this._dir, dir].join('/')
    const splitPrefix = splitPath(prefix)
    const request = this._s3.listObjectsV2({
      Bucket: this._bucket,
      Prefix: splitPrefix.join('/'),
    })
    const result = await request.promise()
    const uniq = new Set()
    for (const entry of result.Contents) {
      const line = splitPath(entry.Key)
      if (line.length > splitPrefix.length) {
        uniq.add(line[splitPrefix.length])
      }
    }
    return [...uniq]
  }

  async _rename(oldPath, newPath) {
    const params = {
      ...this._createParams(newPath),
      CopySource: `/${this._bucket}/${this._dir}${oldPath}`,
    }
    await this._s3.copyObject(params).promise()
    await this._s3.deleteObject(this._createParams(oldPath)).promise()
  }

  async _getSize(file) {
    if (typeof file !== 'string') {
      file = file.fd
    }
    const result = await this._s3.headObject(this._createParams(file)).promise()
    return +result.ContentLength
  }

  async _read(file, buffer, position = 0) {
    if (typeof file !== 'string') {
      file = file.fd
    }
    const params = this._createParams(file)
    params.Range = `bytes=${position}-${position + buffer.length - 1}`
    const result = await this._s3.getObject(params).promise()
    result.Body.copy(buffer)
    return { bytesRead: result.Body.length, buffer }
  }

  async _write(file, buffer, position) {
    if (typeof file !== 'string') {
      file = file.fd
    }
    const uploadParams = this._createParams(file)
    const fileSize = +(await this._s3.headObject(uploadParams).promise())
      .ContentLength
    if (fileSize < MIN_PART_SIZE) {
      const resultBuffer = Buffer.alloc(
        Math.max(fileSize, position + buffer.length)
      )
      const fileContent = (await this._s3.getObject(uploadParams).promise())
        .Body
      fileContent.copy(resultBuffer)
      buffer.copy(resultBuffer, position)
      await this._s3
        .putObject({ ...uploadParams, Body: resultBuffer })
        .promise()
      return { buffer, bytesWritten: buffer.length }
    } else {
      // using this trick: https://stackoverflow.com/a/38089437/72637
      // multipart fragments have a minimum size of 5Mo and a max of 5Go unless they are last
      // splitting the file in 3 parts: [prefix, edit, suffix]
      // if `prefix` is bigger than 5Mo, it will be sourced from uploadPartCopy()
      // otherwise it will be downloaded and concatenated to `edit`
      // `edit` will always be an upload part
      // `suffix` will always be sourced from uploadPartCopy()
      const multipartParams = await this._s3
        .createMultipartUpload(uploadParams)
        .promise()
      try {
        const parts = []
        const prefixSize = position
        let suffixOffset = prefixSize + buffer.length
        let suffixSize = Math.max(0, fileSize - suffixOffset)
        let hasSuffix = suffixSize > 0
        let editBuffer = buffer
        let editBufferOffset = position
        let partNumber = 1
        if (prefixSize < MIN_PART_SIZE) {
          const downloadParams = {
            ...uploadParams,
            Range: `bytes=0-${prefixSize - 1}`,
          }
          const prefixBuffer =
            prefixSize > 0
              ? (await this._s3.getObject(downloadParams).promise()).Body
              : Buffer.alloc(0)
          editBuffer = Buffer.concat([prefixBuffer, buffer])
          editBufferOffset = 0
        } else {
          const fragmentsCount = Math.ceil(prefixSize / MAX_PART_SIZE)
          const prefixFragmentSize = Math.ceil(prefixSize / fragmentsCount)
          const lastFragmentSize =
            prefixFragmentSize * fragmentsCount - prefixSize
          let prefixPosition = 0
          for (let i = 0; i < fragmentsCount; i++) {
            const copyPrefixParams = {
              ...multipartParams,
              PartNumber: partNumber++,
              CopySource: `/${this._bucket}/${this._dir + file}`,
              CopySourceRange: `bytes=${prefixPosition}-${
                prefixPosition + prefixFragmentSize - 1
              }`,
            }
            const prefixPart = (
              await this._s3.uploadPartCopy(copyPrefixParams).promise()
            ).CopyPartResult
            parts.push({
              ETag: prefixPart.ETag,
              PartNumber: copyPrefixParams.PartNumber,
            })
            prefixPosition += prefixFragmentSize
          }
          if (lastFragmentSize) {
          }
        }
        if (hasSuffix && editBuffer.length < MIN_PART_SIZE) {
          // the edit fragment is too short and is not the last fragment
          // let's steal from the suffix fragment to reach the minimum size
          // the suffix might be too short and itself entirely absorbed in the edit fragment, making it the last one.
          const complementSize = Math.min(
            MIN_PART_SIZE - editBuffer.length,
            suffixSize
          )
          const complementOffset = editBufferOffset + editBuffer.length
          suffixOffset += complementSize
          suffixSize -= complementSize
          hasSuffix = suffixSize > 0
          const prefixRange = `bytes=${complementOffset}-${
            complementOffset + complementSize - 1
          }`
          const downloadParams = { ...uploadParams, Range: prefixRange }
          const complementBuffer = (
            await this._s3.getObject(downloadParams).promise()
          ).Body
          editBuffer = Buffer.concat([editBuffer, complementBuffer])
        }
        const editParams = {
          ...multipartParams,
          Body: editBuffer,
          PartNumber: partNumber++,
        }
        const editPart = await this._s3.uploadPart(editParams).promise()
        parts.push({ ETag: editPart.ETag, PartNumber: editParams.PartNumber })
        if (hasSuffix) {
          const suffixFragments = Math.ceil(suffixSize / MAX_PART_SIZE)
          const suffixFragmentsSize = Math.ceil(suffixSize / suffixFragments)
          let suffixFragmentOffset = suffixOffset
          for (let i = 0; i < suffixFragments; i++) {
            const fragmentEnd = suffixFragmentOffset + suffixFragmentsSize
            const suffixRange = `bytes=${suffixFragmentOffset}-${
              Math.min(fileSize, fragmentEnd) - 1
            }`
            const copySuffixParams = {
              ...multipartParams,
              PartNumber: partNumber++,
              CopySource: `/${this._bucket}/${this._dir + file}`,
              CopySourceRange: suffixRange,
            }
            const suffixPart = (
              await this._s3.uploadPartCopy(copySuffixParams).promise()
            ).CopyPartResult
            parts.push({
              ETag: suffixPart.ETag,
              PartNumber: copySuffixParams.PartNumber,
            })
            suffixFragmentOffset = fragmentEnd
          }
        }
        await this._s3
          .completeMultipartUpload({
            ...multipartParams,
            MultipartUpload: { Parts: parts },
          })
          .promise()
      } catch (e) {
        await this._s3.abortMultipartUpload(multipartParams).promise()
        throw e
      }
    }
  }

  async _openFile(path, flags) {
    return path
  }

  async _closeFile(fd) {}
}
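A short sketch of the size threshold behind `_write()` above: small objects are patched with a read-modify-write `putObject`, while larger ones go through a multipart upload that stitches `[prefix, edit, suffix]` together, copying untouched ranges server-side with `uploadPartCopy`.

```js
const MiB = 1024 * 1024
const MIN_PART_SIZE = 5 * MiB // S3 minimum size for a non-final multipart part

function writeStrategy(fileSize) {
  return fileSize < MIN_PART_SIZE ? 'read-modify-write' : 'multipart'
}

console.log(writeStrategy(4 * MiB)) // 'read-modify-write'
console.log(writeStrategy(6 * MiB)) // 'multipart'
```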
@@ -49,7 +49,7 @@
    "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
    "prebuild": "yarn run clean",
    "predev": "yarn run prebuild",
    "prepublishOnly": "yarn run build",
    "prepare": "yarn run build",
    "postversion": "npm publish"
  }
}

@@ -1,3 +0,0 @@
module.exports = require('../../@xen-orchestra/babel-config')(
  require('./package.json')
)

@@ -1,24 +0,0 @@
/benchmark/
/benchmarks/
*.bench.js
*.bench.js.map

/examples/
example.js
example.js.map
*.example.js
*.example.js.map

/fixture/
/fixtures/
*.fixture.js
*.fixture.js.map
*.fixtures.js
*.fixtures.js.map

/test/
/tests/
*.spec.js
*.spec.js.map

__snapshots__/

@@ -1,141 +0,0 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->

# @xen-orchestra/openflow

[](https://npmjs.org/package/@xen-orchestra/openflow)  [](https://bundlephobia.com/result?p=@xen-orchestra/openflow) [](https://npmjs.org/package/@xen-orchestra/openflow)

> Pack and unpack OpenFlow messages

## Install

Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/openflow):

```
> npm install --save @xen-orchestra/openflow
```

## Usage

Unpacking a received OpenFlow message from a socket:

```js
import openflow from '@xen-orchestra/openflow'
import parse from '@xen-orchestra/openflow/parse-socket'

const version = openflow.versions.openFlow11
const ofProtocol = openflow.protocols[version]

async function parseOpenFlowMessages(socket) {
  for await (const msg of parse(socket)) {
    if (msg.header !== undefined) {
      const ofType = msg.header.type
      switch (ofType) {
        case ofProtocol.type.hello:
          // Handle OFPT_HELLO
          break
        case ofProtocol.type.error:
          // Handle OFPT_ERROR
          break
        case ofProtocol.type.echoRequest:
          // Handle OFPT_ECHO_REQUEST
          break
        case ofProtocol.type.packetIn:
          // Handle OFPT_PACKET_IN
          break
        case ofProtocol.type.featuresReply:
          // Handle OFPT_FEATURES_REPLY
          break
        case ofProtocol.type.getConfigReply:
          // Handle OFPT_GET_CONFIG_REPLY
          break
        case ofProtocol.type.portStatus:
          // Handle OFPT_PORT_STATUS
          break
        case ofProtocol.type.flowRemoved:
          // Handle OFPT_FLOW_REMOVED
          break
        default:
          // Error: Invalid type
          break
      }
    } else {
      // Error: Message is unparseable
    }
  }
}
```

Unpacking an OpenFlow message from a buffer:

```js
import openflow from '@xen-orchestra/openflow'

const version = openflow.versions.openFlow11
const ofProtocol = openflow.protocols[version]

function processOpenFlowMessage(buf) {
  const unpacked = openflow.unpack(buf)
  const ofType = unpacked.header.type
  switch (ofType) {
    case ofProtocol.type.hello:
      // Handle OFPT_HELLO
      break
    case ofProtocol.type.error:
      // Handle OFPT_ERROR
      break
    case ofProtocol.type.echoRequest:
      // Handle OFPT_ECHO_REQUEST
      break
    case ofProtocol.type.packetIn:
      // Handle OFPT_PACKET_IN
      break
    case ofProtocol.type.featuresReply:
      // Handle OFPT_FEATURES_REPLY
      break
    case ofProtocol.type.getConfigReply:
      // Handle OFPT_GET_CONFIG_REPLY
      break
    case ofProtocol.type.portStatus:
      // Handle OFPT_PORT_STATUS
      break
    case ofProtocol.type.flowRemoved:
      // Handle OFPT_FLOW_REMOVED
      break
    default:
      // Error: Invalid type
      break
  }
}
```

Packing an OpenFlow OFPT_HELLO message:

```js
import openflow from '@xen-orchestra/openflow'

const version = openflow.versions.openFlow11
const ofProtocol = openflow.protocols[version]

const buf = openflow.pack({
  header: {
    version,
    type: ofProtocol.type.hello,
    xid: 1,
  },
})
```

## Contributions

Contributions are _very_ welcomed, either on the documentation or on
the code.

You may:

- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
  you've encountered;
- fork and create a pull request.

## License

[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)
@@ -1,40 +0,0 @@
{
  "description": "Pack and unpack OpenFlow messages",
  "private": false,
  "name": "@xen-orchestra/openflow",
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/openflow",
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "repository": {
    "directory": "@xen-orchestra/openflow",
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "version": "0.1.1",
  "engines": {
    "node": ">=8.10"
  },
  "main": "dist/",
  "scripts": {
    "build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
    "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
    "postversion": "npm publish --access public",
    "prebuild": "rimraf dist/",
    "predev": "yarn run prebuild",
    "prepublishOnly": "yarn run build"
  },
  "devDependencies": {
    "@babel/cli": "^7.7.4",
    "@babel/core": "^7.7.4",
    "@babel/preset-env": "^7.7.4",
    "cross": "^1.0.0",
    "rimraf": "^3.0.0"
  },
  "dependencies": {
    "@vates/read-chunk": "^0.1.0"
  },
  "author": {
    "name": "Vates SAS",
    "url": "https://vates.fr"
  },
  "license": "ISC"
}

@@ -1 +0,0 @@
module.exports = require('./dist/parse-socket')

@@ -1,9 +0,0 @@
export default {
  size: 8,
  offsets: {
    version: 0,
    type: 1,
    length: 2,
    xid: 4,
  },
}
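A sketch of reading the fixed 8-byte OpenFlow header using the scheme above (version: u8, type: u8, length: u16 big-endian, xid: u32 big-endian):

```js
function unpackHeader(buffer, offset = 0) {
  return {
    version: buffer.readUInt8(offset + 0),
    type: buffer.readUInt8(offset + 1),
    length: buffer.readUInt16BE(offset + 2),
    xid: buffer.readUInt32BE(offset + 4),
  }
}
```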
@@ -1,38 +0,0 @@
import get from './util/get-from-map'
import ofVersion from './version'
// TODO: More openflow versions
import of11 from './openflow-11/index'
import scheme from './default-header-scheme'

// =============================================================================

const OPENFLOW = {
  [ofVersion.openFlow11]: of11,
}

// =============================================================================

export default {
  versions: ofVersion,
  protocols: { [ofVersion.openFlow11]: of11.protocol },

  // ---------------------------------------------------------------------------

  pack: object => {
    const version = object.header.version
    return get(
      OPENFLOW,
      version,
      `Unsupported OpenFlow version: ${version}`
    ).pack(object)
  },

  unpack: (buffer, offset = 0) => {
    const version = buffer.readUInt8(offset + scheme.offsets.version)
    return get(
      OPENFLOW,
      version,
      `Unsupported OpenFlow version: ${version}`
    ).unpack(buffer, offset)
  },
}

@@ -1,58 +0,0 @@
import get from '../../util/get-from-map'

import ofOutput from './output'
import of from '../openflow-11'

// =============================================================================

const ACTION = {
  [of.actionType.output]: ofOutput,
  /* TODO:
  [of.actionType.group]: ,
  [of.actionType.setVlanId]: ,
  [of.actionType.setVlanPcp]: ,
  [of.actionType.setDlSrc]: ,
  [of.actionType.setDlDst]: ,
  [of.actionType.setNwSrc]: ,
  [of.actionType.setNwDst]: ,
  [of.actionType.setNwTos]: ,
  [of.actionType.setNwEcn]: ,
  [of.actionType.setTpSrc]: ,
  [of.actionType.setTpDst]: ,
  [of.actionType.copyTtlOut]: ,
  [of.actionType.copyTtlIn]: ,
  [of.actionType.setMplsLabel]: ,
  [of.actionType.setMplsTc]: ,
  [of.actionType.setMplsTtl]: ,
  [of.actionType.decMplsTtl]: ,
  [of.actionType.pushVlan]: ,
  [of.actionType.popVlan]: ,
  [of.actionType.pushMpls]: ,
  [of.actionType.popMpls]: ,
  [of.actionType.setQueue]: ,
  [of.actionType.setNwTtl]: ,
  [of.actionType.decNwTtl]: ,
  [of.actionType.experimenter]:
  */
}

// =============================================================================

export default {
  pack: (object, buffer = undefined, offset = 0) => {
    const { type } = object
    return get(ACTION, type, `Invalid action type: ${type}`).pack(
      object,
      buffer,
      offset
    )
  },

  unpack: (buffer, offset = 0) => {
    const type = buffer.readUInt16BE(offset + of.offsets.actionHeader.type)
    return get(ACTION, type, `Invalid action type: ${type}`).unpack(
      buffer,
      offset
    )
  },
}
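These modules all dispatch through a small `get` helper; its source is not part of this diff, but its assumed behaviour is a lookup that fails loudly:

```js
// assumed shape of util/get-from-map: return map[key], or throw the
// provided message when the key is unknown
export default function get(map, key, errorMsg) {
  const value = map[key]
  if (value === undefined) {
    throw new Error(errorMsg)
  }
  return value
}
```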
@@ -1,45 +0,0 @@
import assert from 'assert'

import of from '../openflow-11'

// =============================================================================

const OFFSETS = of.offsets.actionOutput

const PAD_LENGTH = 6

// =============================================================================

export default {
  pack: (object, buffer = undefined, offset = 0) => {
    assert(object.type === of.actionType.output)
    object.len = of.sizes.actionOutput

    buffer = buffer !== undefined ? buffer : Buffer.alloc(object.len)

    buffer.writeUInt16BE(object.type, offset + OFFSETS.type)
    buffer.writeUInt16BE(object.len, offset + OFFSETS.len)

    buffer.writeUInt32BE(object.port, offset + OFFSETS.port)
    buffer.writeUInt16BE(object.max_len, offset + OFFSETS.maxLen)

    buffer.fill(0, offset + OFFSETS.pad, offset + OFFSETS.pad + PAD_LENGTH)

    return buffer
  },

  unpack: (buffer, offset = 0) => {
    const object = {}

    object.type = buffer.readUInt16BE(offset + OFFSETS.type)
    assert(object.type === of.actionType.output)

    object.len = buffer.readUInt16BE(offset + OFFSETS.len)
    assert(object.len === of.sizes.actionOutput)

    object.port = buffer.readUInt32BE(offset + OFFSETS.port)
    object.max_len = buffer.readUInt16BE(offset + OFFSETS.maxLen)

    return object
  },
}
@@ -1,49 +0,0 @@
import get from '../util/get-from-map'

import echo from './message/echo'
import error from './message/error'
import hello from './message/hello'
import featuresRequest from './message/features-request'
import featuresReply from './message/features-reply'
import getConfigRequest from './message/get-config-request'
import switchConfig from './message/switch-config'
import flowMod from './message/flow-mod'
import of from './openflow-11'

// =============================================================================

const MESSAGE = {
  [of.type.hello]: hello,
  [of.type.error]: error,
  [of.type.featuresRequest]: featuresRequest,
  [of.type.featuresReply]: featuresReply,
  [of.type.echoRequest]: echo,
  [of.type.echoReply]: echo,
  [of.type.getConfigRequest]: getConfigRequest,
  [of.type.getConfigReply]: switchConfig,
  [of.type.setConfig]: switchConfig,
  [of.type.flowMod]: flowMod,
}

// =============================================================================

export default {
  protocol: of,

  // ---------------------------------------------------------------------------

  pack: object => {
    const type = object.header.type
    return get(MESSAGE, type, `Invalid OpenFlow message type: ${type}`).pack(
      object
    )
  },

  unpack: (buffer, offset = 0) => {
    const type = buffer.readUInt8(offset + of.offsets.header.type)
    return get(MESSAGE, type, `Invalid OpenFlow message type: ${type}`).unpack(
      buffer,
      offset
    )
  },
}
@@ -1,102 +0,0 @@
import assert from 'assert'

import get from '../../util/get-from-map'

import ofAction from '../action/action'
import of from '../openflow-11'

// =============================================================================

const SIZES = {
  [of.actionType.output]: of.sizes.actionOutput,
  [of.actionType.group]: of.sizes.actionGroup,
  [of.actionType.setVlanId]: of.sizes.actionVlanId,
  [of.actionType.setVlanPcp]: of.sizes.actionVlanPcp,
  [of.actionType.setDlSrc]: of.sizes.actionDlAddr,
  [of.actionType.setDlDst]: of.sizes.actionDlAddr,
  [of.actionType.setNwSrc]: of.sizes.actionNwAddr,
  [of.actionType.setNwDst]: of.sizes.actionNwAddr,
  [of.actionType.setNwTos]: of.sizes.actionNwTos,
  [of.actionType.setNwEcn]: of.sizes.actionNwEcn,
  [of.actionType.setTpSrc]: of.sizes.actionTpPort,
  [of.actionType.setTpDst]: of.sizes.actionTpPort,
  [of.actionType.copyTtlOut]: of.sizes.actionHeader,
  [of.actionType.copyTtlIn]: of.sizes.actionHeader,
  [of.actionType.setMplsLabel]: of.sizes.actionMplsLabel,
  [of.actionType.setMplsTc]: of.sizes.actionMplsTc,
  [of.actionType.setMplsTtl]: of.sizes.actionMplsTtl,
  [of.actionType.decMplsTtl]: of.sizes.actionMplsTtl,
  [of.actionType.pushVlan]: of.sizes.actionPush,
  [of.actionType.popVlan]: of.sizes.actionHeader,
  [of.actionType.pushMpls]: of.sizes.actionPush,
  [of.actionType.popMpls]: of.sizes.actionPopMpls,
  [of.actionType.setQueue]: of.sizes.actionSetQueue,
  [of.actionType.setNwTtl]: of.sizes.actionNwTtl,
  [of.actionType.decNwTtl]: of.sizes.actionNwTtl,
}

// -----------------------------------------------------------------------------

const TYPES = [
  of.instructionType.clearActions,
  of.instructionType.writeActions,
  of.instructionType.applyActions,
]
const OFFSETS = of.offsets.instructionActions

const PAD_LENGTH = 4

// =============================================================================

export default {
  pack: (object, buffer = undefined, offset = 0) => {
    const { type } = object
    assert(TYPES.includes(type))
    object.len = of.sizes.instructionActions
    const { actions = [] } = object
    actions.forEach(action => {
      assert(Object.values(of.actionType).includes(action.type))
      // TODO: manage experimenter
      object.len += get(
        SIZES,
        action.type,
        `Invalid action type: ${action.type}`
      )
    })

    buffer = buffer !== undefined ? buffer : Buffer.alloc(object.len)

    buffer.writeUInt16BE(type, offset + OFFSETS.type)
    buffer.writeUInt16BE(object.len, offset + OFFSETS.len)
    buffer.fill(0, offset + OFFSETS.pad, offset + OFFSETS.pad + PAD_LENGTH)

    let actionOffset = offset + OFFSETS.actions
    actions.forEach(action => {
      ofAction.pack(action, buffer, actionOffset)
      actionOffset += SIZES[action.type]
    })
  },

  unpack: (buffer = undefined, offset = 0) => {
    const type = buffer.readUInt16BE(offset + OFFSETS.type)
    assert(TYPES.includes(type))

    const object = { type }
    object.len = buffer.readUInt16BE(offset + OFFSETS.len)

    if (type === of.instructionType.clearActions) {
      // No actions for this type
      return object
    }

    object.actions = []
    let actionOffset = offset + OFFSETS.actions
    while (actionOffset < object.len) {
      const action = ofAction.unpack(buffer, actionOffset)
      actionOffset += action.len
      object.actions.push(action)
    }

    return object
  },
}

@@ -1,43 +0,0 @@
import get from '../../util/get-from-map'

import actions from './actions'
// import goToTable from './goToTable'
import of from '../openflow-11'
// import writeMetadata from './writeMetadata'

// =============================================================================

const INSTRUCTION = {
  /* TODO:
  [of.instructionType.goToTable]: goToTable,
  [of.instructionType.writeMetadata]: writeMetadata,
  */
  [of.instructionType.writeActions]: actions,
  [of.instructionType.applyActions]: actions,
  [of.instructionType.clearActions]: actions,
}

// -----------------------------------------------------------------------------

const OFFSETS = of.offsets.instruction

// =============================================================================

export default {
  pack: (object, buffer = undefined, offset = 0) => {
    const { type } = object
    return get(INSTRUCTION, type, `Invalid instruction type: ${type}`).pack(
      object,
      buffer,
      offset
    )
  },

  unpack: (buffer = undefined, offset = 0) => {
    const type = buffer.readUInt16BE(offset + OFFSETS.type)
    return get(INSTRUCTION, type, `Invalid instruction type: ${type}`).unpack(
      buffer,
      offset
    )
  },
}

@@ -1,46 +0,0 @@
import assert from 'assert'
import ofHeader from './header'
import of from '../openflow-11'

// =============================================================================

const OFFSETS = of.offsets.echo
const TYPES = [of.type.echoRequest, of.type.echoReply]

// =============================================================================

export default {
  pack: object => {
    const { header, data } = object
    assert(TYPES.includes(header.type))
    const dataSize = data !== undefined ? data.length : 0
    header.length = of.sizes.header + dataSize

    const buffer = Buffer.alloc(header.length)
    ofHeader.pack(header, buffer, OFFSETS.header)
    if (dataSize > 0) {
      data.copy(buffer, OFFSETS.data, 0, dataSize)
    }

    return buffer
  },

  unpack: (buffer, offset = 0) => {
    const header = ofHeader.unpack(buffer, offset + OFFSETS.header)
    assert(TYPES.includes(header.type))

    const object = { header }
    const dataSize = header.length - of.sizes.header
    if (dataSize > 0) {
      object.data = Buffer.alloc(dataSize)
      buffer.copy(
        object.data,
        0,
        offset + OFFSETS.data,
        offset + OFFSETS.data + dataSize
      )
    }

    return object
  },
}
@@ -1,79 +0,0 @@
|
||||
import assert from 'assert'
|
||||
|
||||
import get from '../../util/get-from-map'
|
||||
|
||||
import ofHeader from './header'
|
||||
import of from '../openflow-11'
|
||||
|
||||
// =============================================================================
|
||||
|
||||
const ERROR_CODE = {
|
||||
[of.errorType.helloFailed]: of.helloFailedCode,
|
||||
[of.errorType.badRequest]: of.badRequestCode,
|
||||
[of.errorType.badAction]: of.badActionCode,
|
||||
[of.errorType.badInstruction]: of.badInstructionCode,
|
||||
[of.errorType.badMatch]: of.badMatchCode,
|
||||
[of.errorType.flowModFailed]: of.flowModFailedCode,
|
||||
[of.errorType.groupModFailed]: of.groupModFailedCode,
|
||||
[of.errorType.portModFailed]: of.portModFailedCode,
|
||||
[of.errorType.tableModFailed]: of.tableModFailedCode,
|
||||
[of.errorType.queueOpFailed]: of.queueOpFailedCode,
|
||||
[of.errorType.switchConfigFailed]: of.switchConfigFailedCode,
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
|
||||
const OFFSETS = of.offsets.errorMsg
|
||||
|
||||
// =============================================================================
|
||||
|
||||
export default {
|
||||
pack: (object, buffer = undefined, offset = 0) => {
|
||||
const { header, type, code, data } = object
|
||||
assert(header.type === of.type.error)
|
||||
const errorCodes = get(ERROR_CODE, type, `Invalid error type: ${type}`)
|
||||
assert(Object.values(errorCodes).includes(code))
|
||||
|
||||
object.length = of.sizes.errorMsg
|
||||
if (data !== undefined) {
|
||||
object.length += data.length
|
||||
}
|
||||
|
||||
buffer = buffer !== undefined ? buffer : Buffer.alloc(object.length)
|
||||
|
||||
ofHeader.pack(header, buffer, offset + OFFSETS.header)
|
||||
buffer.writeUInt16BE(type, offset + OFFSETS.type)
|
||||
buffer.writeUInt16BE(code, offset + OFFSETS.code)
|
||||
|
||||
if (data !== undefined) {
|
||||
data.copy(buffer, offset + OFFSETS.data, 0, data.length)
|
||||
}
|
||||
|
||||
return buffer
|
||||
},
|
||||
|
||||
unpack: (buffer, offset = 0) => {
|
||||
const header = ofHeader.unpack(buffer, offset + OFFSETS.header)
|
||||
assert(header.type === of.type.error)
|
||||
|
||||
const type = buffer.readUInt16BE(offset + OFFSETS.type)
|
||||
const errorCodes = get(ERROR_CODE, type, `Invalid error type: ${type}`)
|
||||
|
||||
const code = buffer.readUInt16BE(offset + OFFSETS.code)
|
||||
assert(Object.values(errorCodes).includes(code))
|
||||
|
||||
const object = { header, type, code }
|
||||
const dataSize = header.length - of.sizes.errorMsg
|
||||
if (dataSize > 0) {
|
||||
object.data = Buffer.alloc(dataSize)
|
||||
buffer.copy(
|
||||
object.data,
|
||||
0,
|
||||
offset + OFFSETS.data,
|
||||
offset + OFFSETS.data + dataSize
|
||||
)
|
||||
}
|
||||
|
||||
return object
|
||||
},
|
||||
}
|
||||
@@ -1,73 +0,0 @@
|
||||
import assert from 'assert'
|
||||
|
||||
import ofHeader from './header'
|
||||
import ofPort from '../struct/port'
|
||||
import of from '../openflow-11'
|
||||
|
||||
// =============================================================================
|
||||
|
||||
const OFFSETS = of.offsets.switchFeatures
|
||||
const PAD_LENGTH = 3
|
||||
|
||||
// =============================================================================
|
||||
|
||||
export default {
|
||||
pack: object => {
|
||||
const {
|
||||
header,
|
||||
datapath_id: did,
|
||||
n_buffers: nBufs,
|
||||
n_tables: nTables,
|
||||
capabilities,
|
||||
reserved,
|
||||
ports,
|
||||
} = object
|
||||
assert(header.type === of.type.featuresReply)
|
||||
|
||||
header.length = of.sizes.switchFeatures + ports.length * of.sizes.port
|
||||
|
||||
const buffer = Buffer.alloc(header.length)
|
||||
ofHeader.pack(header, buffer, OFFSETS.header)
|
||||
|
||||
buffer.writeBigUInt64BE(did, OFFSETS.datapathId)
|
||||
buffer.writeUInt32BE(nBufs, OFFSETS.nBuffers)
|
||||
buffer.writeUInt8(nTables, OFFSETS.nTables)
|
||||
buffer.fill(0, OFFSETS.pad, OFFSETS.pad + PAD_LENGTH)
|
||||
buffer.writeUInt32BE(capabilities, OFFSETS.capabilities)
|
||||
buffer.writeUInt32BE(reserved, OFFSETS.reserved)
|
||||
|
||||
let portsOffset = 0
|
||||
ports.forEach(port => {
|
||||
ofPort.pack(port, buffer, OFFSETS.ports + portsOffset++ * of.sizes.port)
|
||||
})
|
||||
|
||||
return buffer
|
||||
},
|
||||
|
||||
unpack: (buffer, offset = 0) => {
|
||||
const header = ofHeader.unpack(buffer, offset + OFFSETS.header)
|
||||
assert(header.type === of.type.featuresReply)
|
||||
|
||||
const object = { header }
|
||||
object.datapath_id = buffer.toString(
|
||||
'hex',
|
||||
offset + OFFSETS.datapathId,
|
||||
offset + OFFSETS.datapathId + 8
|
||||
)
|
||||
object.n_buffers = buffer.readUInt32BE(offset + OFFSETS.nBuffers)
|
||||
object.n_tables = buffer.readUInt8(offset + OFFSETS.nTables)
|
||||
|
||||
object.capabilities = buffer.readUInt32BE(offset + OFFSETS.capabilities)
|
||||
object.reserved = buffer.readUInt32BE(offset + OFFSETS.reserved)
|
||||
|
||||
object.ports = []
|
||||
const nPorts = (header.length - of.sizes.switchFeatures) / of.sizes.port
|
||||
for (let i = 0; i < nPorts; ++i) {
|
||||
object.ports.push(
|
||||
ofPort.unpack(buffer, offset + OFFSETS.ports + i * of.sizes.port)
|
||||
)
|
||||
}
|
||||
|
||||
return object
|
||||
},
|
||||
}
|
||||
@@ -1,24 +0,0 @@
|
||||
import assert from 'assert'
|
||||
|
||||
import ofHeader from './header'
|
||||
import of from '../openflow-11'
|
||||
|
||||
// =============================================================================
|
||||
|
||||
export default {
|
||||
pack: object => {
|
||||
const { header } = object
|
||||
assert(header.type === of.type.featuresRequest)
|
||||
header.length = of.sizes.featuresRequest
|
||||
|
||||
return ofHeader.pack(header)
|
||||
},
|
||||
|
||||
unpack: (buffer, offset = 0) => {
|
||||
const header = ofHeader.unpack(buffer, offset)
|
||||
assert(header.type === of.type.featuresRequest)
|
||||
assert(header.length === of.sizes.featuresRequest)
|
||||
|
||||
return { header }
|
||||
},
|
||||
}
|
||||
@@ -1,197 +0,0 @@
import assert from 'assert'

import get from '../../util/get-from-map'
import ofInstruction from '../instruction/instruction'
import uIntHelper from '../../util/uint-helper'

import ofHeader from './header'
import of from '../openflow-11'
import ofMatch from '../struct/match/match'

// =============================================================================

const INSTRUCTION_SIZE = {
  [of.instructionType.goToTable]: of.sizes.instructionGotoTable,
  [of.instructionType.writeMetadata]: of.sizes.instructionWriteMetadata,
  [of.instructionType.clearActions]: of.sizes.instructionActions,
  [of.instructionType.writeActions]: of.sizes.instructionActions,
  [of.instructionType.applyActions]: of.sizes.instructionActions,
}

const ACTION_SIZE = {
  [of.actionType.output]: of.sizes.actionOutput,
  [of.actionType.group]: of.sizes.actionGroup,
  [of.actionType.setVlanId]: of.sizes.actionVlanId,
  [of.actionType.setVlanPcp]: of.sizes.actionVlanPcp,
  [of.actionType.setDlSrc]: of.sizes.actionDlAddr,
  [of.actionType.setDlDst]: of.sizes.actionDlAddr,
  [of.actionType.setNwSrc]: of.sizes.actionNwAddr,
  [of.actionType.setNwDst]: of.sizes.actionNwAddr,
  [of.actionType.setNwTos]: of.sizes.actionNwTos,
  [of.actionType.setNwEcn]: of.sizes.actionNwEcn,
  [of.actionType.setTpSrc]: of.sizes.actionTpPort,
  [of.actionType.setTpDst]: of.sizes.actionTpPort,
  [of.actionType.copyTtlOut]: of.sizes.actionHeader,
  [of.actionType.copyTtlIn]: of.sizes.actionHeader,
  [of.actionType.setMplsLabel]: of.sizes.actionMplsLabel,
  [of.actionType.setMplsTc]: of.sizes.actionMplsTc,
  [of.actionType.setMplsTtl]: of.sizes.actionMplsTtl,
  [of.actionType.decMplsTtl]: of.sizes.actionMplsTtl,
  [of.actionType.pushVlan]: of.sizes.actionPush,
  [of.actionType.popVlan]: of.sizes.actionHeader,
  [of.actionType.pushMpls]: of.sizes.actionPush,
  [of.actionType.popMpls]: of.sizes.actionPopMpls,
  [of.actionType.setQueue]: of.sizes.actionSetQueue,
  [of.actionType.setNwTtl]: of.sizes.actionNwTtl,
  [of.actionType.decNwTtl]: of.sizes.actionNwTtl,
}

// -----------------------------------------------------------------------------

const OFFSETS = of.offsets.flowMod

const COOKIE_LENGTH = 8
const PAD_LENGTH = 2

// =============================================================================

export default {
  pack: (object, buffer = undefined, offset = 0) => {
    const {
      header,
      cookie,
      cookie_mask,
      table_id = 0,
      command,
      idle_timeout = 0,
      hard_timeout = 0,
      priority = of.defaultPriority,
      buffer_id = 0xffffffff,
      out_port = of.port.any,
      out_group = of.group.any,
      flags = 0,
      match,
      instructions = [],
    } = object
    // fill header length
    header.length = of.sizes.flowMod
    instructions.forEach(instruction => {
      header.length += get(
        INSTRUCTION_SIZE,
        instruction.type,
        `Invalid instruction type: ${instruction.type}`
      )
      const { actions = [] } = instruction
      actions.forEach(action => {
        header.length += get(
          ACTION_SIZE,
          action.type,
          `Invalid action type: ${action.type}`
        )
      })
    })

    buffer = buffer !== undefined ? buffer : Buffer.alloc(header.length)

    ofHeader.pack(header, buffer, offset + OFFSETS.header)

    if (cookie !== undefined) {
      if (cookie_mask !== undefined) {
        cookie_mask.copy(buffer, offset + OFFSETS.cookieMask)
      } else {
        buffer.fill(
          0x00,
          offset + OFFSETS.cookieMask,
          offset + OFFSETS.cookieMask + COOKIE_LENGTH
        )
      }
      cookie.copy(buffer, offset + OFFSETS.cookie)
    } else {
      buffer.fill(
        0x00,
        offset + OFFSETS.cookie,
        offset + OFFSETS.cookie + COOKIE_LENGTH
      )
      buffer.fill(
        0xff,
        offset + OFFSETS.cookieMask,
        offset + OFFSETS.cookieMask + COOKIE_LENGTH
      )
    }

    buffer.writeUInt8(table_id, offset + OFFSETS.tableId)
    assert(Object.values(of.flowModCommand).includes(command))
    buffer.writeUInt8(command, offset + OFFSETS.command)
    buffer.writeUInt16BE(idle_timeout, offset + OFFSETS.idleTimeout)
    buffer.writeUInt16BE(hard_timeout, offset + OFFSETS.hardTimeout)
    buffer.writeUInt16BE(priority, offset + OFFSETS.priority)
    buffer.writeUInt32BE(buffer_id, offset + OFFSETS.bufferId)
    buffer.writeUInt32BE(out_port, offset + OFFSETS.outPort)
    buffer.writeUInt32BE(out_group, offset + OFFSETS.outGroup)
    buffer.writeUInt16BE(flags, offset + OFFSETS.flags)
    buffer.fill(0, offset + OFFSETS.pad, offset + OFFSETS.pad + PAD_LENGTH)

    ofMatch.pack(match, buffer, offset + OFFSETS.match)

    let instructionOffset = offset + OFFSETS.instructions
    instructions.forEach(instruction => {
      ofInstruction.pack(instruction, buffer, instructionOffset)
      instructionOffset += instruction.len
    })

    return buffer
  },

  unpack: (buffer, offset = 0) => {
    const header = ofHeader.unpack(buffer, offset + OFFSETS.header)
    assert(header.type === of.type.flowMod)

    const object = { header }

    object.cookie = Buffer.alloc(COOKIE_LENGTH)
    buffer.copy(
      object.cookie,
      0,
      offset + OFFSETS.cookie,
      offset + OFFSETS.cookie + COOKIE_LENGTH
    )
    if (
      !uIntHelper.isUInt64None([
        buffer.readUInt32BE(offset + OFFSETS.cookieMask),
        buffer.readUInt32BE(offset + OFFSETS.cookieMask + COOKIE_LENGTH / 2),
      ])
    ) {
      object.cookie_mask = Buffer.alloc(COOKIE_LENGTH)
      buffer.copy(
        object.cookie_mask,
        0,
        offset + OFFSETS.cookieMask,
        offset + OFFSETS.cookieMask + COOKIE_LENGTH
      )
    }

    object.table_id = buffer.readUInt8(offset + OFFSETS.tableId)
    object.command = buffer.readUInt8(offset + OFFSETS.command)
    assert(Object.values(of.flowModCommand).includes(object.command))

    object.idle_timeout = buffer.readUInt16BE(offset + OFFSETS.idleTimeout)
    object.hard_timeout = buffer.readUInt16BE(offset + OFFSETS.hardTimeout)
    object.priority = buffer.readUInt16BE(offset + OFFSETS.priority)
    object.buffer_id = buffer.readUInt32BE(offset + OFFSETS.bufferId)
    object.out_port = buffer.readUInt32BE(offset + OFFSETS.outPort)
    object.out_group = buffer.readUInt32BE(offset + OFFSETS.outGroup)
    object.flags = buffer.readUInt16BE(offset + OFFSETS.flags)

    object.match = ofMatch.unpack(buffer, offset + OFFSETS.match)

    object.instructions = []
    let instructionOffset = offset + OFFSETS.instructions
    while (instructionOffset < offset + header.length) {
      const instruction = ofInstruction.unpack(buffer, instructionOffset)
      object.instructions.push(instruction)
      instructionOffset += instruction.len
    }

    return object
  },
}
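As a rough illustration of the length bookkeeping above, a minimal pack call might look like the sketch below. This is not taken from the source: the import paths are hypothetical and `of.flowModCommand.add` is an assumed constant name (only `of.flowModCommand` itself appears in the code).

```js
import flowMod from './flow-mod' // hypothetical import path
import of from '../openflow-11' // hypothetical import path

const buffer = flowMod.pack({
  header: { version: of.version, type: of.type.flowMod, xid: 1 },
  command: of.flowModCommand.add, // assumed constant name
  match: { type: of.matchType.standard }, // everything wildcarded
  // no instructions: header.length stays at of.sizes.flowMod
})
```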
@@ -1,24 +0,0 @@
import assert from 'assert'

import ofHeader from './header'
import of from '../openflow-11'

// =============================================================================

export default {
  pack: object => {
    const { header } = object
    assert(header.type === of.type.getConfigRequest)
    header.length = of.sizes.header

    return ofHeader.pack(header)
  },

  unpack: (buffer, offset = 0) => {
    const header = ofHeader.unpack(buffer, offset)
    assert(header.type === of.type.getConfigRequest)
    assert(header.length === of.sizes.header)

    return { header }
  },
}
@@ -1,39 +0,0 @@
import assert from 'assert'

import of from '../openflow-11'

// =============================================================================

const OFFSETS = of.offsets.header

// =============================================================================

export default {
  pack: (object, buffer = undefined, offset = 0) => {
    buffer = buffer !== undefined ? buffer : Buffer.alloc(of.sizes.header)
    const { version, type, length, xid } = object

    assert(version === of.version)
    assert(Object.values(of.type).includes(type))

    buffer.writeUInt8(version, offset + OFFSETS.version)
    buffer.writeUInt8(type, offset + OFFSETS.type)
    buffer.writeUInt16BE(length, offset + OFFSETS.length)
    buffer.writeUInt32BE(xid, offset + OFFSETS.xid)

    return buffer
  },

  unpack: (buffer, offset = 0) => {
    const version = buffer.readUInt8(offset + OFFSETS.version)
    assert(version === of.version)

    const type = buffer.readUInt8(offset + OFFSETS.type)
    assert(Object.values(of.type).includes(type))

    const length = buffer.readUInt16BE(offset + OFFSETS.length)
    const xid = buffer.readUInt32BE(offset + OFFSETS.xid)

    return { version, type, length, xid }
  },
}
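For orientation, a pack/unpack roundtrip of this header codec might look like the following sketch; it only assumes the protocol constants already referenced above, and the import paths are illustrative.

```js
import ofHeader from './header' // hypothetical import path
import of from '../openflow-11' // hypothetical import path

const buf = ofHeader.pack({
  version: of.version,
  type: of.type.hello,
  length: of.sizes.header,
  xid: 42,
})
ofHeader.unpack(buf) // -> { version, type, length, xid: 42 }
```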
@@ -1,27 +0,0 @@
import assert from 'assert'

import ofHeader from './header'
import of from '../openflow-11'

// =============================================================================

const OFFSETS = of.offsets.hello

// =============================================================================

export default {
  pack: object => {
    const { header } = object
    assert(header.type === of.type.hello)
    header.length = of.sizes.hello

    return ofHeader.pack(header)
  },

  unpack: (buffer, offset = 0) => {
    const header = ofHeader.unpack(buffer, offset + OFFSETS.header)
    assert(header.type === of.type.hello)

    return { header }
  },
}
@@ -1,38 +0,0 @@
import assert from 'assert'

import ofHeader from './header'
import of from '../openflow-11'

// =============================================================================

const OFFSETS = of.offsets.switchConfig
const TYPES = [of.type.getConfigReply, of.type.setConfig]

// =============================================================================

export default {
  pack: object => {
    const { header, flags, miss_send_len } = object
    assert(TYPES.includes(header.type))
    header.length = of.sizes.switchConfig

    const buffer = Buffer.alloc(header.length)
    ofHeader.pack(header, buffer, OFFSETS.header)

    buffer.writeUInt16BE(flags, OFFSETS.flags)
    buffer.writeUInt16BE(miss_send_len, OFFSETS.missSendLen)

    return buffer
  },

  unpack: (buffer, offset = 0) => {
    const header = ofHeader.unpack(buffer, offset + OFFSETS.header)
    assert(TYPES.includes(header.type))
    assert(header.length === of.sizes.switchConfig)

    const flags = buffer.readUInt16BE(offset + OFFSETS.flags)
    const miss_send_len = buffer.readUInt16BE(offset + OFFSETS.missSendLen)

    return { header, flags, miss_send_len }
  },
}
File diff suppressed because it is too large
@@ -1,374 +0,0 @@
import assert from 'assert'
import addressParser from '../../../util/addrress-parser'
import uIntHelper from '../../../util/uint-helper'
import of from '../../openflow-11'

// =============================================================================

const OFFSETS = of.offsets.match
const WILDCARDS = of.flowWildcards

const IP4_ADDR_LEN = 4
const METADATA_LENGTH = 8
const PAD_LENGTH = 1
const PAD2_LENGTH = 3

// =============================================================================

export default {
  pack: (object, buffer = undefined, offset = 0) => {
    assert(object.type === of.matchType.standard)
    object.length = of.sizes.match
    buffer = buffer !== undefined ? buffer : Buffer.alloc(object.length)

    buffer.writeUInt16BE(object.type, offset + OFFSETS.type)
    buffer.writeUInt16BE(object.length, offset + OFFSETS.length)

    let wildcards = 0
    let inPort = 0
    if (object.in_port !== undefined) {
      inPort = object.in_port
    } else {
      wildcards |= WILDCARDS.inPort
    }
    buffer.writeUInt32BE(inPort, offset + OFFSETS.inPort)

    if (object.dl_src !== undefined) {
      if (object.dl_src_mask !== undefined) {
        addressParser.stringToEth(
          object.dl_src_mask,
          buffer,
          offset + OFFSETS.dlSrcMask
        )
      } else {
        buffer.fill(
          0x00,
          offset + OFFSETS.dlSrcMask,
          offset + OFFSETS.dlSrcMask + of.ethAddrLen
        )
      }
      addressParser.stringToEth(object.dl_src, buffer, offset + OFFSETS.dlSrc)
    } else {
      buffer.fill(
        0x00,
        offset + OFFSETS.dlSrc,
        offset + OFFSETS.dlSrc + of.ethAddrLen
      )
      buffer.fill(
        0xff,
        offset + OFFSETS.dlSrcMask,
        offset + OFFSETS.dlSrcMask + of.ethAddrLen
      )
    }

    if (object.dl_dst !== undefined) {
      if (object.dl_dst_mask !== undefined) {
        addressParser.stringToEth(
          object.dl_dst_mask,
          buffer,
          offset + OFFSETS.dlDstMask
        )
      } else {
        buffer.fill(
          0x00,
          offset + OFFSETS.dlDstMask,
          offset + OFFSETS.dlDstMask + of.ethAddrLen
        )
      }
      addressParser.stringToEth(object.dl_dst, buffer, offset + OFFSETS.dlDst)
    } else {
      buffer.fill(
        0x00,
        offset + OFFSETS.dlDst,
        offset + OFFSETS.dlDst + of.ethAddrLen
      )
      buffer.fill(
        0xff,
        offset + OFFSETS.dlDstMask,
        offset + OFFSETS.dlDstMask + of.ethAddrLen
      )
    }

    let dlVlan = 0
    if (object.dl_vlan !== undefined) {
      dlVlan = object.dl_vlan
    } else {
      wildcards |= WILDCARDS.dlVlan
    }
    buffer.writeUInt16BE(dlVlan, offset + OFFSETS.dlVlan)

    let dlVlanPcp = 0
    if (object.dl_vlan_pcp !== undefined) {
      dlVlanPcp = object.dl_vlan_pcp
    } else {
      wildcards |= WILDCARDS.dlVlanPcp
    }
    buffer.writeUInt8(dlVlanPcp, offset + OFFSETS.dlVlanPcp)

    buffer.fill(0, offset + OFFSETS.pad1, offset + OFFSETS.pad1 + PAD_LENGTH)

    let dlType = 0
    if (object.dl_type !== undefined) {
      dlType = object.dl_type
    } else {
      wildcards |= WILDCARDS.dlType
    }
    buffer.writeUInt16BE(dlType, offset + OFFSETS.dlType)

    let nwTos = 0
    if (object.nw_tos !== undefined) {
      nwTos = object.nw_tos
    } else {
      wildcards |= WILDCARDS.nwTos
    }
    buffer.writeUInt8(nwTos, offset + OFFSETS.nwTos)

    let nwProto = 0
    if (object.nw_proto !== undefined) {
      nwProto = object.nw_proto
    } else {
      wildcards |= WILDCARDS.nwProto
    }
    buffer.writeUInt8(nwProto, offset + OFFSETS.nwProto)

    if (object.nw_src !== undefined) {
      if (object.nw_src_mask !== undefined) {
        addressParser.stringToip4(
          object.nw_src_mask,
          buffer,
          offset + OFFSETS.nwSrcMask
        )
      } else {
        buffer.fill(
          0x00,
          offset + OFFSETS.nwSrcMask,
          offset + OFFSETS.nwSrcMask + IP4_ADDR_LEN
        )
      }
      addressParser.stringToip4(object.nw_src, buffer, offset + OFFSETS.nwSrc)
    } else {
      buffer.fill(
        0x00,
        offset + OFFSETS.nwSrc,
        offset + OFFSETS.nwSrc + IP4_ADDR_LEN
      )
      buffer.fill(
        0xff,
        offset + OFFSETS.nwSrcMask,
        offset + OFFSETS.nwSrcMask + IP4_ADDR_LEN
      )
    }

    if (object.nw_dst !== undefined) {
      if (object.nw_dst_mask !== undefined) {
        addressParser.stringToip4(
          object.nw_dst_mask,
          buffer,
          offset + OFFSETS.nwDstMask
        )
      } else {
        buffer.fill(
          0x00,
          offset + OFFSETS.nwDstMask,
          offset + OFFSETS.nwDstMask + IP4_ADDR_LEN
        )
      }
      addressParser.stringToip4(object.nw_dst, buffer, offset + OFFSETS.nwDst)
    } else {
      buffer.fill(
        0x00,
        offset + OFFSETS.nwDst,
        offset + OFFSETS.nwDst + IP4_ADDR_LEN
      )
      buffer.fill(
        0xff,
        offset + OFFSETS.nwDstMask,
        offset + OFFSETS.nwDstMask + IP4_ADDR_LEN
      )
    }

    let tpSrc = 0
    if (object.tp_src !== undefined) {
      tpSrc = object.tp_src
    } else {
      wildcards |= WILDCARDS.tpSrc
    }
    buffer.writeUInt16BE(tpSrc, offset + OFFSETS.tpSrc)

    let tpDst = 0
    if (object.tp_dst !== undefined) {
      tpDst = object.tp_dst
    } else {
      wildcards |= WILDCARDS.tpDst
    }
    buffer.writeUInt16BE(tpDst, offset + OFFSETS.tpDst)

    let mplsLabel = 0
    if (object.mpls_label !== undefined) {
      mplsLabel = object.mpls_label
    } else {
      wildcards |= WILDCARDS.mplsLabel
    }
    buffer.writeUInt32BE(mplsLabel, offset + OFFSETS.mplsLabel)

    let mplsTc = 0
    if (object.mpls_tc !== undefined) {
      mplsTc = object.mpls_tc
    } else {
      wildcards |= WILDCARDS.mplsTc
    }
    buffer.writeUInt8(mplsTc, offset + OFFSETS.mplsTc)

    buffer.fill(0, offset + OFFSETS.pad2, offset + OFFSETS.pad2 + PAD2_LENGTH)

    if (object.metadata !== undefined) {
      if (object.metadata_mask !== undefined) {
        // copy the caller-provided mask into the packet buffer
        object.metadata_mask.copy(buffer, offset + OFFSETS.metadataMask)
      } else {
        buffer.fill(
          0x00,
          offset + OFFSETS.metadataMask,
          offset + OFFSETS.metadataMask + METADATA_LENGTH
        )
      }
      object.metadata.copy(buffer, offset + OFFSETS.metadata)
    } else {
      buffer.fill(
        0x00,
        offset + OFFSETS.metadata,
        offset + OFFSETS.metadata + METADATA_LENGTH
      )
      buffer.fill(
        0xff,
        offset + OFFSETS.metadataMask,
        offset + OFFSETS.metadataMask + METADATA_LENGTH
      )
    }

    buffer.writeUInt32BE(wildcards, offset + OFFSETS.wildcards)

    return buffer
  },

  unpack: (buffer, offset = 0) => {
    const object = {}
    object.type = buffer.readUInt16BE(offset + OFFSETS.type)
    assert(object.type === of.matchType.standard)

    object.length = buffer.readUInt16BE(offset + OFFSETS.length)
    assert(object.length === of.sizes.match)

    // Wildcards indicate which value to use for the match.
    // if `wildcards & of.wildcards.<value>` === 0 then `value` is not wildcarded and must be used.
    const wildcards = (object.wildcards = buffer.readUInt32BE(
      offset + OFFSETS.wildcards
    ))
    if ((wildcards & WILDCARDS.inPort) === 0) {
      object.in_port = buffer.readUInt32BE(offset + OFFSETS.inPort)
    }

    if (!addressParser.isEthMaskAll(buffer, offset + OFFSETS.dlSrcMask)) {
      if (!addressParser.isEthMaskNone(buffer, offset + OFFSETS.dlSrcMask)) {
        object.dl_src_mask = addressParser.ethToString(
          buffer,
          offset + OFFSETS.dlSrcMask
        )
      }
      object.dl_src = addressParser.ethToString(buffer, offset + OFFSETS.dlSrc)
    }
    if (!addressParser.isEthMaskAll(buffer, offset + OFFSETS.dlDstMask)) {
      if (!addressParser.isEthMaskNone(buffer, offset + OFFSETS.dlDstMask)) {
        object.dl_dst_mask = addressParser.ethToString(
          buffer,
          offset + OFFSETS.dlDstMask
        )
      }
      object.dl_dst = addressParser.ethToString(buffer, offset + OFFSETS.dlDst)
    }

    if ((wildcards & WILDCARDS.dlVlan) === 0) {
      object.dl_vlan = buffer.readUInt16BE(offset + OFFSETS.dlVlan)
    }
    if ((wildcards & WILDCARDS.dlVlanPcp) === 0) {
      object.dl_vlan_pcp = buffer.readUInt8(offset + OFFSETS.dlVlanPcp)
    }
    if ((wildcards & WILDCARDS.dlType) === 0) {
      object.dl_type = buffer.readUInt16BE(offset + OFFSETS.dlType)
    }

    if ((wildcards & WILDCARDS.nwTos) === 0) {
      object.nw_tos = buffer.readUInt8(offset + OFFSETS.nwTos)
    }
    if ((wildcards & WILDCARDS.nwProto) === 0) {
      object.nw_proto = buffer.readUInt8(offset + OFFSETS.nwProto)
    }

    if (!addressParser.isIp4MaskAll(buffer, offset + OFFSETS.nwSrcMask)) {
      if (!addressParser.isIp4MaskNone(buffer, offset + OFFSETS.nwSrcMask)) {
        object.nw_src_mask = addressParser.ip4ToString(
          buffer,
          offset + OFFSETS.nwSrcMask
        )
      }
      object.nw_src = addressParser.ip4ToString(buffer, offset + OFFSETS.nwSrc)
    }
    if (!addressParser.isIp4MaskAll(buffer, offset + OFFSETS.nwDstMask)) {
      if (!addressParser.isIp4MaskNone(buffer, offset + OFFSETS.nwDstMask)) {
        object.nw_dst_mask = addressParser.ip4ToString(
          buffer,
          offset + OFFSETS.nwDstMask
        )
      }
      object.nw_dst = addressParser.ip4ToString(buffer, offset + OFFSETS.nwDst)
    }

    if ((wildcards & WILDCARDS.tpSrc) === 0) {
      object.tp_src = buffer.readUInt16BE(offset + OFFSETS.tpSrc)
    }
    if ((wildcards & WILDCARDS.tpDst) === 0) {
      object.tp_dst = buffer.readUInt16BE(offset + OFFSETS.tpDst)
    }

    if ((wildcards & WILDCARDS.mplsLabel) === 0) {
      object.mpls_label = buffer.readUInt32BE(offset + OFFSETS.mplsLabel)
    }
    if ((wildcards & WILDCARDS.mplsTc) === 0) {
      object.mpls_tc = buffer.readUInt8(offset + OFFSETS.mplsTc)
    }

    const metadataMask = [
      buffer.readUInt32BE(offset + OFFSETS.metadataMask),
      buffer.readUInt32BE(offset + OFFSETS.metadataMask + METADATA_LENGTH / 2),
    ]
    if (!uIntHelper.isUInt64All(metadataMask)) {
      if (!uIntHelper.isUInt64None(metadataMask)) {
        object.metadata_mask = Buffer.alloc(METADATA_LENGTH)
        buffer.copy(
          object.metadata_mask,
          0,
          offset + OFFSETS.metadataMask,
          offset + OFFSETS.metadataMask + METADATA_LENGTH
        )
      }
      object.metadata = Buffer.alloc(METADATA_LENGTH)
      buffer.copy(
        object.metadata,
        0,
        offset + OFFSETS.metadata,
        offset + OFFSETS.metadata + METADATA_LENGTH
      )
    }

    return object
  },
}
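A quick sketch of how this match codec might be driven; the import paths and field values are illustrative, not from the source.

```js
import match from './match' // hypothetical import path
import of from '../../openflow-11' // hypothetical import path

// Only in_port and dl_vlan are matched; every other field is wildcarded.
const buf = match.pack({ type: of.matchType.standard, in_port: 1, dl_vlan: 100 })
match.unpack(buf) // -> { type, length, wildcards, in_port: 1, dl_vlan: 100 }
```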
@@ -1,79 +0,0 @@
import of from '../openflow-11'
import addressParser from '../../util/addrress-parser'

// =============================================================================

const OFFSETS = of.offsets.port
const PAD_LENGTH = 4
const PAD2_LENGTH = 2

// =============================================================================

export default {
  pack: (object, buffer = undefined, offset = 0) => {
    buffer = buffer !== undefined ? buffer : Buffer.alloc(of.sizes.port)
    const {
      port_no: portNo,
      hw_addr: hwAddr,
      name,
      config,
      state,
      curr,
      advertised,
      supported,
      peer,
      curr_speed: currSpeed,
      max_speed: maxSpeed,
    } = object

    buffer.writeUInt32BE(portNo, offset + OFFSETS.portNo)
    buffer.fill(0, offset + OFFSETS.pad, offset + OFFSETS.pad + PAD_LENGTH)
    addressParser.stringToEth(hwAddr, buffer, offset + OFFSETS.hwAddr)
    buffer.fill(0, offset + OFFSETS.pad2, offset + OFFSETS.pad2 + PAD2_LENGTH)
    buffer.write(name, offset + OFFSETS.name, of.maxPortNameLen)
    if (name.length < of.maxPortNameLen) {
      buffer.fill(
        0,
        offset + OFFSETS.name + name.length,
        offset + OFFSETS.name + of.maxPortNameLen
      )
    }

    buffer.writeUInt32BE(config, offset + OFFSETS.config)
    buffer.writeUInt32BE(state, offset + OFFSETS.state)
    buffer.writeUInt32BE(curr, offset + OFFSETS.curr)
    buffer.writeUInt32BE(advertised, offset + OFFSETS.advertised)
    buffer.writeUInt32BE(supported, offset + OFFSETS.supported)
    buffer.writeUInt32BE(peer, offset + OFFSETS.peer)
    buffer.writeUInt32BE(currSpeed, offset + OFFSETS.currSpeed)
    buffer.writeUInt32BE(maxSpeed, offset + OFFSETS.maxSpeed)
    return buffer
  },

  unpack: (buffer, offset = 0) => {
    const body = {}

    body.port_no = buffer.readUInt32BE(offset + OFFSETS.portNo)
    body.hw_addr = addressParser.ethToString(buffer, offset + OFFSETS.hwAddr)

    const name = buffer.toString(
      'utf8',
      offset + OFFSETS.name,
      offset + OFFSETS.name + of.maxPortNameLen
    )
    // Strip the NUL padding added when name.length < of.maxPortNameLen
    const end = name.indexOf('\0')
    body.name = end === -1 ? name : name.slice(0, end)

    body.config = buffer.readUInt32BE(offset + OFFSETS.config)
    body.state = buffer.readUInt32BE(offset + OFFSETS.state)

    body.curr = buffer.readUInt32BE(offset + OFFSETS.curr)
    body.advertised = buffer.readUInt32BE(offset + OFFSETS.advertised)
    body.supported = buffer.readUInt32BE(offset + OFFSETS.supported)
    body.peer = buffer.readUInt32BE(offset + OFFSETS.peer)

    body.curr_speed = buffer.readUInt32BE(offset + OFFSETS.currSpeed)
    body.max_speed = buffer.readUInt32BE(offset + OFFSETS.maxSpeed)

    return body
  },
}
@@ -1,45 +0,0 @@
import assert from 'assert'

import of from './index'
import scheme from './default-header-scheme'
import { readChunk } from '@vates/read-chunk'

// =============================================================================

export default async function* parse(socket) {
  let buffer = Buffer.alloc(1024)
  let data

  // Read the header
  while ((data = await readChunk(socket, scheme.size)) !== null) {
    // Read OpenFlow message size from its header
    const msgSize = data.readUInt16BE(scheme.offsets.length)
    data.copy(buffer, 0, 0, scheme.size)

    if (buffer.length < msgSize) {
      buffer = resize(buffer, msgSize)
    }

    // Read the rest of the OpenFlow message
    if (msgSize > scheme.size) {
      data = await readChunk(socket, msgSize - scheme.size)
      assert.notStrictEqual(data, null)
      data.copy(buffer, scheme.size, 0, msgSize - scheme.size)
    }

    yield of.unpack(buffer)
  }
}

// -----------------------------------------------------------------------------

function resize(buffer, size) {
  let newLength = buffer.length
  do {
    newLength *= 2
  } while (newLength < size)

  const newBuffer = Buffer.alloc(newLength)
  buffer.copy(newBuffer)
  return newBuffer
}
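A possible consumer of this generator, as a sketch: the host name is illustrative and 6653 is the standard OpenFlow port mentioned elsewhere in this changeset.

```js
import net from 'net'
import parse from './parse-socket' // hypothetical import path

async function main() {
  const socket = net.connect(6653, 'switch.example.org')
  // Messages are unpacked and yielded one by one as they arrive on the socket.
  for await (const message of parse(socket)) {
    console.log(message.header.type)
  }
}
```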
@@ -1,64 +0,0 @@
import assert from 'assert'
import util from 'util'

// =============================================================================

export default {
  isEthMaskNone: (buffer, offset) =>
    buffer.readUInt32BE(offset) === 0x00000000 &&
    buffer.readUInt16BE(offset + 4) === 0x0000,

  isEthMaskAll: (buffer, offset) =>
    buffer.readUInt32BE(offset) === 0xffffffff &&
    buffer.readUInt16BE(offset + 4) === 0xffff,

  isIp4MaskNone: (buffer, offset) => buffer.readUInt32BE(offset) === 0x00000000,

  isIp4MaskAll: (buffer, offset) => buffer.readUInt32BE(offset) === 0xffffffff,

  ethToString: (buffer, offset) =>
    buffer.toString('hex', offset, offset + 1) +
    ':' +
    buffer.toString('hex', offset + 1, offset + 2) +
    ':' +
    buffer.toString('hex', offset + 2, offset + 3) +
    ':' +
    buffer.toString('hex', offset + 3, offset + 4) +
    ':' +
    buffer.toString('hex', offset + 4, offset + 5) +
    ':' +
    buffer.toString('hex', offset + 5, offset + 6),

  stringToEth: (string, buffer, offset) => {
    const eth = /^([0-9A-Fa-f]{2}):([0-9A-Fa-f]{2}):([0-9A-Fa-f]{2}):([0-9A-Fa-f]{2}):([0-9A-Fa-f]{2}):([0-9A-Fa-f]{2})$/.exec(
      string
    )
    assert(eth !== null)
    buffer.writeUInt8(parseInt(eth[1], 16), offset)
    buffer.writeUInt8(parseInt(eth[2], 16), offset + 1)
    buffer.writeUInt8(parseInt(eth[3], 16), offset + 2)
    buffer.writeUInt8(parseInt(eth[4], 16), offset + 3)
    buffer.writeUInt8(parseInt(eth[5], 16), offset + 4)
    buffer.writeUInt8(parseInt(eth[6], 16), offset + 5)
  },

  ip4ToString: (buffer, offset) =>
    util.format(
      '%d.%d.%d.%d',
      buffer.readUInt8(offset),
      buffer.readUInt8(offset + 1),
      buffer.readUInt8(offset + 2),
      buffer.readUInt8(offset + 3)
    ),

  stringToip4: (string, buffer, offset) => {
    const ip = /^([1-9]?\d|1\d\d|2[0-4]\d|25[0-5])\.([1-9]?\d|1\d\d|2[0-4]\d|25[0-5])\.([1-9]?\d|1\d\d|2[0-4]\d|25[0-5])\.([1-9]?\d|1\d\d|2[0-4]\d|25[0-5])$/.exec(
      string
    )
    assert(ip !== null)
    buffer.writeUInt8(parseInt(ip[1], 10), offset)
    buffer.writeUInt8(parseInt(ip[2], 10), offset + 1)
    buffer.writeUInt8(parseInt(ip[3], 10), offset + 2)
    buffer.writeUInt8(parseInt(ip[4], 10), offset + 3)
  },
}
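Usage is symmetric between the string and buffer forms; a small sketch (the import path spelling follows the source):

```js
import addressParser from './addrress-parser' // path as spelled in the source

const buf = Buffer.alloc(10)
addressParser.stringToEth('aa:bb:cc:dd:ee:ff', buf, 0)
addressParser.ethToString(buf, 0) // 'aa:bb:cc:dd:ee:ff'

addressParser.stringToip4('192.168.0.1', buf, 6)
addressParser.ip4ToString(buf, 6) // '192.168.0.1'
```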
@@ -1,11 +0,0 @@
import assert from 'assert'

export default function get(map, key, errorMsg = undefined) {
  const value = map[String(key)]
  assert.notStrictEqual(
    value,
    undefined,
    errorMsg !== undefined ? errorMsg : `${key} is invalid`
  )
  return value
}
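A minimal usage sketch: the helper returns the mapped value or fails fast with a readable message (the `sizes` map below is made up for illustration).

```js
const sizes = { 1: 8, 2: 16 }
get(sizes, 1) // 8
get(sizes, 3) // throws AssertionError: '3 is invalid'
get(sizes, 3, 'unknown size') // throws AssertionError: 'unknown size'
```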
@@ -1,10 +0,0 @@
const ZERO = 0x00000000
const ALL = 0xffffffff

// =============================================================================

export default {
  isUInt64None: n => n[0] === ZERO && n[1] === ZERO,

  isUInt64All: n => n[0] === ALL && n[1] === ALL,
}
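The protocol code above handles 64-bit fields as two 32-bit big-endian halves, which is what these predicates expect; a sketch:

```js
const buf = Buffer.alloc(8) // all zeroes
const halves = [buf.readUInt32BE(0), buf.readUInt32BE(4)]
uIntHelper.isUInt64None(halves) // true: all 64 bits are 0
uIntHelper.isUInt64All(halves) // false: not all 64 bits are 1
```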
@@ -1,9 +0,0 @@
export default {
  // TODO: more versions
  // openFlow10: 0x01,
  openFlow11: 0x02,
  // openFlow12: 0x03,
  // openFlow13: 0x04,
  // openFlow14: 0x05,
  // openFlow15: 0x06,
}
@@ -19,14 +19,7 @@ Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/self-
```js
import { genSelfSigned } from '@xen-orchestra/self-signed'

console.log(
  await genSelfSigned({
    // Number of days this certificate will be valid.
    //
    // Default: 360
    days: 600,
  })
)
console.log(await genSelfSigned())
// {
//   cert: '-----BEGIN CERTIFICATE-----\n' +
//     // content…
@@ -1,14 +1,7 @@
```js
import { genSelfSigned } from '@xen-orchestra/self-signed'

console.log(
  await genSelfSigned({
    // Number of days this certificate will be valid.
    //
    // Default: 360
    days: 600,
  })
)
console.log(await genSelfSigned())
// {
//   cert: '-----BEGIN CERTIFICATE-----\n' +
//     // content…
@@ -10,12 +10,12 @@ const openssl = (cmd, args, { input, ...opts } = {}) =>
    }
  })

exports.genSelfSignedCert = async ({ days = 360 } = {}) => {
exports.genSelfSignedCert = async () => {
  const key = await openssl('genrsa', ['2048'])
  return {
    cert: await openssl(
      'req',
      ['-batch', '-new', '-key', '-', '-x509', '-days', String(days), '-nodes'],
      ['-batch', '-new', '-key', '-', '-x509', '-days', '360', '-nodes'],
      {
        input: key,
      }
@@ -33,23 +33,23 @@
    "node": ">=8.10"
  },
  "dependencies": {
    "chalk": "^4.1.0",
    "chalk": "^2.2.0",
    "exec-promise": "^0.7.0",
    "fs-extra": "^9.0.0",
    "fs-promise": "^2.0.3",
    "get-stream": "^6.0.0",
    "get-stream": "^4.1.0",
    "http-request-plus": "^0.8.0",
    "human-format": "^0.11.0",
    "human-format": "^0.10.0",
    "l33teral": "^3.0.3",
    "lodash": "^4.17.4",
    "nice-pipe": "0.0.0",
    "pretty-ms": "^7.0.0",
    "pretty-ms": "^4.0.0",
    "progress-stream": "^2.0.0",
    "pw": "^0.0.4",
    "strip-indent": "^3.0.0",
    "xdg-basedir": "^4.0.0",
    "strip-indent": "^2.0.0",
    "xdg-basedir": "^3.0.0",
    "xo-lib": "^0.9.0",
    "xo-vmdk-to-vhd": "^1.3.1"
    "xo-vmdk-to-vhd": "^1.2.1"
  },
  "devDependencies": {
    "@babel/cli": "^7.0.0",
CHANGELOG.md
@@ -1,213 +1,11 @@
# ChangeLog

## **5.51.1** (2020-10-14)
## **5.48.3** (2020-07-10)



### Enhancements

- [Host/Advanced] Add the field `IOMMU` if it is defined (PR [#5294](https://github.com/vatesfr/xen-orchestra/pull/5294))
- [Backup logs/report] Hide merge task when no merge is done (PR [#5263](https://github.com/vatesfr/xen-orchestra/pull/5263))
- [New backup] Enable created schedules by default (PR [#5280](https://github.com/vatesfr/xen-orchestra/pull/5280))
- [Backup/overview] Link backup jobs/schedules to their corresponding logs [#4564](https://github.com/vatesfr/xen-orchestra/issues/4564) (PR [#5260](https://github.com/vatesfr/xen-orchestra/pull/5260))
- [VM] Hide backup tab for non-admin users [#5309](https://github.com/vatesfr/xen-orchestra/issues/5309) (PR [#5317](https://github.com/vatesfr/xen-orchestra/pull/5317))
- [VM/Bulk migrate] Sort hosts in the select so that the hosts on the same pool are shown first [#4462](https://github.com/vatesfr/xen-orchestra/issues/4462) (PR [#5308](https://github.com/vatesfr/xen-orchestra/pull/5308))
- [Proxy] Ability to update HTTP proxy configuration on XOA proxy (PR [#5148](https://github.com/vatesfr/xen-orchestra/pull/5148))

### Bug fixes

- [XOA/Notifications] Don't show expired notifications (PR [#5304](https://github.com/vatesfr/xen-orchestra/pull/5304))
- [Backup/S3] Fix secret key edit form [#5233](https://github.com/vatesfr/xen-orchestra/issues/5233) (PR [#5305](https://github.com/vatesfr/xen-orchestra/pull/5305))
- [New network] Remove the possibility of creating a network on a bond member interface (PR [#5262](https://github.com/vatesfr/xen-orchestra/pull/5262))
- [User] Fix custom filters not showing up when selecting a default filter for templates (PR [#5298](https://github.com/vatesfr/xen-orchestra/pull/5298))
- [Self/VDI migration] Fix hidden VDI after migration (PR [#5296](https://github.com/vatesfr/xen-orchestra/pull/5296))
- [Self/VDI migration] Fix `not enough permissions` error (PR [#5299](https://github.com/vatesfr/xen-orchestra/pull/5299))
- [Home] Hide backup filter for non-admin users [#5285](https://github.com/vatesfr/xen-orchestra/issues/5285) (PR [#5264](https://github.com/vatesfr/xen-orchestra/pull/5264))
- [Backup/S3] Fix request signature error [#5253](https://github.com/vatesfr/xen-orchestra/issues/5253) (PR [#5315](https://github.com/vatesfr/xen-orchestra/pull/5315))
- [SDN Controller] Fix tunnel traffic going on the wrong NIC: see https://xcp-ng.org/forum/topic/3544/mtu-problems-with-vxlan. (PR [#5281](https://github.com/vatesfr/xen-orchestra/pull/5281))
- [Settings/IP Pools] Fix some IP ranges being split into multiple ranges in the UI [#3170](https://github.com/vatesfr/xen-orchestra/issues/3170) (PR [#5314](https://github.com/vatesfr/xen-orchestra/pull/5314))
- [Self/Delete] Detach VMs and remove their ACLs on removing a resource set [#4797](https://github.com/vatesfr/xen-orchestra/issues/4797) (PR [#5312](https://github.com/vatesfr/xen-orchestra/pull/5312))
- Fix `not enough permissions` error when accessing some pages as a Self Service user (PR [#5303](https://github.com/vatesfr/xen-orchestra/pull/5303))
- [VM] Explicit error when VM migration failed due to unset default SR on destination pool [#5282](https://github.com/vatesfr/xen-orchestra/issues/5282) (PR [#5306](https://github.com/vatesfr/xen-orchestra/pull/5306))

### Packages to release

- xo-server-sdn-controller 1.0.4
- xo-server-backup-reports 0.16.7
- xo-server 5.68.0
- xo-web 5.72.0

## **5.51.0** (2020-09-30)

### Highlights

- [Self/VDI migration] Ability to migrate VDIs to other SRs within a resource set [#5020](https://github.com/vatesfr/xen-orchestra/issues/5020) (PR [#5201](https://github.com/vatesfr/xen-orchestra/pull/5201))
- [LDAP] Ability to import LDAP groups to XO [#1884](https://github.com/vatesfr/xen-orchestra/issues/1884) (PR [#5279](https://github.com/vatesfr/xen-orchestra/pull/5279))
- [Tasks] Show XO objects linked to pending/finished tasks [#4275](https://github.com/vatesfr/xen-orchestra/issues/4275) (PR [#5267](https://github.com/vatesfr/xen-orchestra/pull/5267))
- [Backup logs] Ability to filter by VM/pool name [#4406](https://github.com/vatesfr/xen-orchestra/issues/4406) (PR [#5208](https://github.com/vatesfr/xen-orchestra/pull/5208))
- [Backup/logs] Log's tasks pagination [#4406](https://github.com/vatesfr/xen-orchestra/issues/4406) (PR [#5209](https://github.com/vatesfr/xen-orchestra/pull/5209))

### Enhancements

- [VM Import] Make the `Description` field optional (PR [#5258](https://github.com/vatesfr/xen-orchestra/pull/5258))
- [New VM] Hide missing ISOs in selector [#5222](https://github.com/vatesfr/xen-orchestra/issues/5222)
- [Dashboard/Health] Show VMs that have too many snapshots [#5238](https://github.com/vatesfr/xen-orchestra/pull/5238)
- [Groups] Ability to delete multiple groups at once (PR [#5264](https://github.com/vatesfr/xen-orchestra/pull/5264))

### Bug fixes

- [Import VMDK] Fix `No position specified for vmdisk1` error (PR [#5255](https://github.com/vatesfr/xen-orchestra/pull/5255))
- [API] Fix `this.removeSubjectFromResourceSet is not a function` error on calling `resourceSet.removeSubject` via `xo-cli` [#5265](https://github.com/vatesfr/xen-orchestra/issues/5265) (PR [#5266](https://github.com/vatesfr/xen-orchestra/pull/5266))
- [Import OVA] Fix frozen UI when dropping a big OVA on the page (PR [#5274](https://github.com/vatesfr/xen-orchestra/pull/5274))
- [Remotes/S3] Fix S3 backup of 50GB+ files [#5197](https://github.com/vatesfr/xen-orchestra/issues/5197) (PR [#5242](https://github.com/vatesfr/xen-orchestra/pull/5242))
- [Import OVA] Improve import speed of embedded gzipped VMDK disks (PR [#5275](https://github.com/vatesfr/xen-orchestra/pull/5275))
- [Remotes] Fix editing bucket and directory for S3 remotes [#5233](https://github.com/vatesfr/xen-orchestra/issues/5233) (PR [#5276](https://github.com/vatesfr/xen-orchestra/pull/5276))

### Packages to release

- xo-server-auth-ldap 0.9.0
- @xen-orchestra/fs 0.11.1
- xo-vmdk-to-vhd 1.3.1
- xo-server 5.67.0
- xo-web 5.71.0

## **5.50.3** (2020-09-17)



### Packages to release

- xo-server-audit 0.8.0

## **5.50.2** (2020-09-10)

### Enhancements

- [VM/network] VIF's locking mode: improve tooltip messages [#4713](https://github.com/vatesfr/xen-orchestra/issues/4713) (PR [#5227](https://github.com/vatesfr/xen-orchestra/pull/5227))
- [Backup/overview] Link log entry to its job [#4564](https://github.com/vatesfr/xen-orchestra/issues/4564) (PR [#5202](https://github.com/vatesfr/xen-orchestra/pull/5202))

### Bug fixes

- [New SR] Fix `Cannot read property 'trim' of undefined` error (PR [#5212](https://github.com/vatesfr/xen-orchestra/pull/5212))
- [Dashboard/Health] Fix suspended VDIs considered as orphans [#5248](https://github.com/vatesfr/xen-orchestra/issues/5248) (PR [#5249](https://github.com/vatesfr/xen-orchestra/pull/5249))

### Packages to release

- xo-server-audit 0.7.2
- xo-web 5.70.0
- xo-server 5.66.2

## **5.50.1** (2020-09-04)

### Enhancements

- [Usage report] Exclude replicated VMs from the VMs evolution [#4778](https://github.com/vatesfr/xen-orchestra/issues/4778) (PR [#5241](https://github.com/vatesfr/xen-orchestra/pull/5241))

### Bug fixes

- [VM/Network] Fix TX checksumming [#5234](https://github.com/vatesfr/xen-orchestra/issues/5234)

### Packages to release

- xo-server-usage-report 0.9.0
- xo-server-audit 0.7.1
- xo-server 5.66.1

## **5.50.0** (2020-08-27)

### Highlights

- [Health/Orphan VDIs] Improve heuristic and list both VDI snapshots and normal VDIs (PR [#5228](https://github.com/vatesfr/xen-orchestra/pull/5228))
- [[Audit] Regularly save fingerprints on remote server for better tampering detection](https://xen-orchestra.com/blog/xo-audit/) [#4844](https://github.com/vatesfr/xen-orchestra/issues/4844) (PR [#5077](https://github.com/vatesfr/xen-orchestra/pull/5077))
- [VM/Network] Ability to change a VIF's locking mode [#4713](https://github.com/vatesfr/xen-orchestra/issues/4713) (PR [#5188](https://github.com/vatesfr/xen-orchestra/pull/5188))
- [VM/Network] Ability to set VIF TX checksumming [#5095](https://github.com/vatesfr/xen-orchestra/issues/5095) (PR [#5182](https://github.com/vatesfr/xen-orchestra/pull/5182))
- [Host/Network] Button to refresh the list of physical interfaces [#5230](https://github.com/vatesfr/xen-orchestra/issues/5230)
- [VM] Ability to protect VM from accidental shutdown [#4773](https://github.com/vatesfr/xen-orchestra/issues/4773)

### Enhancements

- [Proxy] Improve health check error messages [#5161](https://github.com/vatesfr/xen-orchestra/issues/5161) (PR [#5191](https://github.com/vatesfr/xen-orchestra/pull/5191))
- [VM/Console] Hide missing ISOs in selector [#5222](https://github.com/vatesfr/xen-orchestra/issues/5222)

### Bug fixes

- [Proxy/deploy] Fix `no such proxy ok` error on a failure trial start (PR [#5196](https://github.com/vatesfr/xen-orchestra/pull/5196))
- [VM/snapshots] Fix redirection when creating a VM from a snapshot (PR [#5213](https://github.com/vatesfr/xen-orchestra/pull/5213))
- [User] Fix `Incorrect password` error when changing password [#5218](https://github.com/vatesfr/xen-orchestra/issues/5218) (PR [#5221](https://github.com/vatesfr/xen-orchestra/pull/5221))
- [Audit] Obfuscate sensitive data in `user.changePassword` action's records [#5219](https://github.com/vatesfr/xen-orchestra/issues/5219) (PR [#5220](https://github.com/vatesfr/xen-orchestra/pull/5220))
- [SDN Controller] Fix `Cannot read property '$network' of undefined` error at the network creation (PR [#5217](https://github.com/vatesfr/xen-orchestra/pull/5217))

### Packages to release

- xo-server-audit 0.7.0
- xo-server-sdn-controller 1.0.3
- xo-server 5.66.0
- xo-web 5.69.0

## **5.49.1** (2020-08-05)

### Enhancements

- [SR/advanced] Show thin/thick provisioning for missing SR types (PR [#5204](https://github.com/vatesfr/xen-orchestra/pull/5204))

### Bug fixes

- [Patches] Don't log errors related to missing patches listing (Previous fix in 5.48.3 was not working)

### Packages to release

- xo-server 5.64.1
- xo-server-sdn-controller 1.0.2
- xo-web 5.67.0

## **5.49.0** (2020-07-31)

### Highlights

- [Home/VM, host] Ability to filter by power state (PR [#5118](https://github.com/vatesfr/xen-orchestra/pull/5118))
- [Proxy/deploy] Ability to set HTTP proxy configuration (PR [#5145](https://github.com/vatesfr/xen-orchestra/pull/5145))
- [Import/OVA] Allow for VMDK disks inside .ova files to be gzipped (PR [#5085](https://github.com/vatesfr/xen-orchestra/pull/5085))
- [Proxy] Show pending upgrades (PR [#5167](https://github.com/vatesfr/xen-orchestra/pull/5167))
- [SDN Controller] Add/Remove network traffic rules for a VM's VIFs (PR [#5135](https://github.com/vatesfr/xen-orchestra/pull/5135))
- [Backup/health] Show VM snapshots with missing jobs, schedules or VMs [#5086](https://github.com/vatesfr/xen-orchestra/issues/5086) (PR [#5125](https://github.com/vatesfr/xen-orchestra/pull/5125))
- [New delta backup] Show a warning icon when the advanced full backup interval setting and the backup retention are higher than 50 (PR [#5144](https://github.com/vatesfr/xen-orchestra/pull/5144))
- [VM/network] Improve the network locking mode feedback [#4713](https://github.com/vatesfr/xen-orchestra/issues/4713) (PR [#5170](https://github.com/vatesfr/xen-orchestra/pull/5170))
- [Remotes] Add AWS S3 as a backup storage
- [New VM] Only make network boot option first when the VM has no disks or when the network installation is chosen [#4980](https://github.com/vatesfr/xen-orchestra/issues/4980) (PR [#5119](https://github.com/vatesfr/xen-orchestra/pull/5119))

### Enhancements

- Log the `Invalid XML-RPC message` error as an unexpected response (PR [#5138](https://github.com/vatesfr/xen-orchestra/pull/5138))
- [VM/disks] By default, sort disks by their device position instead of their name [#5163](https://github.com/vatesfr/xen-orchestra/issues/5163) (PR [#5165](https://github.com/vatesfr/xen-orchestra/pull/5165))
- [Schedule/edit] Ability to enable/disable an ordinary job's schedule [#5026](https://github.com/vatesfr/xen-orchestra/issues/5026) (PR [#5111](https://github.com/vatesfr/xen-orchestra/pull/5111))
- [New schedule] Enable 'Enable immediately after creation' by default (PR [#5111](https://github.com/vatesfr/xen-orchestra/pull/5111))
- [Self Service] Ability to globally ignore snapshots in resource set quotas (PR [#5164](https://github.com/vatesfr/xen-orchestra/pull/5164))
- [Self] Ability to cancel a resource set edition without saving it (PR [#5174](https://github.com/vatesfr/xen-orchestra/pull/5174))
- [VIF] Ability to click an IP address to copy it to the clipboard [#5185](https://github.com/vatesfr/xen-orchestra/issues/5185) (PR [#5186](https://github.com/vatesfr/xen-orchestra/pull/5186))

### Bug fixes

- [Backup/Restore] Fixes `an error has occurred` when all backups for a specific VM have been deleted (PR [#5156](https://github.com/vatesfr/xen-orchestra/pull/5156))
- [OVA Import] Fix import of Red Hat generated .ova files (PR [#5159](https://github.com/vatesfr/xen-orchestra/pull/5159))
- [Fast clone] Fix bug where the name of the created VM would be "undefined_clone" (PR [#5173](https://github.com/vatesfr/xen-orchestra/pull/5173))
- [Audit] Fix unreadable exported records format (PR [#5179](https://github.com/vatesfr/xen-orchestra/pull/5179))
- [SDN Controller] Fixes TLS error `dh key too small` [#5074](https://github.com/vatesfr/xen-orchestra/issues/5074) (PR [#5187](https://github.com/vatesfr/xen-orchestra/pull/5187))

### Released packages

- xo-server-audit 0.6.1
- @xen-orchestra/openflow 0.1.1
- xo-server-sdn-controller 1.0.1
- xo-vmdk-to-vhd 1.3.0
- xo-remote-parser 0.6.0
- @xen-orchestra/fs 0.11.0
- xo-server 5.64.0
- xo-web 5.66.0

## **5.48.3** (2020-07-10)

### Enhancements

- [Audit] Logging user actions is now opt-in (PR [#5151](https://github.com/vatesfr/xen-orchestra/pull/5151))
- [Settings/Audit] Warn if logging is inactive (PR [#5152](https://github.com/vatesfr/xen-orchestra/pull/5152))

@@ -304,6 +102,8 @@

## **5.47.1** (2020-06-02)



### Bug fixes

- [auth-ldap] Sign in was broken in XO 5.47.0 (PR [#5039](https://github.com/vatesfr/xen-orchestra/pull/5039))
@@ -7,16 +7,10 @@

> Users must be able to say: “Nice enhancement, I'm eager to test it”

- [Host/Advanced] Display installed certificates [#5134](https://github.com/vatesfr/xen-orchestra/issues/5134) (PR [#5319](https://github.com/vatesfr/xen-orchestra/pull/5319))
- [VM/network] Allow Self Service users to change a VIF's network [#5020](https://github.com/vatesfr/xen-orchestra/issues/5020) (PR [#5203](https://github.com/vatesfr/xen-orchestra/pull/5203))

### Bug fixes

> Users must be able to say: “I had this issue, happy to know it's fixed”

- [Host] Fix power state stuck on busy after power off [#4919](https://github.com/vatesfr/xen-orchestra/issues/4919) (PR [#5288](https://github.com/vatesfr/xen-orchestra/pull/5288))
- [VM/Network] Don't allow users to change a VIF's locking mode if they don't have permissions on the network (PR [#5283](https://github.com/vatesfr/xen-orchestra/pull/5283))

### Packages to release

> Packages will be released in the order they are here, therefore, they should
@@ -33,9 +27,3 @@
> - major: if the change breaks compatibility
>
> In case of conflict, the highest (lowest in previous list) `$version` wins.

- vhd-lib minor
- @xen-orchestra/audit-core minor
- xo-server-audit minor
- xo-web minor
- xo-server minor
Binary file not shown. (Before: 35 KiB)
Binary file not shown. (Before: 27 KiB)
Binary file not shown. (Before: 25 KiB)
Binary file not shown. (Before: 87 KiB)
@@ -83,7 +83,3 @@ To check your free space, enter your XOA and run `xoa check` to check free syste
This happens when you have a _smart backup job_ that doesn't match any VMs. For example: you created a job to back up all running VMs, but no VMs were running when the backup was scheduled. This can also happen if you lost the connection to your pool master (the VMs aren't visible anymore from Xen Orchestra).

Edit your job and check that it matches some VMs, or check that your pool is still connected to XOA.

## Error: SR_OPERATION_NOT_SUPPORTED

This error can be caused by leaving a removable device (such as USB storage) attached to the VM that you are backing up or snapshotting; detach the device and retry. This can also be caused if you created a VM disk using the [RAW format](https://xcp-ng.org/docs/storage.html#using-raw-format).
@@ -72,7 +72,7 @@ Now if you do this:

It means any VMs on "Lab Pool" with the "prod" tag will be backed up.

## RAM Enabled backup
## RAM Enabled bakcup

:::tip
This feature is **only compatible** with XCP-ng 8.0 or more recent. Citrix Hypervisor has not yet merged our changes, even though we contributed them to their code directly.
@@ -14,18 +14,6 @@ This section will cover the license management system for commercial editions of



## Rebind XO license

:::warning
A license can only be bound to a single appliance at a time; rebinding your license will unbind it from any other appliance.
Once a license is bound, the only way to unbind it is to contact us with a [support ticket](https://xen-orchestra.com/#!/member/support)!
:::

To rebind your Xen Orchestra appliance, simply connect to the **appliance on which you want to bind the license** and click the rebind option (the "Move license to this XOA" button) in the license section.



You will then see a confirmation screen:



Once that's done, simply run an upgrade on your freshly bound appliance to download the correct edition packages.
@@ -36,7 +36,7 @@ In the network creation view:

:::tip

- All hosts in a private network must be able to reach the other hosts' management interface and all hosts must be able to reach one another on the interface selected for private networks creation.
- All hosts in a private network must be able to reach the other hosts' management interface.
  > The term ‘management interface’ is used to indicate the IP-enabled NIC that carries the management traffic.
- Only 1 encrypted GRE network and 1 encrypted VxLAN network per pool can exist at a time due to an Open vSwitch limitation.
:::
@@ -75,32 +75,6 @@ Encryption is not available prior to XCP-ng 8.0.

## OpenFlow rules

Please see the [devblog about OpenFlow rules](https://xen-orchestra.com/blog/vms-vif-network-traffic-control/).

This feature requires the OpenFlow port to be opened.

In the VM network tab, a new column has been added: _Network rules_.

- The _Add rule_ button displays a form to add a new rule, choosing:
  - whether to enable or disable the matching traffic
  - for a specific protocol (optional)
  - on a specific port (optional)
  - matching a specific IP or IP range (optional)
  - coming from the VIF / going to the VIF / both
- The _Show rules_ button allows displaying all the rules of a VIF.
- When the rules are displayed, a button to delete each rule is available.




:::tip

- This feature requires the OpenFlow port (TCP 6653) to be opened. (See [the requirements](#openflow))
:::

### Requirements

### Openflow

- On XCP-ng prior to 8.0:
  - To be able to use `OpenFlow`, the following line needs to be added, if not already present, in `/etc/sysconfig/iptables` of all the hosts where `OpenFlow` is wanted: `-A xapi-INPUT -p tcp -m conntrack --ctstate NEW -m tcp --dport 6653 -j ACCEPT`
This feature will be released soon. Stay tuned!
:::
@@ -237,11 +237,7 @@ Then, you can define quotas on this set:

- max disk usage

:::tip
Replicated VMs and snapshots created by a backup job don't use quotas.
:::

:::tip
A snapshot of a Self Service VM will use as many resources as a VM would. You can disable this by setting `ignoreVmSnapshotResources` to `true` in the `selfService` section of `xo-server`'s config.
Snapshotting a VM within a self-service will _not_ use the quota from the resource set. The same rule applies for backups and replication.
:::

When you click on create, you can see the resource set and remove or edit it:
package.json
@@ -6,7 +6,7 @@
    "babel-eslint": "^10.0.1",
    "babel-jest": "^26.0.1",
    "benchmark": "^2.1.4",
    "eslint": "^7.6.0",
    "eslint": "^6.0.1",
    "eslint-config-prettier": "^6.0.0",
    "eslint-config-standard": "^14.1.0",
    "eslint-config-standard-jsx": "^8.1.0",
@@ -17,7 +17,7 @@
    "eslint-plugin-react": "^7.6.1",
    "eslint-plugin-standard": "^4.0.0",
    "exec-promise": "^0.7.0",
    "flow-bin": "^0.131.0",
    "flow-bin": "^0.126.0",
    "globby": "^11.0.1",
    "handlebars": "^4.7.6",
    "husky": "^4.2.5",
@@ -39,15 +39,6 @@
  },
  "jest": {
    "collectCoverage": true,
    "moduleNameMapper": {
      "^.": "./src",
      "^(@vates/[^/]+)": "$1/src",
      "^(@xen-orchestra/[^/]+)": "$1/src",
      "^(value-matcher)": "$1/src",
      "^(vhd-cli)": "$1/src",
      "^(vhd-lib)": "$1/src",
      "^(xo-[^/]+)": "$1/src"
    },
    "projects": [
      "<rootDir>"
    ],
@@ -74,11 +65,12 @@
    "dev-test": "jest --bail --watch \"^(?!.*\\.integ\\.spec\\.js$)\"",
    "docs:dev": "vuepress dev docs",
    "docs:build": "vuepress build docs",
    "posttest": "scripts/run-script test",
    "prepare": "scripts/run-script prepare",
    "pretest": "eslint --ignore-path .gitignore .",
    "prettify": "prettier --ignore-path .gitignore --write '**/*.{js,jsx,md,mjs,ts,tsx}'",
    "test": "npm run test-lint && npm run test-unit",
    "test": "jest \"^(?!.*\\.integ\\.spec\\.js$)\"",
    "test-integration": "jest \".integ\\.spec\\.js$\"",
    "test-lint": "eslint --ignore-path .gitignore .",
    "test-unit": "jest \"^(?!.*\\.integ\\.spec\\.js$)\" && scripts/run-script test",
    "travis-tests": "scripts/travis-tests"
  },
  "workspaces": [
@@ -564,13 +564,9 @@ const parser = P.grammar({
|
||||
).map(_ => new Or(_[4])),
|
||||
P.seq(P.text('!'), r.ws, r.term).map(_ => new Not(_[2])),
|
||||
P.seq(P.regex(/[<>]=?/), r.rawString).map(([op, val]) => {
|
||||
let num = +val
|
||||
if (Number.isNaN(num)) {
num = ms(val)
if (num === undefined) {
throw new TypeError('value must be a number')
}
num += Date.now()
val = +val
if (Number.isNaN(val)) {
throw new TypeError('value must be a number')
}
return new Comparison(op, val)
}),

@@ -16,6 +16,7 @@ Installation of the [npm package](https://npmjs.org/package/value-matcher):

```js
import { createPredicate } from 'value-matcher'

;[
{ user: 'sam', age: 65, active: false },
{ user: 'barney', age: 36, active: true },
@@ -31,53 +32,6 @@ import { createPredicate } from 'value-matcher'
// ]
```

## Supported predicates

### `any`

The value must be strictly equal to the pattern.

```js
const predicate = createPredicate(42)

predicate(42) // true
predicate('foo') // false
```

### `{ [property: string]: Pattern }`

The value must be an object with all pattern properties matching.

```js
const predicate = createPredicate({ foo: 'bar' })

predicate({ foo: 'bar', baz: 42 }) // true
predicate('foo') // false
```

### `Pattern[]`

The value must be an array with some of its items matching each of the pattern's items.

```js
const predicate = createPredicate([42, { foo: 'bar' }])

predicate([false, { foo: 'bar', baz: 42 }, null, 42]) // true
predicate('foo') // false
```

### `{ __all: Pattern[] }`

All patterns must match.

### `{ __or: Pattern[] }`

At least one pattern must match.

### `{ __not: Pattern }`

The pattern must not match.
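
The three combinators above ship without inline examples; here is a minimal sketch combining `__or` and `__not` (the property names are made up for illustration):

```js
import { createPredicate } from 'value-matcher'

// Matches objects that are either active admins, or not flagged.
const predicate = createPredicate({
  __or: [{ active: true, role: 'admin' }, { __not: { flagged: true } }],
})

predicate({ active: true, role: 'admin' }) // true
predicate({ flagged: true }) // false
```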

## Contributions

Contributions are _very_ welcome, either on the documentation or on

@@ -1,5 +1,6 @@
```js
import { createPredicate } from 'value-matcher'

;[
{ user: 'sam', age: 65, active: false },
{ user: 'barney', age: 36, active: true },
@@ -14,50 +15,3 @@ import { createPredicate } from 'value-matcher'
// { user: 'barney', age: 36, active: true },
// ]
```

## Supported predicates

### `any`

The value must be strictly equal to the pattern.

```js
const predicate = createPredicate(42)

predicate(42) // true
predicate('foo') // false
```

### `{ [property: string]: Pattern }`

The value must be an object with all pattern properties matching.

```js
const predicate = createPredicate({ foo: 'bar' })

predicate({ foo: 'bar', baz: 42 }) // true
predicate('foo') // false
```

### `Pattern[]`

The value must be an array with some of its items matching each of the pattern's items.

```js
const predicate = createPredicate([42, { foo: 'bar' }])

predicate([false, { foo: 'bar', baz: 42 }, null, 42]) // true
predicate('foo') // false
```

### `{ __all: Pattern[] }`

All patterns must match.

### `{ __or: Pattern[] }`

At least one pattern must match.

### `{ __not: Pattern }`

The pattern must not match.

@@ -43,6 +43,7 @@
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepare": "yarn run build",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
}

@@ -28,7 +28,7 @@
"node": ">=8.10"
},
"dependencies": {
"@xen-orchestra/fs": "^0.11.1",
"@xen-orchestra/fs": "^0.10.4",
"cli-progress": "^3.1.0",
"exec-promise": "^0.7.0",
"getopts": "^2.2.3",
@@ -52,7 +52,7 @@
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "rimraf dist/ && index-modules --cjs-lazy src/commands",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build",
"prepare": "yarn run build",
"postversion": "npm publish"
}
}

@@ -36,12 +36,12 @@
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"@babel/preset-flow": "^7.0.0",
"@xen-orchestra/fs": "^0.11.1",
"@xen-orchestra/fs": "^0.10.4",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^7.0.2",
"execa": "^4.0.2",
"fs-promise": "^2.0.0",
"get-stream": "^6.0.0",
"get-stream": "^5.1.0",
"index-modules": "^0.3.0",
"readable-stream": "^3.0.6",
"rimraf": "^3.0.0",
@@ -53,7 +53,7 @@
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run clean",
"prepublishOnly": "yarn run build",
"prepare": "yarn run build",
"postversion": "npm publish"
},
"author": {

@@ -307,15 +307,19 @@ export default class Vhd {
return this._write(blockTable.slice(i, i + 4), this.header.tableOffset + i)
}

// Allocate a new uninitialized block in the BAT
// Make a new empty block at vhd end.
// Update block allocation table in context and in file.
async _createBlock(blockId) {
assert.strictEqual(this._getBatEntry(blockId), BLOCK_UNUSED)

const blockAddr = Math.ceil(this._getEndOfData() / SECTOR_SIZE)

debug(`create block ${blockId} at ${blockAddr}`)

await this._setBatEntry(blockId, blockAddr)
await Promise.all([
// Write an empty block and addr in vhd file.
this._write(Buffer.alloc(this.fullBlockSize), sectorsToBytes(blockAddr)),

this._setBatEntry(blockId, blockAddr),
])

return blockAddr
}

@@ -37,9 +37,9 @@
},
"dependencies": {
"archy": "^1.0.0",
"chalk": "^4.1.0",
"chalk": "^3.0.0",
"exec-promise": "^0.7.0",
"human-format": "^0.11.0",
"human-format": "^0.10.0",
"lodash": "^4.17.4",
"pw": "^0.0.4",
"xen-api": "^0.29.0"

@@ -2,7 +2,7 @@
"dependencies": {
"getopts": "^2.2.3",
"golike-defer": "^0.4.1",
"human-format": "^0.11.0",
"human-format": "^0.10.1",
"process-top": "^1.0.0",
"progress-stream": "^2.0.0",
"promise-toolbox": "^0.13.0",

@@ -40,7 +40,7 @@
"event-to-promise": "^0.8.0",
"exec-promise": "^0.7.0",
"http-request-plus": "^0.8.0",
"jest-diff": "^26.4.2",
"jest-diff": "^24.0.0",
"json-rpc-protocol": "^0.13.1",
"kindof": "^2.0.0",
"lodash": "^4.17.4",

@@ -34,17 +34,17 @@
},
"dependencies": {
"bluebird": "^3.5.1",
"chalk": "^4.1.0",
"chalk": "^3.0.0",
"exec-promise": "^0.7.0",
"fs-promise": "^2.0.3",
"http-request-plus": "^0.9.1",
"human-format": "^0.11.0",
"human-format": "^0.10.0",
"l33teral": "^3.0.3",
"lodash": "^4.17.4",
"micromatch": "^4.0.2",
"mkdirp": "^1.0.4",
"mkdirp": "^0.5.1",
"nice-pipe": "0.0.0",
"pretty-ms": "^7.0.0",
"pretty-ms": "^5.0.0",
"progress-stream": "^2.0.0",
"promise-toolbox": "^0.15.0",
"pump": "^3.0.0",

@@ -8,7 +8,7 @@ const readFile = promisify(require('fs').readFile)
const writeFile = promisify(require('fs').writeFile)

const l33t = require('l33teral')
const mkdirp = require('mkdirp')
const mkdirp = promisify(require('mkdirp'))
const xdgBasedir = require('xdg-basedir')

// ===================================================================

@@ -41,7 +41,7 @@
"end-of-stream": "^1.1.0",
"exec-promise": "^0.7.0",
"highland": "^2.10.1",
"through2": "^4.0.2",
"through2": "^3.0.0",
"xo-lib": "^0.9.0"
},
"devDependencies": {

@@ -1,7 +1,7 @@
{
"private": false,
"name": "xo-remote-parser",
"version": "0.6.0",
"version": "0.5.0",
"license": "AGPL-3.0-or-later",
"description": "",
"keywords": [],
@@ -26,8 +26,7 @@
"node": ">=6"
},
"dependencies": {
"lodash": "^4.13.1",
"url-parse": "^1.4.7"
"lodash": "^4.13.1"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
@@ -43,7 +42,7 @@
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "rimraf dist/",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build",
"prepare": "yarn run build",
"postversion": "npm publish"
}
}

@@ -2,7 +2,6 @@ import filter from 'lodash/filter'
import map from 'lodash/map'
import trim from 'lodash/trim'
import trimStart from 'lodash/trimStart'
import Url from 'url-parse'

const NFS_RE = /^([^:]+):(?:(\d+):)?([^:]+)$/
const SMB_RE = /^([^:]+):(.+)@([^@]+)\\\\([^\0]+)(?:\0(.*))?$/
@@ -40,13 +39,6 @@ export const parse = string => {
object.domain = domain
object.username = username
object.password = password
} else if (type === 's3') {
const parsed = new Url(string)
object.type = 's3'
object.host = parsed.host
object.path = parsed.pathname
object.username = parsed.username
object.password = decodeURIComponent(parsed.password)
}
return object
}
@@ -68,9 +60,6 @@ export const format = ({
if (type === 'smb') {
string += `${username}:${password}@${domain}\\\\${host}`
}
if (type === 's3') {
string += `${username}:${encodeURIComponent(password)}@${host}`
}
path = sanitizePath(path)
if (type === 'smb') {
path = path.split('/')

@@ -44,17 +44,6 @@ const data = deepFreeze({
path: '/media/nfs',
},
},
S3: {
string:
's3://AKIAS:XSuBupZ0mJlu%2B@s3-us-west-2.amazonaws.com/test-bucket/dir',
object: {
type: 's3',
host: 's3-us-west-2.amazonaws.com',
path: '/test-bucket/dir',
username: 'AKIAS',
password: 'XSuBupZ0mJlu+',
},
},
})
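// For illustration (not part of the original fixtures): with the S3 entry
// above, parse('s3://AKIAS:XSuBupZ0mJlu%2B@s3-us-west-2.amazonaws.com/test-bucket/dir')
// is expected to return the matching `object`, i.e. type 's3', the host and
// path split out, and the password URL-decoded ('%2B' becomes '+').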

const parseData = deepFreeze({

@@ -1,6 +1,6 @@
{
"name": "xo-server-audit",
"version": "0.8.0",
"version": "0.6.0",
"license": "AGPL-3.0-or-later",
"description": "Audit plugin for XO-Server",
"keywords": [
@@ -36,7 +36,6 @@
"devDependencies": {
"@babel/cli": "^7.7.0",
"@babel/core": "^7.7.2",
"@babel/plugin-proposal-nullish-coalescing-operator": "^7.0.0",
"@babel/preset-env": "^7.7.1",
"cross-env": "^7.0.2",
"rimraf": "^3.0.0"
@@ -50,10 +49,8 @@
},
"dependencies": {
"@xen-orchestra/audit-core": "^0.1.1",
"@xen-orchestra/cron": "^1.0.6",
"@xen-orchestra/log": "^0.2.0",
"async-iterator-to-stream": "^1.1.0",
"lodash": "^4.17.19",
"promise-toolbox": "^0.15.0",
"readable-stream": "^3.5.0",
"xo-common": "^0.5.0"

@@ -2,7 +2,6 @@ import asyncIteratorToStream from 'async-iterator-to-stream'
import createLogger from '@xen-orchestra/log'
import { alteredAuditRecord, missingAuditRecord } from 'xo-common/api-errors'
import { createGzip } from 'zlib'
import { createSchedule } from '@xen-orchestra/cron'
import { fromCallback } from 'promise-toolbox'
import { pipeline } from 'readable-stream'
import {
@@ -19,8 +18,6 @@ const DEFAULT_BLOCKED_LIST = {
'acl.get': true,
'acl.getCurrentPermissions': true,
'audit.checkIntegrity': true,
'audit.clean': true,
'audit.deleteRange': true,
'audit.generateFingerprint': true,
'audit.getRecords': true,
'backup.list': true,
@@ -116,30 +113,6 @@ class Db extends Storage {
getLastId() {
return this.get(LAST_ID)
}

async clean() {
const db = this._db

// delete first so that a new chain can be constructed even if anything else fails
await db.del(LAST_ID)

return new Promise((resolve, reject) => {
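// Note: `count` starts at 1 to account for the key stream itself; every
// queued del() increments it, and the promise resolves only once the stream
// has ended and each pending del() has called back.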
let count = 1
const cb = () => {
if (--count === 0) {
resolve()
}
}
const deleteEntry = key => {
++count
db.del(key, cb)
}
db.createKeyStream()
.on('data', deleteEntry)
.on('end', cb)
.on('error', reject)
})
}
}

export const configurationSchema = {
@@ -162,15 +135,6 @@ class AuditXoPlugin {
this._cleaners = []
this._xo = xo

const { enabled = true, schedule: { cron = '0 6 * * *', timezone } = {} } =
staticConfig.lastHashUpload ?? {}

if (enabled) {
this._uploadLastHashJob = createSchedule(cron, timezone).createJob(() =>
this._uploadLastHash().catch(log.error)
)
}

this._auditCore = undefined
this._storage = undefined

@@ -229,31 +193,10 @@ class AuditXoPlugin {
oldest: { type: 'string', optional: true },
}

const uploadLastHashJob = this._uploadLastHashJob
if (uploadLastHashJob !== undefined) {
uploadLastHashJob.start()
cleaners.push(() => uploadLastHashJob.stop())
}

const clean = this._storage.clean.bind(this._storage)
clean.permission = 'admin'
clean.description = 'Clean audit database'

const deleteRange = this._deleteRangeAndRewrite.bind(this)
deleteRange.description =
'Delete a range of records and rewrite the records chain'
deleteRange.permission = 'admin'
deleteRange.params = {
newest: { type: 'string' },
oldest: { type: 'string', optional: true },
}

cleaners.push(
this._xo.addApiMethods({
audit: {
checkIntegrity,
clean,
deleteRange,
exportRecords,
generateFingerprint,
getRecords,
@@ -329,7 +272,7 @@ class AuditXoPlugin {
(req, res) => {
res.set({
'content-disposition': 'attachment',
'content-type': 'application/x-gzip',
'content-type': 'application/json',
})
return fromCallback(
pipeline,
@@ -342,7 +285,7 @@ class AuditXoPlugin {
{
suffix: `/audit-records-${new Date()
.toISOString()
.replace(/:/g, '_')}.ndjson.gz`,
.replace(/:/g, '_')}.gz`,
}
)
.then($getFrom => ({
@@ -350,60 +293,6 @@ class AuditXoPlugin {
}))
}

// See www-xo#344
async _uploadLastHash() {
const xo = this._xo

// In case of non-existent XOA plugin
if (xo.audit === undefined) {
return
}

const lastRecordId = await this._storage.getLastId()
if (lastRecordId === undefined) {
return
}

const chain = await xo.audit.getLastChain()

let lastValidHash
if (chain !== null) {
const hashes = chain.hashes
lastValidHash = hashes[hashes.length - 1]

if (lastValidHash === lastRecordId) {
return
}

// check the integrity of all stored hashes
try {
for (let i = 0; i < hashes.length - 1; ++i) {
await this._checkIntegrity({
oldest: hashes[i],
newest: hashes[i + 1],
})
}
} catch (error) {
if (!missingAuditRecord.is(error) && !alteredAuditRecord.is(error)) {
throw error
}

lastValidHash = undefined
}
}

// generate a valid fingerprint of all stored records in case of a failed integrity check
const { oldest, newest, error } = await this._generateFingerprint({
oldest: lastValidHash,
})

if (lastValidHash === undefined || error !== undefined) {
await xo.audit.startNewChain({ oldest, newest })
} else {
await xo.audit.extendLastChain({ oldest, newest })
}
}

async _checkIntegrity(props) {
const { oldest = NULL_ID, newest = await this._storage.getLastId() } = props
return this._auditCore.checkIntegrity(oldest, newest).catch(error => {
@@ -422,30 +311,19 @@ class AuditXoPlugin {
try {
return {
fingerprint: `${oldest}|${newest}`,
newest,
nValid: await this._checkIntegrity({ oldest, newest }),
oldest,
}
} catch (error) {
if (missingAuditRecord.is(error) || alteredAuditRecord.is(error)) {
return {
error,
fingerprint: `${error.data.id}|${newest}`,
newest,
nValid: error.data.nValid,
oldest: error.data.id,
error,
}
}
throw error
}
}

async _deleteRangeAndRewrite({ newest, oldest = newest }) {
await this._auditCore.deleteRangeAndRewrite(newest, oldest)
if (this._uploadLastHashJob !== undefined) {
await this._uploadLastHash()
}
}
}

AuditXoPlugin.prototype._getRecordsStream = asyncIteratorToStream(

@@ -1,6 +1,6 @@
{
"name": "xo-server-auth-ldap",
"version": "0.9.0",
"version": "0.8.1",
"license": "AGPL-3.0-or-later",
"description": "LDAP authentication plugin for XO-Server",
"keywords": [
@@ -34,7 +34,6 @@
"node": ">=10"
},
"dependencies": {
"@babel/plugin-proposal-optional-chaining": "^7.11.0",
"exec-promise": "^0.7.0",
"inquirer": "^7.0.0",
"ldapts": "^2.2.1",

@@ -32,12 +32,10 @@ export const configurationSchema = {
type: 'object',
properties: {
uri: {
title: 'URI',
description: 'URI of the LDAP server.',
type: 'string',
},
certificateAuthorities: {
title: 'Certificate Authorities',
description: `
Paths to CA certificates to use when connecting to SSL-secured LDAP servers.

@@ -49,24 +47,12 @@ If not specified, it will use a default set of well-known CAs.
},
},
checkCertificate: {
title: 'Check certificate',
description:
"Enforce the validity of the server's certificates. You can disable it when connecting to servers that use a self-signed certificate.",
type: 'boolean',
defaults: DEFAULTS.checkCertificate,
},
startTls: {
title: 'Use StartTLS',
type: 'boolean',
},
base: {
title: 'Base',
description:
'The base is the part of the description tree where the users and groups are looked for.',
type: 'string',
},
bind: {
title: 'Credentials',
description: 'Credentials to use before looking for the user record.',
type: 'object',
properties: {
@@ -88,8 +74,12 @@ For Microsoft Active Directory, it can also be \`<user>@<domain>\`.
},
required: ['dn', 'password'],
},
base: {
description:
'The base is the part of the description tree where the users are looked for.',
type: 'string',
},
filter: {
title: 'User filter',
description: `
Filter used to find the user.

@@ -112,67 +102,9 @@ Or something like this if you also want to filter by group:
type: 'string',
default: DEFAULTS.filter,
},
userIdAttribute: {
title: 'ID attribute',
description:
'Attribute used to map LDAP user to XO user. Must be unique. e.g.: `dn`',
type: 'string',
},
groups: {
title: 'Synchronize groups',
description: 'Import groups from LDAP directory',
type: 'object',
properties: {
base: {
title: 'Base',
description: 'Where to look for the groups.',
type: 'string',
},
filter: {
title: 'Filter',
description:
'Filter used to find the groups. e.g.: `(objectClass=groupOfNames)`',
type: 'string',
},
idAttribute: {
title: 'ID attribute',
description:
'Attribute used to map LDAP group to XO group. Must be unique. e.g.: `gid`',
type: 'string',
},
displayNameAttribute: {
title: 'Display name attribute',
description:
"Attribute used to determine the group's name in XO. e.g.: `cn`",
type: 'string',
},
membersMapping: {
title: 'Members mapping',
type: 'object',
properties: {
groupAttribute: {
title: 'Group attribute',
description:
'Attribute used to find the members of a group. e.g.: `memberUid`. The values must reference the user IDs (cf. user ID attribute)',
type: 'string',
},
userAttribute: {
title: 'User attribute',
description:
'User attribute used to match group members to the users. e.g.: `uidNumber`',
type: 'string',
},
},
required: ['groupAttribute', 'userAttribute'],
},
},
required: [
'base',
'filter',
'idAttribute',
'displayNameAttribute',
'membersMapping',
],
startTls: {
title: 'Use StartTLS',
type: 'boolean',
},
},
required: ['uri', 'base'],
@@ -234,18 +166,12 @@ class AuthLdap {
base: searchBase,
filter: searchFilter = DEFAULTS.filter,
startTls = false,
groups,
uri,
userIdAttribute,
} = conf

this._credentials = credentials
this._serverUri = uri
this._searchBase = searchBase
this._searchFilter = searchFilter
this._startTls = startTls
this._groupsConfig = groups
this._userIdAttribute = userIdAttribute
}

load() {
@@ -312,31 +238,7 @@ class AuthLdap {
`successfully bound as ${entry.dn} => ${username} authenticated`
)
logger(JSON.stringify(entry, null, 2))

let user
if (this._userIdAttribute === undefined) {
// Support legacy config
user = await this._xo.registerUser(undefined, username)
} else {
const ldapId = entry[this._userIdAttribute]
user = await this._xo.registerUser2('ldap', {
user: { id: ldapId, name: username },
})

const groupsConfig = this._groupsConfig
if (groupsConfig !== undefined) {
try {
await this._synchronizeGroups(
user,
entry[groupsConfig.membersMapping.userAttribute]
)
} catch (error) {
logger(`failed to synchronize groups: ${error.message}`)
}
}
}

return { userId: user.id }
return { username }
} catch (error) {
logger(`failed to bind as ${entry.dn}: ${error.message}`)
}
@@ -348,146 +250,6 @@ class AuthLdap {
await client.unbind()
}
}

// Synchronize user's groups OR all groups if no user is passed
async _synchronizeGroups(user, memberId) {
const logger = this._logger
const client = new Client(this._clientOpts)

try {
if (this._startTls) {
await client.startTLS(this._tlsOptions)
}

// Bind if necessary.
{
const { _credentials: credentials } = this
if (credentials) {
logger(`attempting to bind as ${credentials.dn}...`)
await client.bind(credentials.dn, credentials.password)
logger(`successfully bound as ${credentials.dn}`)
}
}
logger('syncing groups...')
const {
base,
displayNameAttribute,
filter,
idAttribute,
membersMapping,
} = this._groupsConfig
const { searchEntries: ldapGroups } = await client.search(base, {
scope: 'sub',
filter: filter || '', // may be undefined
})

const xoUsers =
user !== undefined &&
(await this._xo.getAllUsers()).filter(
user =>
user.authProviders !== undefined && 'ldap' in user.authProviders
)
const xoGroups = await this._xo.getAllGroups()

// For each LDAP group:
// - create/update/delete the corresponding XO group
// - add/remove the LDAP-provided users
// One by one to avoid race conditions
for (const ldapGroup of ldapGroups) {
const groupLdapId = ldapGroup[idAttribute]
const groupLdapName = ldapGroup[displayNameAttribute]

// Empty or undefined names/IDs are invalid
if (!groupLdapId || !groupLdapName) {
logger(`Invalid group ID (${groupLdapId}) or name (${groupLdapName})`)
continue
}

let ldapGroupMembers = ldapGroup[membersMapping.groupAttribute]
ldapGroupMembers = Array.isArray(ldapGroupMembers)
? ldapGroupMembers
: [ldapGroupMembers]

// If a user was passed, only update the user's groups
if (user !== undefined && !ldapGroupMembers.includes(memberId)) {
continue
}

let xoGroup
const xoGroupIndex = xoGroups.findIndex(
group =>
group.provider === 'ldap' && group.providerGroupId === groupLdapId
)

if (xoGroupIndex === -1) {
if (
xoGroups.find(group => group.name === groupLdapName) !== undefined
) {
// TODO: check against LDAP groups that are being created as well
logger(`A group called ${groupLdapName} already exists`)
continue
}
xoGroup = await this._xo.createGroup({
name: groupLdapName,
provider: 'ldap',
providerGroupId: groupLdapId,
})
} else {
// Remove it from xoGroups as we will then delete all the remaining
// LDAP-provided groups
;[xoGroup] = xoGroups.splice(xoGroupIndex, 1)
await this._xo.updateGroup(xoGroup.id, { name: groupLdapName })
xoGroup = await this._xo.getGroup(xoGroup.id)
}

// If a user was passed, only add that user to the group and don't
// delete any groups (ie return immediately)
if (user !== undefined) {
await this._xo.addUserToGroup(user.id, xoGroup.id)
continue
}

const xoGroupMembers =
xoGroup.users === undefined ? [] : xoGroup.users.slice(0)

for (const ldapId of ldapGroupMembers) {
const xoUser = xoUsers.find(
user => user.authProviders.ldap.id === ldapId
)
if (xoUser === undefined) {
continue
}
// If the user exists in XO, should be a member of the LDAP-provided
// group but isn't: add it
const userIdIndex = xoGroupMembers.findIndex(id => id === xoUser.id)
if (userIdIndex !== -1) {
xoGroupMembers.splice(userIdIndex, 1)
continue
}

await this._xo.addUserToGroup(xoUser.id, xoGroup.id)
}

// All the remaining users of that group can be removed from it since
// they're not in the LDAP group
for (const userId of xoGroupMembers) {
await this._xo.removeUserFromGroup(userId, xoGroup.id)
}
}

if (user === undefined) {
// All the remaining groups provided by LDAP can be removed from XO since
// they don't exist in the LDAP directory any more
await Promise.all(
xoGroups
.filter(group => group.provider === 'ldap')
.map(group => this._xo.deleteGroup(group.id))
)
}
} finally {
await client.unbind()
}
}
}

// ===================================================================

@@ -1,6 +1,6 @@
{
"name": "xo-server-backup-reports",
"version": "0.16.7",
"version": "0.16.6",
"license": "AGPL-3.0-or-later",
"description": "Backup reports plugin for XO-Server",
"keywords": [
@@ -38,7 +38,7 @@
"dependencies": {
"@xen-orchestra/defined": "^0.0.0",
"@xen-orchestra/log": "^0.2.0",
"human-format": "^0.11.0",
"human-format": "^0.10.0",
"lodash": "^4.13.1",
"moment-timezone": "^0.5.13"
},

@@ -272,7 +272,7 @@ class BackupReportsXoPlugin {
}
}

async _metadataHandler(log, { name: jobName }, schedule, force) {
async _metadataHandler(log, job, schedule, force) {
const xo = this._xo

const formatDate = createDateFormatter(schedule?.timezone)
@@ -290,7 +290,7 @@ class BackupReportsXoPlugin {
`## Global status: ${log.status}`,
'',
`- **Job ID**: ${log.jobId}`,
`- **Job name**: ${jobName}`,
`- **Job name**: ${job.name}`,
`- **Run ID**: ${log.id}`,
...getTemporalDataMarkdown(log.end, log.start, formatDate),
n !== 0 && `- **Successes**: ${nSuccesses} / ${n}`,
@@ -349,10 +349,12 @@ class BackupReportsXoPlugin {
markdown.push('---', '', `*${pkg.name} v${pkg.version}*`)

return this._sendReport({
job,
subject: `[Xen Orchestra] ${log.status} − Metadata backup report for ${
log.jobName
} ${STATUS_ICON[log.status]}`,
markdown: toMarkdown(markdown),
schedule,
success: log.status === 'success',
nagiosMarkdown:
log.status === 'success'
@@ -363,10 +365,10 @@ class BackupReportsXoPlugin {
})
}

async _ngVmHandler(log, { name: jobName, settings }, schedule, force) {
async _ngVmHandler(log, job, schedule, force) {
const xo = this._xo

const mailReceivers = get(() => settings[''].reportRecipients)
const mailReceivers = get(() => job.settings[''].reportRecipients)
const { reportWhen, mode } = log.data || {}

const formatDate = createDateFormatter(schedule?.timezone)
@@ -385,12 +387,17 @@ class BackupReportsXoPlugin {
'',
`*${pkg.name} v${pkg.version}*`,
]

const jobName = job.name

return this._sendReport({
subject: `[Xen Orchestra] ${
log.status
} − Backup report for ${jobName} ${STATUS_ICON[log.status]}`,
job,
mailReceivers,
markdown: toMarkdown(markdown),
schedule,
success: false,
nagiosMarkdown: `[Xen Orchestra] [${
log.status
@@ -513,8 +520,6 @@ class BackupReportsXoPlugin {
} else {
globalTransferSize += size
}
} else if (operationLog.status === 'success') {
return
}

const operationText = [
@@ -651,8 +656,10 @@ class BackupReportsXoPlugin {

markdown.push('---', '', `*${pkg.name} v${pkg.version}*`)
return this._sendReport({
job,
mailReceivers,
markdown: toMarkdown(markdown),
schedule,
subject: `[Xen Orchestra] ${log.status} − Backup report for ${jobName} ${
STATUS_ICON[log.status]
}`,
@@ -726,7 +733,9 @@ class BackupReportsXoPlugin {
markdown = markdown.join('\n')
return this._sendReport({
subject: `[Xen Orchestra] ${globalStatus} ${icon}`,
job,
markdown,
schedule,
success: false,
nagiosMarkdown: `[Xen Orchestra] [${globalStatus}] Error : ${error.message}`,
})
@@ -915,6 +924,7 @@ class BackupReportsXoPlugin {
markdown = markdown.join('\n')

return this._sendReport({
job,
markdown,
subject: `[Xen Orchestra] ${globalStatus} − Backup report for ${tag} ${
globalSuccess
@@ -923,6 +933,7 @@ class BackupReportsXoPlugin {
? ICON_FAILURE
: ICON_SKIPPED
}`,
schedule,
success: globalSuccess,
nagiosMarkdown: globalSuccess
? `[Xen Orchestra] [Success] Backup report for ${tag}`

@@ -22,7 +22,7 @@
},
"dependencies": {
"@xen-orchestra/cron": "^1.0.6",
"d3-time-format": "^3.0.0",
"d3-time-format": "^2.1.1",
"json5": "^2.0.1",
"lodash": "^4.17.4"
},

@@ -16,7 +16,7 @@
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build"
},
"version": "1.0.4",
"version": "0.4.3",
"engines": {
"node": ">=8.10"
},
@@ -30,9 +30,6 @@
},
"dependencies": {
"@xen-orchestra/log": "^0.2.0",
"@xen-orchestra/openflow": "^0.1.1",
"@vates/coalesce-calls": "^0.1.0",
"ipaddr.js": "^1.9.1",
"lodash": "^4.17.11",
"node-openssl-cert": "^0.0.117",
"promise-toolbox": "^0.15.0",

@@ -1,17 +1,15 @@
import assert from 'assert'
import createLogger from '@xen-orchestra/log'
import NodeOpenssl from 'node-openssl-cert'
import uuidv4 from 'uuid/v4'
import { access, constants, readFile, writeFile } from 'fs'
import { EventEmitter } from 'events'
import { filter, find, forOwn, map, omitBy } from 'lodash'
import { fromCallback, promisify } from 'promise-toolbox'
import { join } from 'path'
import { v4 as uuidv4 } from 'uuid'

import { OpenFlowChannel } from './protocol/openflow-channel'
import { OvsdbClient } from './protocol/ovsdb-client'
import { PrivateNetwork } from './private-network/private-network'
import { TlsHelper } from './utils/tls-helper'

// =============================================================================

@@ -50,10 +48,6 @@ export const configurationSchema = {

// =============================================================================

const noop = Function.prototype

// -----------------------------------------------------------------------------

const fileWrite = promisify(writeFile)
const fileRead = promisify(readFile)
async function fileExists(path) {
@@ -241,28 +235,12 @@ async function createTunnel(host, network) {
return
}

const encapsulation = otherConfig['xo:sdn-controller:encapsulation'] ?? 'gre'
try {
let tunnelRef
try {
tunnelRef = await host.$xapi.call(
'tunnel.create',
hostPif.$ref,
network.$ref,
encapsulation
)
} catch (error) {
if (error.code === 'MESSAGE_PARAMETER_COUNT_MISMATCH') {
// Before 8.2, protocol field did not exist, let's try without it!
tunnelRef = await host.$xapi.call(
'tunnel.create',
hostPif.$ref,
network.$ref
)
} else {
throw error
}
}
const tunnelRef = await host.$xapi.call(
'tunnel.create',
hostPif.$ref,
network.$ref
)
const tunnel = await host.$xapi._getOrWaitObject(tunnelRef)
await tunnel.$xapi._waitObjectState(
tunnel.access_PIF,
@@ -334,10 +312,6 @@ class SDNController extends EventEmitter {
- `status`:
- `active`: `true` if the corresponding OpenVSwitch bridge is correctly configured and working
- `key` : Corresponding OpenVSwitch bridge name (missing if `active` is `false`)

Attributes on VIFs (OpenFlow entries):
- `other_config`:
- `xo:sdn-controller:of-rules`: A list of OpenFlow entries to apply to this VIF
*/

constructor({ xo, getDataDir }) {
@@ -361,12 +335,6 @@ class SDNController extends EventEmitter {
this._prevVni = 0

this.ovsdbClients = {}
this.ofChannels = {}

this._tlsHelper = new TlsHelper()

this._handledTasks = []
this._managed = []
}

// ---------------------------------------------------------------------------
@@ -401,11 +369,10 @@ class SDNController extends EventEmitter {
fileRead(join(certDirectory, CLIENT_CERT)),
fileRead(join(certDirectory, CA_CERT)),
])
this._tlsHelper.updateCertificates(
this._clientKey,
this._clientCert,
this._caCert
)

forOwn(this.ovsdbClients, client => {
client.updateCertificates(this._clientKey, this._clientCert, this._caCert)
})
const updatedPools = []
await Promise.all(
map(this.privateNetworks, async privateNetworks => {
@@ -423,8 +390,7 @@ class SDNController extends EventEmitter {
}

async load() {
// ---------------- Private Network method ---------------------------------

// Expose method to create private network
const createPrivateNetwork = params =>
this._createPrivateNetwork({
encrypted: false,
@@ -455,41 +421,10 @@ class SDNController extends EventEmitter {
mtu: { type: 'integer', optional: true },
preferredCenterId: { type: 'string', optional: true },
}
createPrivateNetwork.permission = 'admin'

// ---------------- OpenFlow rules method ----------------------------------

const addRule = params => this._addRule(params)
addRule.description = 'Add an ACL rule to a VIF'
addRule.params = {
allow: { type: 'boolean' },
direction: { type: 'string' },
ipRange: { type: 'string', optional: true },
port: { type: 'integer', optional: true },
protocol: { type: 'string', optional: true },
vifId: { type: 'string' },
}
addRule.permission = 'admin'

const deleteRule = params => this._deleteRule(params)
deleteRule.description = 'Delete an ACL rule from a VIF'
deleteRule.params = {
direction: { type: 'string' },
ipRange: { type: 'string', optional: true },
port: { type: 'integer', optional: true },
protocol: { type: 'string', optional: true },
vifId: { type: 'string' },
}
deleteRule.permission = 'admin'

// -------------------------------------------------------------------------

this._unsetApiMethods = this._xo.addApiMethods({
sdnController: {
createPrivateNetwork,

addRule,
deleteRule,
},
})

@@ -521,10 +456,6 @@ class SDNController extends EventEmitter {
this._cleaners = []

this.ovsdbClients = {}
this.ofChannels = {}

this._handledTasks = []
this._managed = []

this._unsetApiMethods()
}
@@ -552,8 +483,7 @@ class SDNController extends EventEmitter {
this._cleaners.push(await this._manageXapi(xapi))
const hosts = filter(xapi.objects.all, { $type: 'host' })
for (const host of hosts) {
this._getOrCreateOvsdbClient(host)
this._getOrCreateOfChannel(host)
this._createOvsdbClient(host)
}

// Add already existing private networks
@@ -666,13 +596,6 @@ class SDNController extends EventEmitter {
await this._electNewCenter(privateNetwork)
})
)

// -----------------------------------------------------------------------

const vifs = filter(xapi.objects.all, { $type: 'VIF' })
for (const vif of vifs) {
await this._applyVifOfRules(vif)
}
} catch (error) {
log.error('Error while handling xapi connection', {
id: xapi.pool.uuid,
@@ -709,112 +632,6 @@ class SDNController extends EventEmitter {

// ===========================================================================

async _addRule({ allow, direction, ipRange = '', port, protocol, vifId }) {
const vif = this._xo.getXapiObject(this._xo.getObject(vifId, 'VIF'))
try {
assert(vif.currently_attached, 'VIF needs to be plugged to add rule')
await this._setPoolControllerIfNeeded(vif.$pool)

const client = this._getOrCreateOvsdbClient(vif.$VM.$resident_on)
const channel = this._getOrCreateOfChannel(vif.$VM.$resident_on)
const ofport = await client.getOfPortForVif(vif)
await channel.addRule(
vif,
allow,
protocol,
port,
ipRange,
direction,
ofport
)
const vifRules = vif.other_config['xo:sdn-controller:of-rules']
const newVifRules = vifRules !== undefined ? JSON.parse(vifRules) : []
const stringRule = JSON.stringify({
allow,
protocol,
port,
ipRange,
direction,
})
if (!newVifRules.includes(stringRule)) {
newVifRules.push(stringRule)
await vif.update_other_config(
'xo:sdn-controller:of-rules',
JSON.stringify(newVifRules)
)
}
} catch (error) {
log.error('Error while adding OF rule', {
error,
vif: vif.uuid,
host: vif.$VM.$resident_on.uuid,
allow,
protocol,
port,
ipRange,
direction,
})
}
}

async _deleteRule(
{ direction, ipRange = '', port, protocol, vifId },
updateOtherConfig = true
) {
let vif = this._xo.getXapiObject(this._xo.getObject(vifId, 'VIF'))
try {
await this._setPoolControllerIfNeeded(vif.$pool)

const client = this._getOrCreateOvsdbClient(vif.$VM.$resident_on)
const channel = this._getOrCreateOfChannel(vif.$VM.$resident_on)
const ofport = await client.getOfPortForVif(vif)
await channel.deleteRule(vif, protocol, port, ipRange, direction, ofport)
if (!updateOtherConfig) {
return
}

const vifRules = vif.other_config['xo:sdn-controller:of-rules']
if (vifRules === undefined) {
// Nothing to do
return
}

const newVifRules = JSON.parse(vifRules).filter(vifRule => {
const rule = JSON.parse(vifRule)
return (
rule.protocol !== protocol ||
rule.port !== port ||
rule.ipRange !== ipRange ||
rule.direction !== direction
)
})

await vif.update_other_config(
'xo:sdn-controller:of-rules',
Object.keys(newVifRules).length === 0
? null
: JSON.stringify(newVifRules)
)

vif = await vif.$xapi.barrier(vif.$ref)

// Put back rules that could have been wrongfully deleted because the delete rule is too general
await this._applyVifOfRules(vif)
} catch (error) {
log.error('Error while deleting OF rule', {
error,
vif: vif.uuid,
host: vif.$VM.$resident_on.uuid,
protocol,
port,
ipRange,
direction,
})
}
}

// ---------------------------------------------------------------------------

async _createPrivateNetwork({
poolIds,
pifIds,
@@ -844,6 +661,8 @@ class SDNController extends EventEmitter {

const privateNetwork = new PrivateNetwork(this, uuidv4(), preferredCenter)
for (const pool of pools) {
await this._setPoolControllerIfNeeded(pool)

const pifId = pifIds.find(id => {
const pif = this._xo.getXapiObject(this._xo.getObject(id, 'PIF'))
return pif.$pool.$ref === pool.$ref
@@ -884,11 +703,9 @@ class SDNController extends EventEmitter {
await Promise.all(
map(hosts, async host => {
await createTunnel(host, createdNetwork)
this._getOrCreateOvsdbClient(host)
this._getOrCreateOfChannel(host)
this._createOvsdbClient(host)
})
)
await this._setPoolControllerIfNeeded(pool)

await privateNetwork.addNetwork(createdNetwork)
this._networks.set(createdNetwork.$id, createdNetwork.$ref)
@@ -904,10 +721,6 @@ class SDNController extends EventEmitter {
// ---------------------------------------------------------------------------

async _manageXapi(xapi) {
if (this._managed.includes(xapi.pool.uuid)) {
return noop // pushed in _cleaners
}

const { objects } = xapi

const objectsRemovedXapi = this._objectsRemoved.bind(this, xapi)
@@ -916,7 +729,6 @@ class SDNController extends EventEmitter {
objects.on('remove', objectsRemovedXapi)

await this._installCaCertificateIfNeeded(xapi)
this._managed.push(xapi.pool.uuid)

return () => {
objects.removeListener('add', this._objectsAdded)
@@ -926,7 +738,7 @@ class SDNController extends EventEmitter {
}

_objectsAdded(objects) {
forOwn(objects, async object => {
forOwn(objects, object => {
const { $type } = object

if ($type === 'host') {
@@ -938,18 +750,7 @@ class SDNController extends EventEmitter {
if (!this._newHosts.some(_ => _.$ref === object.$ref)) {
this._newHosts.push(object)
}
this._getOrCreateOvsdbClient(object)
this._getOrCreateOfChannel(object)
} else if ($type === 'PIF') {
log.debug('New PIF', {
device: object.device,
host: object.$host.name_label,
network: object.$network.name_label,
pool: object.$pool.name_label,
})

const client = this.ovsdbClients[object.host]
client.setBridgeControllerForNetwork(object.$network)
this._createOvsdbClient(object)
}
})
}
@@ -964,10 +765,6 @@ class SDNController extends EventEmitter {
await this._hostUpdated(object)
} else if ($type === 'host_metrics') {
await this._hostMetricsUpdated(object)
} else if ($type === 'VM') {
await this._vmUpdated(object)
} else if ($type === 'VIF') {
await this._vifUpdated(object)
}
} catch (error) {
log.error('Error in _objectsUpdated', {
@@ -985,10 +782,6 @@ class SDNController extends EventEmitter {
this.ovsdbClients,
client => client.host.$id === id
)
this.ofChannels = omitBy(
this.ofChannels,
channel => channel.host.$id === id
)

// If a Star center host is removed: re-elect a new center where needed
const starCenterRef = this._starCenters.get(id)
@@ -1105,8 +898,6 @@ class SDNController extends EventEmitter {
})
}

this._setBridgeControllerForHost(host)

const privateNetworks = filter(
this.privateNetworks,
privateNetwork => privateNetwork[host.$pool.uuid] !== undefined
@@ -1137,64 +928,6 @@ class SDNController extends EventEmitter {
return this._hostUnreachable(ovsdbClient.host)
}

async _vmUpdated(vm) {
forOwn(vm.current_operations, async (value, key) => {
if (this._handledTasks.includes(key)) {
return
}

this._handledTasks.push(key)
// Clean before task ends
if (
value === 'migrate_send' ||
value === 'pool_migrate' ||
value === 'clean_reboot' ||
value === 'hard_reboot' ||
value === 'hard_shutdown' ||
value === 'clean_shutdown'
) {
await this._cleanOfRules(vm)
}

await vm.$xapi.watchTask(key).catch(noop)
// Re-apply rules after task ended
if (
value === 'migrate_send' ||
value === 'pool_migrate' ||
value === 'clean_reboot' ||
value === 'hard_reboot' ||
value === 'start' ||
value === 'start_on'
) {
vm = await vm.$xapi.barrier(vm.$ref)
await this._applyOfRules(vm)
}

this._handledTasks = filter(this._handledTasks, ref => ref !== key)
})
}

async _vifUpdated(vif) {
await Promise.all(
map(vif.current_operations, async (value, key) => {
if (this._handledTasks.includes(key)) {
return
}

this._handledTasks.push(key)
if (value === 'plug') {
await vif.$xapi.watchTask(key).catch(noop)
vif = await vif.$xapi.barrier(vif.$ref)
await this._applyVifOfRules(vif)
} else if (value === 'unplug' || value === 'unplug_force') {
await this._cleanVifOfRules(vif)
await vif.$xapi.watchTask(key).catch(noop)
}
this._handledTasks = filter(this._handledTasks, ref => ref !== key)
})
)
}

// ---------------------------------------------------------------------------

async _setPoolControllerIfNeeded(pool) {
@@ -1215,20 +948,9 @@ class SDNController extends EventEmitter {
})
}

const hosts = filter(pool.$xapi.objects.all, { $type: 'host' })
await Promise.all(
hosts.map(host => {
return this._setBridgeControllerForHost(host)
})
)
this._cleaners.push(await this._manageXapi(pool.$xapi))
}

_setBridgeControllerForHost(host) {
const client = this.ovsdbClients[host.$ref]
return client.setBridgeController()
}

// ---------------------------------------------------------------------------

async _installCaCertificateIfNeeded(xapi) {
@@ -1394,60 +1116,18 @@ class SDNController extends EventEmitter {

// ---------------------------------------------------------------------------

async _applyVifOfRules(vif) {
if (!vif.currently_attached) {
_createOvsdbClient(host) {
if (this.ovsdbClients[host.$ref] !== undefined) {
return
}

const vifRules = vif.other_config['xo:sdn-controller:of-rules']
const parsedRules = vifRules !== undefined ? JSON.parse(vifRules) : []
for (const stringRule of parsedRules) {
const rule = JSON.parse(stringRule)
await this._addRule({ ...rule, vifId: vif.$id })
}
}

async _cleanVifOfRules(vif) {
const vifRules = vif.other_config['xo:sdn-controller:of-rules']
const parsedRules = vifRules !== undefined ? JSON.parse(vifRules) : []
for (const stringRule of parsedRules) {
const rule = JSON.parse(stringRule)
await this._deleteRule({ ...rule, vifId: vif.$id }, false)
}
}

async _cleanOfRules(vm) {
for (const vif of vm.$VIFs) {
await this._cleanVifOfRules(vif)
}
}

async _applyOfRules(vm) {
for (const vif of vm.$VIFs) {
await this._applyVifOfRules(vif)
}
}

// ---------------------------------------------------------------------------

_getOrCreateOvsdbClient(host) {
let client = this.ovsdbClients[host.$ref]
if (client === undefined) {
client = new OvsdbClient(host, this._tlsHelper)
this.ovsdbClients[host.$ref] = client
}

return client
}

_getOrCreateOfChannel(host) {
let channel = this.ofChannels[host.$ref]
if (channel === undefined) {
channel = new OpenFlowChannel(host, this._tlsHelper)
this.ofChannels[host.$ref] = channel
}

return channel
const client = new OvsdbClient(
host,
this._clientKey,
this._clientCert,
this._caCert
)
this.ovsdbClients[host.$ref] = client
}
}


@@ -1,4 +1,3 @@
import assert from 'assert'
import createLogger from '@xen-orchestra/log'
import { filter, forOwn, sample } from 'lodash'

@@ -62,39 +61,13 @@ export class PrivateNetwork {
otherConfig['xo:sdn-controller:encrypted'] === 'true'
? createPassword()
: undefined
const pifDevice = otherConfig['xo:sdn-controller:pif-device']
const pifVlan = +otherConfig['xo:sdn-controller:vlan']
const hostPif = hostClient.host.$PIFs.find(
pif =>
pif?.device === pifDevice &&
pif.VLAN === pifVlan &&
pif.ip_configuration_mode !== 'None'
)
const centerPif = centerClient.host.$PIFs.find(
pif =>
pif?.device === pifDevice &&
pif.VLAN === pifVlan &&
pif.ip_configuration_mode !== 'None'
)
assert(hostPif !== undefined, 'No PIF found', {
privateNetwork: this.uuid,
pifDevice,
pifVlan,
host: host.name_label,
})
assert(centerPif !== undefined, 'No PIF found in center', {
privateNetwork: this.uuid,
pifDevice,
pifVlan,
host: this.center.name_label,
})

let bridgeName
try {
;[bridgeName] = await Promise.all([
hostClient.addInterfaceAndPort(
network,
centerPif.IP,
centerClient.host.address,
encapsulation,
vni,
password,
@@ -102,7 +75,7 @@ export class PrivateNetwork {
),
centerClient.addInterfaceAndPort(
centerNetwork,
hostPif.IP,
hostClient.host.address,
encapsulation,
vni,
password,

@@ -1,394 +0,0 @@
import createLogger from '@xen-orchestra/log'
import ipaddr from 'ipaddr.js'
import openflow from '@xen-orchestra/openflow'
import parse from '@xen-orchestra/openflow/parse-socket'

import { coalesceCalls } from '@vates/coalesce-calls'
import { EventEmitter } from 'events'
import { fromEvent } from 'promise-toolbox'

// =============================================================================

const log = createLogger('xo:xo-server:sdn-controller:openflow-controller')

const version = openflow.versions.openFlow11
const ofProtocol = openflow.protocols[version]
const OPENFLOW_PORT = ofProtocol.sslPort

// -----------------------------------------------------------------------------

const parseIp = ipAddress => {
if (ipAddress === '') {
return
}

let addr, mask
if (ipAddress.includes('/')) {
const ip = ipaddr.parseCIDR(ipAddress)
addr = ip[0].toString()
const maskOctets = ipaddr.IPv4.subnetMaskFromPrefixLength(ip[1]).octets
mask = ipaddr.fromByteArray(maskOctets.map(i => 255 - i)).toString() // Use wildcarded mask
} else {
// TODO: return ipAddress directly?
const ip = ipaddr.parse(ipAddress)
addr = ip.toString()
}

return { addr, mask }
}
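
// For illustration (not in the original source): parseIp('10.0.0.0/24')
// returns { addr: '10.0.0.0', mask: '0.0.0.255' }, i.e. the wildcarded form
// of the /24 subnet mask, while a bare address yields an undefined mask.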

const dlAndNwProtocolFromString = protocol => {
switch (protocol) {
case 'IP':
return { dlType: ofProtocol.dlType.ip }
case 'ICMP':
return {
dlType: ofProtocol.dlType.ip,
nwProto: ofProtocol.nwProto.icmp,
}
case 'TCP':
return {
dlType: ofProtocol.dlType.ip,
nwProto: ofProtocol.nwProto.tcp,
}
case 'UDP':
return {
dlType: ofProtocol.dlType.ip,
nwProto: ofProtocol.nwProto.udp,
}

case 'ARP':
return { dlType: ofProtocol.dlType.arp }
default:
return {} // TODO: Error?
}
}
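
// For illustration (not in the original source): dlAndNwProtocolFromString('TCP')
// returns { dlType: ofProtocol.dlType.ip, nwProto: ofProtocol.nwProto.tcp };
// unknown protocol names fall through to {}, leaving every field wildcarded.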
|
||||

// =============================================================================

export class OpenFlowChannel extends EventEmitter {
  /*
  Create an SSL connection to an XCP-ng host.
  Interact with the host's OpenVSwitch (OVS) daemon to manage its flows with OpenFlow11.
  See:
  - OpenFlow11 spec: https://www.opennetworking.org/wp-content/uploads/2014/10/openflow-spec-v1.1.0.pdf
  */

  constructor(host, tlsHelper) {
    super()

    this.host = host
    this._tlsHelper = tlsHelper
    this._coalesceConnect = coalesceCalls(this._connect)
    this._socket = undefined

    log.debug('New OpenFlow channel', {
      host: this.host.name_label,
    })
  }
  // ---------------------------------------------------------------------------

  async addRule(vif, allow, protocol, port, ipRange, direction, ofport) {
    log.info('Adding OF rule', {
      allow,
      protocol,
      port,
      ipRange,
      direction,
      vif: vif.uuid,
    })
    const instructions = [
      {
        type: ofProtocol.instructionType.applyActions,
        actions: allow
          ? [
              {
                type: ofProtocol.actionType.output,
                port: ofProtocol.port.normal,
              },
            ]
          : [],
      },
    ]

    const ip = parseIp(ipRange)
    const { dlType, nwProto } = dlAndNwProtocolFromString(protocol)
    const mac = vif.MAC

    await this._coalesceConnect()
    if (direction.includes('from')) {
      this._addFlow(
        {
          type: ofProtocol.matchType.standard,
          dl_type: dlType,
          dl_src: mac,
          nw_proto: nwProto,
          nw_dst: ip?.addr,
          nw_dst_mask: ip?.mask,
          tp_src: port,
          in_port: ofport,
        },
        instructions
      )

      if (nwProto !== undefined) {
        this._addFlow(
          {
            type: ofProtocol.matchType.standard,
            dl_type: dlType,
            dl_dst: mac,
            nw_proto: nwProto,
            nw_src: ip?.addr,
            nw_src_mask: ip?.mask,
            tp_dst: port,
          },
          instructions
        )
      }
    }
    if (direction.includes('to')) {
      if (nwProto !== undefined) {
        this._addFlow(
          {
            type: ofProtocol.matchType.standard,
            dl_type: dlType,
            dl_src: mac,
            nw_proto: nwProto,
            nw_dst: ip?.addr,
            nw_dst_mask: ip?.mask,
            tp_dst: port,
            in_port: ofport,
          },
          instructions
        )
      }
      this._addFlow(
        {
          type: ofProtocol.matchType.standard,
          dl_type: dlType,
          dl_dst: mac,
          nw_proto: nwProto,
          nw_src: ip?.addr,
          nw_src_mask: ip?.mask,
          tp_src: port,
        },
        instructions
      )
    }
  }
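To make the `direction`/`allow` semantics concrete, a hypothetical call (assuming `channel`, `vif` and `ofport` are in scope in the surrounding controller code):

```js
// Allow inbound TCP/443 to this VIF: installs flows matching the VIF's MAC on
// its OVS port with an 'output: NORMAL' action, plus the reverse match since
// the protocol is known.
await channel.addRule(vif, true, 'TCP', 443, '', 'to', ofport)
```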
  async deleteRule(vif, protocol, port, ipRange, direction, ofport) {
    log.info('Deleting OF rule', {
      protocol,
      port,
      ipRange,
      direction,
      vif: vif.uuid,
    })
    const ip = parseIp(ipRange)
    const { dlType, nwProto } = dlAndNwProtocolFromString(protocol)
    const mac = vif.MAC

    await this._coalesceConnect()
    if (direction.includes('from')) {
      this._removeFlows({
        type: ofProtocol.matchType.standard,
        dl_type: dlType,
        dl_src: mac,
        nw_proto: nwProto,
        nw_dst: ip?.addr,
        nw_dst_mask: ip?.mask,
        tp_src: port,
      })
      if (nwProto !== undefined) {
        this._removeFlows({
          type: ofProtocol.matchType.standard,
          dl_type: dlType,
          dl_dst: mac,
          nw_proto: nwProto,
          nw_src: ip?.addr,
          nw_src_mask: ip?.mask,
          tp_dst: port,
        })
      }
    }
    if (direction.includes('to')) {
      if (nwProto !== undefined) {
        this._removeFlows({
          type: ofProtocol.matchType.standard,
          dl_type: dlType,
          dl_src: mac,
          nw_proto: nwProto,
          nw_dst: ip?.addr,
          nw_dst_mask: ip?.mask,
          tp_dst: port,
        })
      }
      this._removeFlows({
        type: ofProtocol.matchType.standard,
        dl_type: dlType,
        dl_dst: mac,
        nw_proto: nwProto,
        nw_src: ip?.addr,
        nw_src_mask: ip?.mask,
        tp_src: port,
      })
    }
  }
  // ===========================================================================

  _processMessage(message) {
    if (message.header === undefined) {
      log.error('Failed to get header while processing message', {
        message,
      })
      return
    }

    const ofType = message.header.type
    switch (ofType) {
      case ofProtocol.type.hello:
        this._sendPacket(
          this._syncMessage(ofProtocol.type.hello, message.header.xid)
        )
        this._sendPacket(
          this._syncMessage(ofProtocol.type.featuresRequest, message.header.xid)
        )
        break
      case ofProtocol.type.error:
        {
          const { code, type } = message
          log.error('OpenFlow error', {
            code,
            type,
            // data: openflow.unpack(data),
          })
        }
        break
      case ofProtocol.type.echoRequest:
        this._sendPacket(
          this._syncMessage(ofProtocol.type.echoReply, message.header.xid)
        )
        break
      case ofProtocol.type.packetIn:
        log.debug('PACKET_IN')
        break
      case ofProtocol.type.featuresReply:
        {
          const { datapath_id: dpid, capabilities, ports } = message
          log.debug('FEATURES_REPLY', { dpid, capabilities, ports })
          this._sendPacket(
            this._syncMessage(
              ofProtocol.type.getConfigRequest,
              message.header.xid
            )
          )
        }
        break
      case ofProtocol.type.getConfigReply:
        {
          const { flags } = message
          log.debug('CONFIG_REPLY', { flags })
          this.emit('ofConnected')
        }
        break
      case ofProtocol.type.portStatus:
        log.debug('PORT_STATUS')
        break
      case ofProtocol.type.flowRemoved:
        log.debug('FLOW_REMOVED')
        break
      default:
        log.error('Unknown OpenFlow type', { ofType })
        break
    }
  }
  _addFlow(match, instructions) {
    const packet = this._flowModMessage(
      ofProtocol.flowModCommand.add,
      match,
      instructions
    )
    this._sendPacket(packet)
  }

  _removeFlows(match) {
    const packet = this._flowModMessage(ofProtocol.flowModCommand.delete, match)
    this._sendPacket(packet)
  }

  // ---------------------------------------------------------------------------

  _syncMessage(type, xid = 1) {
    return {
      header: {
        version,
        type,
        xid,
      },
    }
  }

  _flowModMessage(command, match, instructions = []) {
    // TODO: Do not use default priority?
    return {
      ...this._syncMessage(ofProtocol.type.flowMod),
      command,
      flags: ofProtocol.flowModFlags.sendFlowRem,
      match,
      instructions,
    }
  }
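Putting these helpers together, the object handed to `openflow.pack` looks roughly like this (a sketch; the symbolic constants come from `@xen-orchestra/openflow`, the MAC is a placeholder):

```js
// Approximate shape of a flow-mod as built by _addFlow above:
const flowMod = {
  header: { version, type: ofProtocol.type.flowMod, xid: 1 },
  command: ofProtocol.flowModCommand.add,
  flags: ofProtocol.flowModFlags.sendFlowRem, // switch notifies us on flow removal
  match: { type: ofProtocol.matchType.standard, dl_src: 'aa:bb:cc:dd:ee:ff' /* … */ },
  instructions: [{ type: ofProtocol.instructionType.applyActions, actions: [] }],
}
```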
  // ---------------------------------------------------------------------------

  _sendPacket(packet) {
    const buf = openflow.pack(packet)
    try {
      this._socket.write(buf)
    } catch (error) {
      log.error('Error while writing into socket', {
        error,
        host: this.host.name_label,
      })
    }
  }

  // ---------------------------------------------------------------------------

  async _parseMessages() {
    for await (const msg of parse(this._socket)) {
      if (msg.header !== undefined) {
        this._processMessage(msg)
      } else {
        log.error('Error: Message is unparseable', { msg })
      }
    }
  }

  async _connect() {
    if (this._socket !== undefined) {
      return
    }

    this._socket = await this._tlsHelper.connect(
      this.host.address,
      OPENFLOW_PORT
    )

    const deleteSocket = () => {
      this._socket = undefined
    }
    this._socket.on('error', deleteSocket)
    this._socket.on('end', deleteSocket)

    this._parseMessages().catch(error => {
      log.error('Error while parsing OF messages', error)
    })

    await fromEvent(this, 'ofConnected')
  }
}
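Since the constructor wraps `_connect` with `coalesceCalls`, concurrent rule operations share a single in-flight connection attempt; a hypothetical illustration:

```js
// Both public methods call this._coalesceConnect() first, so a single
// _connect() (one TLS handshake) serves both concurrent callers.
await Promise.all([
  channel.addRule(vif, true, 'TCP', 443, '', 'to', ofport),
  channel.deleteRule(vif, 'UDP', 53, '', 'from', ofport),
])
```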
@@ -1,15 +1,12 @@
import assert from 'assert'
import createLogger from '@xen-orchestra/log'
import fromEvent from 'promise-toolbox/fromEvent'
import { connect } from 'tls'
import { forOwn, toPairs } from 'lodash'

// =============================================================================

const log = createLogger('xo:xo-server:sdn-controller:ovsdb-client')

const OVSDB_PORT = 6640
const PROTOCOLS = 'OpenFlow11' // Supported OpenFlow versions
const TARGET = 'pssl:' // OpenFlow Controller target

// =============================================================================

@@ -17,10 +14,6 @@ function toMap(object) {
  return ['map', toPairs(object)]
}

function setFromSelect(object) {
  return object[0] === 'set' ? object[1] : [object]
}
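OVSDB encodes multi-valued columns as `['set', [...]]` but returns the bare value when there is exactly one, so `setFromSelect` normalizes both shapes to an array; for instance:

```js
// Hypothetical OVSDB column values and their normalized forms:
setFromSelect(['set', [['uuid', 'a'], ['uuid', 'b']]]) // → [['uuid', 'a'], ['uuid', 'b']]
setFromSelect(['uuid', 'a']) // → [['uuid', 'a']]
```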

// =============================================================================

export class OvsdbClient {
@@ -43,7 +36,7 @@ export class OvsdbClient {
  - `remote_ip`: Remote IP of the tunnel
  */

  constructor(host, tlsHelper) {
  constructor(host, clientKey, clientCert, caCert) {
    this._numberOfPortAndInterface = 0
    this._requestId = 0

@@ -51,7 +44,7 @@ export class OvsdbClient {

    this.host = host

    this._tlsHelper = tlsHelper
    this.updateCertificates(clientKey, clientCert, caCert)

    log.debug('New OVSDB client', {
      host: this.host.name_label,
@@ -60,6 +53,18 @@ export class OvsdbClient {

  // ---------------------------------------------------------------------------

  updateCertificates(clientKey, clientCert, caCert) {
    this._clientKey = clientKey
    this._clientCert = clientCert
    this._caCert = caCert

    log.debug('Certificates have been updated', {
      host: this.host.name_label,
    })
  }
  // ---------------------------------------------------------------------------

  async addInterfaceAndPort(
    network,
    remoteAddress,
@@ -281,160 +286,6 @@ export class OvsdbClient {
    socket.destroy()
  }

  async setBridgeController() {
    const socket = await this._connect()
    // Add controller to openvswitch table if needed
    const params = ['Open_vSwitch']

    params.push({
      op: 'insert',
      table: 'Controller',
      row: {
        target: TARGET,
      },
      'uuid-name': 'new_controller',
    })

    const networks = this.host.$PIFs.map(pif => pif?.$network)
    for (const network of networks) {
      // network can be undefined so we can't set its controller
      // It can happen if there's a ref problem within XAPI
      if (network === undefined) {
        continue
      }

      const bridge = await this._getBridgeForNetwork(network, socket)
      if (bridge.uuid === undefined) {
        continue
      }

      if (await this._bridgeAlreadyControlled(bridge, socket)) {
        continue
      }

      params.push({
        op: 'mutate',
        table: 'Bridge',
        where: [['_uuid', '==', ['uuid', bridge.uuid]]],
        mutations: [
          ['controller', 'insert', ['named-uuid', 'new_controller']],
          ['protocols', 'insert', PROTOCOLS],
        ],
      })
    }

    const jsonObjects = await this._sendOvsdbTransaction(params, socket)
    if (jsonObjects === undefined) {
      socket.destroy()
      return
    }
    if (jsonObjects[0].error !== null) {
      log.error('Error while setting controller', {
        error: jsonObjects[0].error,
        host: this.host.name_label,
      })
    } else {
      this._controllerUuid = jsonObjects[0].result[0].uuid[1]
      log.info('Controller set', { host: this.host.name_label })
    }

    socket.destroy()
  }
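For context, `_sendOvsdbTransaction` presumably wraps such params in an OVSDB JSON-RPC `transact` call (RFC 7047); a sketch of the frame for the Controller insert above, with an assumed `id` counter:

```js
// Hypothetical JSON-RPC frame sent to ovsdb-server on port 6640:
const request = {
  id: 0, // presumably this._requestId++
  method: 'transact',
  params: [
    'Open_vSwitch',
    {
      op: 'insert',
      table: 'Controller',
      row: { target: 'pssl:' },
      'uuid-name': 'new_controller',
    },
  ],
}
```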
  async setBridgeControllerForNetwork(network) {
    const socket = await this._connect()
    if (this._controllerUuid === undefined) {
      const where = [['target', '==', TARGET]]
      const selectResult = await this._select(
        'Controller',
        ['_uuid'],
        where,
        socket
      )

      this._controllerUuid = selectResult._uuid[1]
    }
    assert.notStrictEqual(this._controllerUuid, undefined)

    const bridge = await this._getBridgeForNetwork(network, socket)
    if (bridge.uuid === undefined) {
      socket.destroy()
      return
    }

    if (await this._bridgeAlreadyControlled(bridge, socket)) {
      socket.destroy()
      return
    }

    const mutateOperation = {
      op: 'mutate',
      table: 'Bridge',
      where: [['_uuid', '==', ['uuid', bridge.uuid]]],
      mutations: [
        ['controller', 'insert', ['uuid', this._controllerUuid]],
        ['protocols', 'insert', PROTOCOLS],
      ],
    }

    const params = ['Open_vSwitch', mutateOperation]
    const jsonObjects = await this._sendOvsdbTransaction(params, socket)
    if (jsonObjects === undefined) {
      socket.destroy()
      return
    }
    if (jsonObjects[0].error !== null) {
      log.error('Error while setting controller for network', {
        error: jsonObjects[0].error,
        host: this.host.name_label,
        network: network.name_label,
      })
    } else {
      log.info('Controller set for network', {
        controller: this._controllerUuid,
        host: this.host.name_label,
        network: network.name_label,
      })
    }

    socket.destroy()
  }
  async getOfPortForVif(vif) {
    const where = [
      ['external_ids', 'includes', toMap({ 'xs-vif-uuid': vif.uuid })],
    ]
    const socket = await this._connect()
    const selectResult = await this._select(
      'Interface',
      ['name', 'ofport'],
      where,
      socket,
      true // multiResult
    )
    if (selectResult === undefined) {
      log.error('No of port found for VIF', {
        network: vif.$network.name_label,
        host: this.host.name_label,
        vm: vif.$VM.name_label,
        vif: vif.uuid,
      })
      return
    }

    let ofport
    for (const row of selectResult) {
      if (!row.name.includes('tap')) {
        ofport = row.ofport
        break
      }
    }

    socket.destroy()
    return ofport
  }
  // ===========================================================================

  _parseJson(chunk) {
@@ -500,25 +351,6 @@ export class OvsdbClient {
    })
  }

  // ---------------------------------------------------------------------------

  async _bridgeAlreadyControlled(bridge, socket) {
    const where = [['_uuid', '==', ['uuid', bridge.uuid]]]
    let result = await this._select('Bridge', ['controller'], where, socket)
    const controllers = setFromSelect(result.controller)
    for (const controller of controllers) {
      const where = [['_uuid', '==', controller]]
      result = await this._select('Controller', ['target'], where, socket)
      if (result.target === TARGET) {
        return true
      }
    }

    return false
  }
  // ---------------------------------------------------------------------------

  async _getBridgeForNetwork(network, socket) {
    const where = [
      ['external_ids', 'includes', toMap({ 'xs-network-uuids': network.uuid })],
@@ -574,7 +406,9 @@
      return
    }

    return setFromSelect(selectResult.ports)
    return selectResult.ports[0] === 'set'
      ? selectResult.ports[1]
      : [selectResult.ports]
  }

  async _getPortInterfaces(portUuid, socket) {
@@ -589,7 +423,9 @@
      return
    }

    return setFromSelect(selectResult.interfaces)
    return selectResult.interfaces[0] === 'set'
      ? selectResult.interfaces[1]
      : [selectResult.interfaces]
  }
  async _interfaceHasRemote(interfaceUuid, remoteAddress, socket) {
@@ -615,12 +451,12 @@

  // ---------------------------------------------------------------------------

  async _select(table, columns, where, socket, multiResult = false) {
  async _select(table, columns, where, socket) {
    const selectOperation = {
      op: 'select',
      table,
      columns,
      where,
      table: table,
      columns: columns,
      where: where,
    }

    const params = ['Open_vSwitch', selectOperation]
@@ -651,10 +487,6 @@
      return
    }

    if (multiResult) {
      return jsonResult.rows
    }

    // For now all select operations should return only 1 row
    assert(
      jsonResult.rows.length === 1,
@@ -706,7 +538,37 @@
  // ---------------------------------------------------------------------------

  _connect() {
    return this._tlsHelper.connect(this.host.address, OVSDB_PORT)
  async _connect() {
    const options = {
      ca: this._caCert,
      key: this._clientKey,
      cert: this._clientCert,
      host: this.host.address,
      port: OVSDB_PORT,
      rejectUnauthorized: false,
      requestCert: false,
    }
    const socket = connect(options)

    try {
      await fromEvent(socket, 'secureConnect')
    } catch (error) {
      log.error('TLS connection failed', {
        error,
        code: error.code,
        host: this.host.name_label,
      })
      throw error
    }

    socket.on('error', error => {
      log.error('Socket error', {
        error,
        code: error.code,
        host: this.host.name_label,
      })
    })

    return socket
  }
}
@@ -1,54 +0,0 @@
import fromEvent from 'promise-toolbox/fromEvent'
import createLogger from '@xen-orchestra/log'
import { connect } from 'tls'

// =============================================================================

const log = createLogger('xo:xo-server:sdn-controller:tls-connect')

// =============================================================================

export class TlsHelper {
  updateCertificates(clientKey, clientCert, caCert) {
    this._clientKey = clientKey
    this._clientCert = clientCert
    this._caCert = caCert
    log.debug('Certificates have been updated')
  }

  // ---------------------------------------------------------------------------

  async connect(address, port) {
    const options = {
      ca: this._caCert,
      cert: this._clientCert,
      ciphers: 'DEFAULT:!DH',
      host: address,
      key: this._clientKey,
      port,
      rejectUnauthorized: false,
      requestCert: false,
    }
    const socket = connect(options)
    try {
      await fromEvent(socket, 'secureConnect')
    } catch (error) {
      log.error('TLS connection failed', {
        error,
        address,
        port,
      })
      throw error
    }

    socket.on('error', error => {
      log.error('Socket error', {
        error,
        address,
        port,
      })
    })

    return socket
  }
}
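A minimal usage sketch of this helper (the certificate variables are placeholders loaded elsewhere):

```js
// Hypothetical wiring: one TlsHelper shared by the OVSDB and OpenFlow channels.
const tlsHelper = new TlsHelper()
tlsHelper.updateCertificates(clientKey, clientCert, caCert)
const socket = await tlsHelper.connect('192.0.2.10', 6640) // 6640 = OVSDB_PORT
```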
@@ -2,9 +2,6 @@
#
# See sample.config.toml to override.

# The clone of a template can exceed the jest timeout if it's used by other tests to create a VM
cloneTempVmTimeout = '1 minute'

# After some executions we saw that `deleteTempResources` takes around `21s`,
# so we chose a large timeout to be sure that all resources created by `xo-server-test`
# will be deleted.
@@ -32,7 +32,7 @@
    "@babel/preset-env": "^7.1.6",
    "@iarna/toml": "^2.2.1",
    "@vates/parse-duration": "^0.1.0",
    "app-conf": "^0.8.0",
    "app-conf": "^0.7.0",
    "babel-plugin-lodash": "^3.2.11",
    "golike-defer": "^0.4.1",
    "jest": "^24.8.0",
@@ -2,7 +2,7 @@
import defer from 'golike-defer'
import Xo from 'xo-lib'
import XoCollection from 'xo-collection'
import { defaultsDeep, find, forOwn, iteratee, pick } from 'lodash'
import { defaultsDeep, find, forOwn, pick } from 'lodash'
import { fromEvent } from 'promise-toolbox'
import { parseDuration } from '@vates/parse-duration'
@@ -84,15 +84,6 @@ class XoConnection extends Xo {

  async waitObjectState(id, predicate) {
    let obj = this._objects.all[id]
    if (typeof predicate !== 'function') {
      const fn = iteratee(predicate)
      predicate = () => {
        if (!fn(obj)) {
          throw new Error('retry')
        }
      }
    }

    while (true) {
      try {
        await predicate(obj)
@@ -109,12 +100,6 @@ class XoConnection extends Xo {
    return id
  }

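Because the non-function branch goes through lodash's `iteratee`, callers can pass a plain object (or any lodash shorthand) instead of a predicate function; a sketch assuming a `vmId` in scope:

```js
// Lodash-style shorthand: retry until the cached VM matches the given properties.
await xo.waitObjectState(vmId, { power_state: 'Running' })

// Equivalent explicit predicate: throw to trigger another retry.
await xo.waitObjectState(vmId, vm => {
  if (vm.power_state !== 'Running') {
    throw new Error('retry')
  }
})
```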
  async createTempResourceSet(params) {
    const { id } = await xo.call('resourceSet.create', params)
    this._tempResourceDisposers.push('resourceSet.delete', { id })
    return id
  }

  async getUser(id) {
    return find(await super.call('user.getAll'), { id })
  }
@@ -170,16 +155,6 @@ class XoConnection extends Xo {
    })
  }

  async cloneTempVm(id) {
    const clonedVmId = await this.call('vm.clone', {
      full_copy: false,
      id,
      name: getDefaultName(),
    })
    this._durableResourceDisposers.push('vm.delete', { id: clonedVmId })
    return this.getOrWaitObject(clonedVmId)
  }

  async startTempVm(id, params, withXenTools = false) {
    await this.call('vm.start', { id, ...params })
    this._tempResourceDisposers.push('vm.stop', { id, force: true })
@@ -1,7 +1,5 @@
/* eslint-env jest */

import { parseDuration } from '@vates/parse-duration'

import config from '../_config'
import xo from '../_xoConnection'
@@ -50,85 +48,4 @@ describe('issue', () => {

    await xo.call('network.delete', { id })
  })

  describe('4980', () => {
    let template
    beforeAll(async () => {
      jest.setTimeout(parseDuration(config.cloneTempVmTimeout))
      template = await xo.cloneTempVm(config.templates.default)
    })

    const bootOrder = 'cd'
    const virtualizationMode = 'hvm'
    beforeAll(async () => {
      await Promise.all([
        xo.call('vm.set', {
          id: template.id,
          virtualizationMode,
        }),
        xo.call('vm.setBootOrder', { vm: template.id, order: bootOrder }),
      ])
      await xo.waitObjectState(template.id, {
        virtualizationMode,
        boot: {
          order: bootOrder,
        },
      })
    })

    test('create vm with disks should keep the template boot order', async () => {
      const vm = await xo.createTempVm({
        template: template.id,
        VDIs: [
          {
            size: 1,
            SR: config.srs.default,
            type: 'user',
          },
        ],
      })
      expect(vm.boot.order).toBe(bootOrder)
    })

    test('create vm without disks should make network boot the first option', async () => {
      const vm = await xo.createTempVm({
        template: template.id,
      })
      expect(vm.boot.order).toBe('n' + bootOrder)
    })

    test('create vm with disks and network installation should make network boot the first option', async () => {
      const vm = await xo.createTempVm({
        template: template.id,
        installation: {
          method: 'network',
          repository: 'pxe',
        },
        VDIs: [
          {
            size: 1,
            SR: config.srs.default,
            type: 'user',
          },
        ],
      })
      expect(vm.boot.order).toBe('n' + bootOrder)
    })
  })

  describe('5265', () => {
    const rsName = 'xo-server-test resource set'
    const subjects = ['one', 'two', 'three']
    test('resourceSet.removeSubject call', async () => {
      const id = await xo.createTempResourceSet({
        name: rsName,
        subjects: subjects,
      })

      await xo.call('resourceSet.removeSubject', {
        id,
        subject: subjects[0],
      })
    })
  })
})
@@ -1,6 +1,14 @@
import assert from 'assert'
import { URL } from 'url'

const RE = /(\\*)\{([^}]+)\}/g
const evalTemplate = (template, fn) =>
  template.replace(RE, (match, escape, key) => {
    const n = escape.length
    const escaped = n % 2 !== 0
    // an odd number of backslashes escapes the brace: unescape and keep it literal
    return escaped ? match.slice((n + 1) / 2) : escape.slice(n / 2) + fn(key)
  })
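A quick illustration of the intended template semantics (hypothetical placeholder values; `compileTemplate` used further down presumably binds such a key lookup to the rules object it receives):

```js
const values = { jobName: 'backup-nightly', scheduleId: '42' }
evalTemplate('{jobName}/{scheduleId}', key => values[key]) // → 'backup-nightly/42'
evalTemplate('literal \\{jobName}', key => values[key]) // → 'literal {jobName}'
```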

// =============================================================================

export const configurationSchema = {
@@ -83,7 +91,14 @@ class XoServerIcinga2 {
    this._url = serverUrl.href

    this._filter =
      configuration.filter !== undefined ? configuration.filter : ''
      configuration.filter !== undefined
        ? compileTemplate(configuration.filter, {
            jobId: _ => _.job.id,
            jobName: _ => _.job.name,
            scheduleId: _ => _.schedule.id,
            scheduleName: _ => _.schedule.name,
          })
        : ''
    this._acceptUnauthorized = configuration.acceptUnauthorized
  }

@@ -1,6 +1,6 @@
{
  "name": "xo-server-usage-report",
  "version": "0.9.0",
  "version": "0.8.0",
  "license": "AGPL-3.0-or-later",
  "description": "Report resources usage with their evolution",
  "keywords": [
@@ -41,7 +41,7 @@
    "csv-stringify": "^5.5.0",
    "handlebars": "^4.0.6",
    "html-minifier": "^4.0.0",
    "human-format": "^0.11.0",
    "human-format": "^0.10.0",
    "lodash": "^4.17.4",
    "promise-toolbox": "^0.15.0"
  },
@@ -405,26 +405,16 @@ async function getSrsStats({ xo, xoObjects }) {
}

function computeGlobalVmsStats({ haltedVms, vmsStats, xo }) {
  const allVms = vmsStats.map(vm => ({
    uuid: vm.uuid,
    name: vm.name,
  }))

  haltedVms.forEach(vm => {
    const isReplication =
      'start' in vm.blockedOperations &&
      vm.tags.some(
        tag => tag === 'Disaster Recovery' || tag === 'Continuous Replication'
      )

    // Exclude replicated VMs because they keep being created/destroyed due to the implementation
    if (!isReplication) {
      allVms.push({
        uuid: vm.uuid,
        name: vm.name_label,
      })
    }
  })
  const allVms = concat(
    map(vmsStats, vm => ({
      uuid: vm.uuid,
      name: vm.name,
    })),
    map(haltedVms, vm => ({
      uuid: vm.uuid,
      name: vm.name_label,
    }))
  )

  return Object.assign(
    computeMeans(vmsStats, [
@@ -436,7 +426,7 @@ function computeGlobalVmsStats({ haltedVms, vmsStats, xo }) {
      'netTransmission',
    ]),
    {
      number: vmsStats.length + haltedVms.length,
      number: allVms.length,
      allVms,
    }
  )

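The two variants differ in what `number` ends up counting: the `forEach` version filters replicated VMs out of `allVms` first, so they no longer inflate the total. A sketch of the predicate it relies on:

```js
// Hypothetical halted VM that the forEach variant would exclude from allVms:
const vm = {
  blockedOperations: { start: 'Replicated VM' },
  tags: ['Continuous Replication'],
}
const isReplication =
  'start' in vm.blockedOperations &&
  vm.tags.some(tag => tag === 'Disaster Recovery' || tag === 'Continuous Replication')
// isReplication === true → the VM is skipped, so `number` no longer counts it
```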
Some files were not shown because too many files have changed in this diff.