Compare commits

..

13 Commits

Author SHA1 Message Date
Julien Fontanet
7174499228 WiP 2021-05-25 16:25:39 +02:00
Julien Fontanet
05aefa1d5c chore: update to http-request-plus@0.10.0 2021-05-25 14:35:52 +02:00
Julien Fontanet
059843f030 chore: update dev deps 2021-05-25 14:22:58 +02:00
Julien Fontanet
e202dc9851 fix(docs): use correct bin with forever-service 2021-05-23 18:53:26 +02:00
Pierre Donias
18ae664ba7 feat(xo-server-netbox): new plugin to synchronize pools with Netbox (#5783)
Fixes #5633
2021-05-21 19:39:02 +02:00
Julien Fontanet
76b563fa88 feat(xo-web/vm/console): make multiline clipboard input monospaced 2021-05-21 14:21:33 +02:00
Julien Fontanet
2553f4c161 feat(xo-web/host/install-certificate): make inputs monospaced 2021-05-21 14:20:56 +02:00
Julien Fontanet
f35c865348 feat(xo-web): SSH key input monospaced 2021-05-21 14:19:50 +02:00
Julien Fontanet
b873ba3a75 feat(xo-web): make CloudConfig inputs monospaced
Fixed #5786
2021-05-21 14:15:12 +02:00
Julien Fontanet
d49e388ea3 feat(xo-server/registerPlugin): log plugin metadata errors 2021-05-21 14:00:25 +02:00
Julien Fontanet
b931699175 feat(xo-server/registerPlugin): don't fail on JSON parsing errors 2021-05-21 14:00:06 +02:00
Julien Fontanet
55fd58efd8 fix(xo-server): reading plugin metadata
Fixes #5782
2021-05-21 13:58:32 +02:00
Julien Fontanet
773847e139 feat(xo-server,xo-proxy): add backupId to restore tasks 2021-05-21 13:50:27 +02:00
43 changed files with 1740 additions and 798 deletions

View File

@@ -18,6 +18,17 @@ const wrapCall = (fn, arg, thisArg) => {
* @returns {Promise<Item[]>}
*/
exports.asyncMap = function asyncMap(iterable, mapFn, thisArg = iterable) {
let onError
if (onError !== undefined) {
const original = mapFn
mapFn = async function () {
try {
return await original.apply(this, arguments)
} catch (error) {
return onError.call(this, error, ...arguments)
}
}
}
return Promise.all(Array.from(iterable, mapFn, thisArg))
}

View File

@@ -543,6 +543,40 @@ class RemoteAdapter {
async readVmBackupMetadata(path) {
return Object.defineProperty(JSON.parse(await this._handler.readFile(path)), '_filename', { value: path })
}
async writeFullVmBackup({ jobId, mode, scheduleId, timestamp, vm, vmSnapshot, xva }, sizeContainer, stream) {
const basename = formatFilenameDate(timestamp)
const dataBasename = basename + '.xva'
const dataFilename = backupDir + '/' + dataBasename
const metadataFilename = `${backupDir}/${basename}.json`
const metadata = {
jobId: job.id,
mode: job.mode,
scheduleId,
timestamp,
version: '2.0.0',
vm,
vmSnapshot: this._backup.exportedVm,
xva: './' + dataBasename,
}
const { deleteFirst } = settings
if (deleteFirst) {
await deleteOldBackups()
}
await adapter.outputStream(stream, dataFilename, {
validator: tmpPath => {
if (handler._getFilePath !== undefined) {
return isValidXva(handler._getFilePath('/' + tmpPath))
}
},
})
metadata.size = sizeContainer.size
await handler.outputFile(metadataFilename, JSON.stringify(metadata))
}
}
Object.assign(RemoteAdapter.prototype, {

View File

@@ -1,8 +1,11 @@
const CancelToken = require('promise-toolbox/CancelToken.js')
const Zone = require('node-zone')
// Thrown when a log entry is pushed to a task that has already ended.
// The task id and the offending log entry are attached to ease debugging.
// NOTE: must be invoked with `this` bound to the task (uses `this.id`).
// FIX: the stale pre-refactor arrow version (a duplicate `const logAfterEnd`
// declaration with an unbalanced brace) has been removed — two `const`
// declarations of the same name in one scope are a SyntaxError.
const logAfterEnd = function (log) {
  const error = new Error('task has already ended:' + this.id)
  error.result = log.result
  error.log = log
  throw error
}
// Shared no-op: calling Function.prototype returns undefined and allocates
// nothing (avoids creating a fresh `() => {}` at each use site).
const noop = Function.prototype
@@ -44,11 +47,19 @@ class Task {
}
}
get id() {
return this.#id
}
#cancelToken
#id = Math.random().toString(36).slice(2)
#onLog
#zone
get id() {
return this.#id
}
constructor({ name, data, onLog }) {
let parentCancelToken, parentId
if (onLog === undefined) {
@@ -100,6 +111,8 @@ class Task {
run(fn, last = false) {
return this.#zone.run(() => {
try {
this.#cancelToken.throwIfRequested()
const result = fn()
let then
if (result != null && typeof (then = result.then) === 'function') {

View File

@@ -1,4 +1,5 @@
const assert = require('assert')
// const asyncFn = require('promise-toolbox/asyncFn')
const findLast = require('lodash/findLast.js')
const ignoreErrors = require('promise-toolbox/ignoreErrors.js')
const keyBy = require('lodash/keyBy.js')
@@ -143,6 +144,7 @@ exports.VmBackup = class VmBackup {
const doSnapshot =
this._isDelta || (!settings.offlineBackup && vm.power_state === 'Running') || settings.snapshotRetention !== 0
console.log({ doSnapshot })
if (doSnapshot) {
await Task.run({ name: 'snapshot' }, async () => {
if (!settings.bypassVdiChainsCheck) {
@@ -181,6 +183,7 @@ exports.VmBackup = class VmBackup {
await this._callWriters(writer => writer.prepare({ isFull }), 'writer.prepare()')
const deltaExport = await exportDeltaVm(exportedVm, baseVm, {
cancelToken: Task.cancelToken,
fullVdisRequired,
})
const sizeContainers = mapValues(deltaExport.streams, stream => watchStreamSize(stream))
@@ -226,6 +229,7 @@ exports.VmBackup = class VmBackup {
async _copyFull() {
const { compression } = this.job
const stream = await this._xapi.VM_export(this.exportedVm.$ref, {
cancelToken: Task.cancelToken,
compress: Boolean(compression) && (compression === 'native' ? 'gzip' : 'zstd'),
useSnapshot: false,
})
@@ -330,10 +334,22 @@ exports.VmBackup = class VmBackup {
this._baseVm = baseVm
this._fullVdisRequired = fullVdisRequired
Task.info('base data', {
vm: baseVm.uuid,
fullVdisRequired: Array.from(fullVdisRequired),
})
}
run = defer(this.run)
async run($defer) {
this.exportedVm = this.vm
this.timestamp = Date.now()
const doSnapshot = this._isDelta || vm.power_state === 'Running' || settings.snapshotRetention !== 0
if (!this._isDelta) {
}
const settings = this._settings
assert(
!settings.offlineBackup || settings.snapshotRetention === 0,
@@ -380,3 +396,6 @@ exports.VmBackup = class VmBackup {
}
}
}
// const { prototype } = exports.VmBackup
// prototype.run = asyncFn.cancelable(prototype.run)

View File

@@ -29,7 +29,6 @@
"get-stream": "^6.0.0",
"limit-concurrency-decorator": "^0.5.0",
"lodash": "^4.17.4",
"minio": "^7.0.18",
"promise-toolbox": "^0.19.2",
"proper-lockfile": "^4.1.2",
"readable-stream": "^3.0.6",

View File

@@ -1,7 +1,6 @@
import aws from '@sullux/aws-sdk'
import assert from 'assert'
import https from 'https'
import { Client as Minio } from 'minio'
import http from 'http'
import { parse } from 'xo-remote-parser'
import RemoteHandlerAbstract from './abstract'
@@ -11,10 +10,13 @@ import RemoteHandlerAbstract from './abstract'
// limits: https://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html
const MIN_PART_SIZE = 1024 * 1024 * 5 // 5MB
const MAX_PART_SIZE = 1024 * 1024 * 1024 * 5 // 5GB
const MAX_PARTS_COUNT = 10000
const MAX_OBJECT_SIZE = 1024 * 1024 * 1024 * 1024 * 5 // 5TB
const IDEAL_FRAGMENT_SIZE = Math.ceil(MAX_OBJECT_SIZE / MAX_PARTS_COUNT) // the smallest fragment size that still allows a 5TB upload in 10000 fragments, about 524MB
export default class S3Handler extends RemoteHandlerAbstract {
constructor(remote, _opts) {
super(remote)
const { host, hostname, port, path, username, password, protocol, region } = parse(remote.url)
const { host, path, username, password, protocol, region } = parse(remote.url)
const params = {
accessKeyId: username,
apiVersion: '2006-03-01',
@@ -27,12 +29,8 @@ export default class S3Handler extends RemoteHandlerAbstract {
},
}
if (protocol === 'http') {
throw new Error('HTTP no longer supported, please use HTTPS')
} else {
params.httpOptions.agent = new https.Agent({
// TODO : UI checkbox
rejectUnauthorized: false,
})
params.httpOptions.agent = new http.Agent()
params.sslEnabled = false
}
if (region !== undefined) {
params.region = region
@@ -40,15 +38,6 @@ export default class S3Handler extends RemoteHandlerAbstract {
this._s3 = aws(params).s3
this._minioClient = new Minio({
endPoint: hostname,
port: port !== undefined ? +port : undefined,
useSSL: protocol !== 'http',
accessKey: username,
secretKey: password,
})
// TODO : UI checkbox
this._minioClient.setRequestOptions({ rejectUnauthorized: false })
const splitPath = path.split('/').filter(s => s.length)
this._bucket = splitPath.shift()
this._dir = splitPath.join('/')
@@ -84,7 +73,13 @@ export default class S3Handler extends RemoteHandlerAbstract {
}
async _outputStream(path, input, { validator }) {
await this._minioClient.putObject(this._bucket, this._dir + path, input)
await this._s3.upload(
{
...this._createParams(path),
Body: input,
},
{ partSize: IDEAL_FRAGMENT_SIZE, queueSize: 1 }
)
if (validator !== undefined) {
try {
await validator.call(this, path)

View File

@@ -33,7 +33,7 @@
"content-type": "^1.0.4",
"cson-parser": "^4.0.7",
"getopts": "^2.2.3",
"http-request-plus": "^0.9.1",
"http-request-plus": "^0.10.0",
"json-rpc-protocol": "^0.13.1",
"promise-toolbox": "^0.19.2",
"pump": "^3.0.0",

View File

@@ -140,6 +140,16 @@ ${pkg.name} v${pkg.version}`
}
}
// Handle a `{ $import: path }` node: read the file synchronously, pick the
// parser matching its extension and feed the parsed content back into `visit`.
const $import = ({ $import: path }) => {
  const contents = fs.readFileSync(path, 'utf8')
  const format = extname(path).slice(1).toLowerCase()
  const parser = FORMATS[format]
  if (parser === undefined) {
    throw new Error(`unsupported file: ${path}`)
  }
  return visit(parser(contents))
}
const seq = async seq => {
const j = callPath.length
for (let i = 0, n = seq.length; i < n; ++i) {
@@ -153,17 +163,13 @@ ${pkg.name} v${pkg.version}`
if (Array.isArray(node)) {
return seq(node)
}
return call(node)
const keys = Object.keys(node)
return keys.length === 1 && keys[0] === '$import' ? $import(node) : call(node)
}
let node
if (file !== '') {
const data = fs.readFileSync(file, 'utf8')
const ext = extname(file).slice(1).toLowerCase()
const parse = FORMATS[ext]
if (parse === undefined) {
throw new Error(`unsupported file: ${file}`)
}
await visit(parse(data))
node = { $import: file }
} else {
const method = args[0]
const params = {}
@@ -176,8 +182,9 @@ ${pkg.name} v${pkg.version}`
params[param.slice(0, j)] = parseValue(param.slice(j + 1))
}
await call({ method, params })
node = { method, params }
}
await visit(node)
}
main(process.argv.slice(2)).then(
() => {

View File

@@ -1,3 +1,5 @@
import Cancel from 'promise-toolbox/Cancel'
import CancelToken from 'promise-toolbox/CancelToken'
import Disposable from 'promise-toolbox/Disposable.js'
import fromCallback from 'promise-toolbox/fromCallback.js'
import { asyncMap } from '@xen-orchestra/async-map'
@@ -95,7 +97,8 @@ export default class Backups {
error.jobId = jobId
throw error
}
runningJobs[jobId] = true
const source = CancelToken.source()
runningJobs[jobId] = source.cancel
try {
return await run.apply(this, arguments)
} finally {
@@ -187,6 +190,7 @@ export default class Backups {
Task.run(
{
data: {
backupId,
jobId: metadata.jobId,
srId: srUuid,
time: metadata.timestamp,

View File

@@ -0,0 +1,38 @@
import { asyncMapSettled } from '@xen-orchestra/async-map'
// Registry of running tasks, exposed through the `task.*` API methods.
// Tasks are any objects with an `id` and an async `cancel()` method.
export default class Task {
  // taskId → task
  #tasks = new Map()

  constructor(app) {
    // local alias so the API method closures below don't depend on `this`
    const tasks = this.#tasks

    app.api.addMethods({
      task: {
        // enumerate the ids of all registered tasks
        *list() {
          for (const id of tasks.keys()) {
            yield { id }
          }
        },
        cancel: [
          ({ taskId }) => this.cancel(taskId),
          {
            params: {
              taskId: { type: 'string' },
            },
          },
        ],
      },
    })

    // cancel every remaining task when the application stops
    app.hooks.on('stop', () => asyncMapSettled(tasks.values(), task => task.cancel()))
  }

  async cancel(taskId) {
    // FIX: was `this.tasks` (always undefined) instead of the `#tasks` field,
    // which made every cancellation throw a TypeError
    await this.#tasks.get(taskId).cancel()
  }

  register(task) {
    this.#tasks.set(task.id, task)
  }
}

View File

@@ -36,7 +36,7 @@
"fs-extra": "^9.0.0",
"fs-promise": "^2.0.3",
"get-stream": "^6.0.0",
"http-request-plus": "^0.8.0",
"http-request-plus": "^0.10.0",
"human-format": "^0.11.0",
"l33teral": "^3.0.3",
"lodash": "^4.17.4",

View File

@@ -9,6 +9,7 @@
- [Metadata Backup] Add a warning on restoring a metadata backup (PR [#5769](https://github.com/vatesfr/xen-orchestra/pull/5769))
- [SAML] Compatible with users created with other authentication providers (PR [#5781](https://github.com/vatesfr/xen-orchestra/pull/5781))
- [Netbox] [Plugin](https://xen-orchestra.com/docs/advanced.html#netbox) to synchronize pools, VMs and IPs with [Netbox](https://netbox.readthedocs.io/en/stable/) (PR [#5783](https://github.com/vatesfr/xen-orchestra/pull/5783))
### Bug fixes
@@ -21,7 +22,13 @@
> Packages will be released in the order they are here, therefore, they should
> be listed by inverse order of dependency.
>
> Rule of thumb: add packages on top.
> Global order:
>
> - @vates/...
> - @xen-orchestra/...
> - xo-server-...
> - xo-server
> - xo-web
>
> The format is the following: - `$packageName` `$version`
>
@@ -40,5 +47,6 @@
- xen-api minor
- xo-server-auth-saml minor
- xo-server-backup-reports patch
- xo-server-netbox minor
- xo-web minor
- xo-server patch

View File

@@ -320,6 +320,39 @@ It works with few steps:
From there, you can even manage your existing resources with Terraform!
## Netbox
Synchronize your pools, VMs, network interfaces and IP addresses with your [Netbox](https://netbox.readthedocs.io/en/stable/) instance.
![](./assets/netbox.png)
- Go to your Netbox interface
- Configure prefixes:
- Go to IPAM > Prefixes > Add
- Manually create as many prefixes as needed for your infrastructure's IP addresses
:::warning
XO will try to find the right prefix for each IP address. If it can't find a prefix that fits, the IP address won't be synchronized.
:::
- Generate a token:
- Go to Admin > Tokens > Add token
- Create a token with "Write enabled"
- Add a UUID custom field:
- Go to Admin > Custom fields > Add custom field
- Create a custom field called "uuid"
- Assign it to object types `virtualization > cluster` and `virtualization > virtual machine`
![](./assets/customfield.png)
- Go to Xen Orchestra > Settings > Plugins > Netbox and fill out the configuration:
- Endpoint: the URL of your Netbox instance (e.g.: `https://netbox.company.net`)
- Token: the token you generated earlier
- Pools: the pools you wish to automatically synchronize with Netbox
- Interval: the time interval (in hours) between 2 auto-synchronizations. Leave empty if you don't want to synchronize automatically.
- Load the plugin (button next to the plugin's name)
- Manual synchronization: if you correctly configured and loaded the plugin, a "Synchronize with Netbox" button will appear in every pool's Advanced tab, which allows you to manually synchronize it with Netbox
## Recipes
:::tip

BIN
docs/assets/customfield.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 68 KiB

BIN
docs/assets/netbox.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 120 KiB

View File

@@ -122,9 +122,9 @@ forever start dist/cli.mjs
yarn global add forever
yarn global add forever-service
# Be sure to edit the path below to where your install is located!
cd /home/username/xen-orchestra/packages/xo-server/bin/
cd /home/username/xen-orchestra/packages/xo-server/
# Change the username below to the user owning XO
forever-service install orchestra -r username -s xo-server
forever-service install orchestra -r username -s dist/cli.mjs
```
The forever-service command above must be run in the xo-server directory. Now you can manage the service, and it will start on boot with the machine:

View File

@@ -199,9 +199,9 @@ forever start dist/cli.mjs
yarn global add forever
yarn global add forever-service
# Be sure to edit the path below to where your install is located!
cd /home/username/xen-orchestra/packages/xo-server/bin/
cd /home/username/xen-orchestra/packages/xo-server/
# Change the username below to the user owning XO
forever-service install orchestra -r username -s xo-server
forever-service install orchestra -r username -s dist/cli.mjs
```
The forever-service command above must be run in the xo-server directory. Now you can manage the service, and it will start on boot with the machine:

View File

@@ -34,7 +34,7 @@
"bind-property-descriptor": "^1.0.0",
"blocked": "^1.2.1",
"debug": "^4.0.1",
"http-request-plus": "^0.8.0",
"http-request-plus": "^0.10.0",
"jest-diff": "^26.4.2",
"json-rpc-protocol": "^0.13.1",
"kindof": "^2.0.0",

View File

@@ -291,6 +291,16 @@ export class Xapi extends EventEmitter {
return this._roCall(`${type}.get_${field}`, [ref])
}
async getFields(type, ref, fields) {
const values = {}
await Promise.all(
fields.map(async field => {
values[field] = await this._sessionCall(`${type}.get_${field}`, [ref])
})
)
return this._wrapRecord(type, ref, values)
}
setField(type, ref, field, value) {
return this.call(`${type}.set_${field}`, ref, value).then(noop)
}

View File

@@ -34,7 +34,7 @@
"chalk": "^4.1.0",
"exec-promise": "^0.7.0",
"fs-promise": "^2.0.3",
"http-request-plus": "^0.9.1",
"http-request-plus": "^0.10.0",
"human-format": "^0.11.0",
"l33teral": "^3.0.3",
"lodash": "^4.17.4",

View File

@@ -43,8 +43,6 @@ export const parse = string => {
object.type = 's3'
object.region = parsed.hash.length === 0 ? undefined : parsed.hash.slice(1) // remove '#'
object.host = parsed.host
object.port = parsed.port
object.hostname = parsed.hostname
object.path = parsed.pathname
object.username = parsed.username
object.password = decodeURIComponent(parsed.password)

View File

@@ -0,0 +1 @@
module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))

View File

@@ -0,0 +1 @@
../../scripts/babel-eslintrc.js

View File

@@ -0,0 +1 @@
../../scripts/npmignore

View File

@@ -0,0 +1,25 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->
# xo-server-netbox
> Synchronizes pools managed by Xen Orchestra with Netbox
## Usage
Like all other xo-server plugins, it can be configured directly via
the web interface, see [the plugin documentation](https://xen-orchestra.com/docs/plugins.html).
## Contributions
Contributions are _very_ welcomed, either on the documentation or on
the code.
You may:
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
you've encountered;
- fork and create a pull request.
## License
[AGPL-3.0-or-later](https://spdx.org/licenses/AGPL-3.0-or-later) © [Vates SAS](https://vates.fr)

View File

@@ -0,0 +1,2 @@
Like all other xo-server plugins, it can be configured directly via
the web interface, see [the plugin documentation](https://xen-orchestra.com/docs/plugins.html).

View File

@@ -0,0 +1,51 @@
{
"name": "xo-server-netbox",
"version": "0.0.0",
"license": "AGPL-3.0-or-later",
"description": "Synchronizes pools managed by Xen Orchestra with Netbox",
"keywords": [
"netbox",
"orchestra",
"plugin",
"web",
"xen",
"xen-orchestra",
"xo-server"
],
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-server-netbox",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "packages/xo-server-netbox",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"author": {
"name": "Vates SAS",
"url": "https://vates.fr"
},
"preferGlobal": false,
"main": "dist/",
"engines": {
"node": ">=14.6"
},
"devDependencies": {
"@babel/cli": "^7.13.16",
"@babel/core": "^7.14.0",
"@babel/plugin-proposal-nullish-coalescing-operator": "^7.13.8",
"@babel/plugin-proposal-optional-chaining": "^7.13.12",
"@babel/preset-env": "^7.14.1",
"@xen-orchestra/log": "^0.2.0",
"assert": "^2.0.0",
"cross-env": "^7.0.3",
"is-in-subnet": "^4.0.1",
"lodash": "^4.17.21"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "rimraf dist/",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build"
},
"private": true
}

View File

@@ -0,0 +1,580 @@
import assert from 'assert'
import { createLogger } from '@xen-orchestra/log'
import { find, flatten, forEach, groupBy, isEmpty, keyBy, mapValues, trimEnd, zipObject } from 'lodash'
import { isInSubnet } from 'is-in-subnet'
const log = createLogger('xo:netbox')

// Name of the Netbox cluster type used to represent XCP-ng pools
const CLUSTER_TYPE = 'XCP-ng Pool'
// Max number of objects sent per request (POST/PATCH payloads are chunked)
const CHUNK_SIZE = 100
// Names longer than this are truncated before being sent — presumably
// Netbox's object-name limit; TODO confirm against the Netbox schema
const NAME_MAX_LENGTH = 64
const REQUEST_TIMEOUT = 120e3 // 2min
// binary size units (bytes per MiB / GiB) used when converting XAPI values
const M = 1024 ** 2
const G = 1024 ** 3

// unbound push, used to append whole arrays via `push.apply(target, items)`
const { push } = Array.prototype
// Recursive difference between two values: returns the subset of `newer`
// whose values differ from `older`, or undefined when they are equivalent.
// Keys present only in `older` are ignored (deletions are not reported).
// FIX: `typeof null === 'object'`, so the original threw a TypeError on a
// null `newer` (Object.keys(null)) and whenever a nested object had no
// counterpart in `older` (reading a property of undefined); both cases are
// now handled.
const diff = (newer, older) => {
  // primitives (and null): report the new value only when it changed
  if (newer === null || typeof newer !== 'object') {
    return newer === older ? undefined : newer
  }
  // when `older` is missing or not an object, every property of `newer` is new
  const base = older !== null && typeof older === 'object' ? older : {}
  const changes = { ...newer }
  for (const key of Object.keys(changes)) {
    if (diff(changes[key], base[key]) === undefined) {
      delete changes[key]
    }
  }
  // inlined lodash `isEmpty` for plain objects
  return Object.keys(changes).length === 0 ? undefined : changes
}
// Deduplicate a name by appending ` (index)`, truncating the base name so the
// result still fits within NAME_MAX_LENGTH characters.
const indexName = (name, index) => {
  const suffix = ` (${index})`
  const maxBaseLength = NAME_MAX_LENGTH - suffix.length
  return name.slice(0, maxBaseLength) + suffix
}
// Abort outgoing Netbox HTTP requests that exceed REQUEST_TIMEOUT.
const onRequest = req => {
  req.setTimeout(REQUEST_TIMEOUT)
  req.on('timeout', () => req.abort())
}
class Netbox {
#endpoint
#intervalToken
#loaded
#pools
#removeApiMethods
#syncInterval
#token
#xo
constructor({ xo }) {
  // keep a reference to the XO app: used for API methods, object lookups
  // and the HTTP client
  this.#xo = xo
}
configure(configuration) {
this.#endpoint = trimEnd(configuration.endpoint, '/')
if (!/^https?:\/\//.test(this.#endpoint)) {
this.#endpoint = 'http://' + this.#endpoint
}
this.#token = configuration.token
this.#pools = configuration.pools
this.#syncInterval = configuration.syncInterval && configuration.syncInterval * 60 * 60 * 1e3
// We don't want to start the auto-sync if the plugin isn't loaded
if (this.#loaded) {
clearInterval(this.#intervalToken)
if (this.#syncInterval !== undefined) {
this.#intervalToken = setInterval(this.#synchronize.bind(this), this.#syncInterval)
}
}
}
load() {
const synchronize = ({ pools }) => this.#synchronize(pools)
synchronize.description = 'Synchronize XO pools with Netbox'
synchronize.params = {
pools: { type: 'array', optional: true, items: { type: 'string' } },
}
this.#removeApiMethods = this.#xo.addApiMethods({
netbox: { synchronize },
})
if (this.#syncInterval !== undefined) {
this.#intervalToken = setInterval(this.#synchronize.bind(this), this.#syncInterval)
}
this.#loaded = true
}
unload() {
  // tear down everything `load` installed: API methods and the auto-sync timer
  this.#removeApiMethods()
  clearInterval(this.#intervalToken)
  this.#loaded = false
}
// Perform an authenticated request against the Netbox REST API.
// - array `data` is sent in chunks of CHUNK_SIZE objects;
// - GET responses are transparently de-paginated;
// - empty response bodies yield undefined.
async #makeRequest(path, method, data) {
  log.debug(
    `${method} ${path}`,
    // avoid flooding the logs: only show the first 2 objects of large payloads
    Array.isArray(data) && data.length > 2 ? [...data.slice(0, 2), `and ${data.length - 2} others`] : data
  )
  let url = this.#endpoint + '/api' + path
  const options = {
    headers: { 'Content-Type': 'application/json', Authorization: `Token ${this.#token}` },
    method,
    onRequest,
  }
  // single HTTP exchange; returns the parsed JSON body, or undefined when empty
  const httpRequest = async () => {
    try {
      const response = await this.#xo.httpRequest(url, options)
      const body = await response.readAll()
      if (body.length > 0) {
        return JSON.parse(body)
      }
    } catch (error) {
      // best effort: log the error response body when there is one,
      // then rethrow the original error in every case
      try {
        const body = await error.response.readAll()
        if (body.length > 0) {
          log.error(body.toString())
        }
      } catch {
        throw error
      }
      throw error
    }
  }

  let response = []
  // Split long requests into chunks of CHUNK_SIZE objects to avoid Bad Gateway errors
  if (Array.isArray(data)) {
    let offset = 0
    while (offset < data.length) {
      options.body = JSON.stringify(data.slice(offset, offset + CHUNK_SIZE))
      // NOTE: `push.apply(arr, undefined)` is a no-op, which handles
      // empty-body responses (e.g. DELETE)
      push.apply(response, await httpRequest())
      offset += CHUNK_SIZE
    }
  } else {
    if (data !== undefined) {
      options.body = JSON.stringify(data)
    }
    response = await httpRequest()
  }

  if (method !== 'GET') {
    return response
  }

  // Handle pagination for GET requests: follow `next` links and accumulate
  // all `results`
  const { results } = response
  while (response.next !== null) {
    const { pathname, search } = new URL(response.next)
    url = this.#endpoint + pathname + search
    response = await httpRequest()
    push.apply(results, response.results)
  }

  return results
}
async #synchronize(pools = this.#pools) {
const xo = this.#xo
log.debug('synchronizing')
// Cluster type
const clusterTypes = await this.#makeRequest(
`/virtualization/cluster-types/?name=${encodeURIComponent(CLUSTER_TYPE)}`,
'GET'
)
if (clusterTypes.length > 1) {
throw new Error('Found more than 1 "XCP-ng Pool" cluster type')
}
let clusterType
if (clusterTypes.length === 0) {
clusterType = await this.#makeRequest('/virtualization/cluster-types/', 'POST', {
name: CLUSTER_TYPE,
slug: CLUSTER_TYPE.toLowerCase().replace(/[^a-z0-9]+/g, '-'),
description: 'Created by Xen Orchestra',
})
} else {
clusterType = clusterTypes[0]
}
// Clusters
const clusters = keyBy(
await this.#makeRequest(`/virtualization/clusters/?type_id=${clusterType.id}`, 'GET'),
'custom_fields.uuid'
)
const clustersToCreate = []
const clustersToUpdate = []
for (const poolId of pools) {
const pool = xo.getObject(poolId)
const cluster = clusters[pool.uuid]
const updatedCluster = {
name: pool.name_label.slice(0, NAME_MAX_LENGTH),
type: clusterType.id,
custom_fields: { uuid: pool.uuid },
}
if (cluster === undefined) {
clustersToCreate.push(updatedCluster)
} else {
// `type` needs to be flattened so we can compare the 2 objects
const patch = diff(updatedCluster, { ...cluster, type: cluster.type.id })
if (patch !== undefined) {
clustersToUpdate.push({ ...patch, id: cluster.id })
}
}
}
Object.assign(
clusters,
keyBy(
flatten(
await Promise.all(
clustersToCreate.length === 0
? []
: await this.#makeRequest('/virtualization/clusters/', 'POST', clustersToCreate),
clustersToUpdate.length === 0
? []
: await this.#makeRequest('/virtualization/clusters/', 'PATCH', clustersToUpdate)
)
),
'custom_fields.uuid'
)
)
// VMs
const vms = xo.getObjects({ filter: object => object.type === 'VM' && pools.includes(object.$pool) })
const oldNetboxVms = keyBy(
flatten(
// FIXME: It should be doable with one request:
// `cluster_id=1&cluster_id=2` but it doesn't work
// https://netbox.readthedocs.io/en/stable/rest-api/filtering/#filtering-objects
await Promise.all(
pools.map(poolId =>
this.#makeRequest(`/virtualization/virtual-machines/?cluster_id=${clusters[poolId].id}`, 'GET')
)
)
),
'custom_fields.uuid'
)
// Build collections for later
const netboxVms = {} // VM UUID → Netbox VM
const vifsByVm = {} // VM UUID → VIF
const ipsByDeviceByVm = {} // VM UUID → (VIF device → IP)
const vmsToCreate = []
const vmsToUpdate = []
for (const vm of Object.values(vms)) {
vifsByVm[vm.uuid] = vm.VIFs
const vmIpsByDevice = (ipsByDeviceByVm[vm.uuid] = {})
forEach(vm.addresses, (address, key) => {
const device = key.split('/')[0]
if (vmIpsByDevice[device] === undefined) {
vmIpsByDevice[device] = []
}
vmIpsByDevice[device].push(address)
})
const oldNetboxVm = oldNetboxVms[vm.uuid]
delete oldNetboxVms[vm.uuid]
const cluster = clusters[vm.$pool]
assert(cluster !== undefined)
const disk = Math.floor(
vm.$VBDs
.map(vbdId => xo.getObject(vbdId))
.filter(vbd => !vbd.is_cd_drive)
.map(vbd => xo.getObject(vbd.VDI))
.reduce((total, vdi) => total + vdi.size, 0) / G
)
const updatedVm = {
name: vm.name_label.slice(0, NAME_MAX_LENGTH),
cluster: cluster.id,
vcpus: vm.CPUs.number,
disk,
memory: Math.floor(vm.memory.dynamic[1] / M),
status: vm.power_state === 'Running' ? 'active' : 'offline',
custom_fields: { uuid: vm.uuid },
}
if (oldNetboxVm === undefined) {
vmsToCreate.push(updatedVm)
} else {
// Some properties need to be flattened to match the expected POST
// request objects
let patch = diff(updatedVm, {
...oldNetboxVm,
cluster: oldNetboxVm.cluster.id,
status: oldNetboxVm.status?.value,
})
// Check if a name mismatch is due to a name deduplication
if (patch?.name !== undefined) {
let match
if ((match = oldNetboxVm.name.match(/.* \((\d+)\)$/)) !== null) {
if (indexName(patch.name, match[1]) === oldNetboxVm.name) {
delete patch.name
if (isEmpty(patch)) {
patch = undefined
}
}
}
}
if (patch !== undefined) {
// $cluster is needed to deduplicate the VM names within the same
// cluster. It will be removed at that step.
vmsToUpdate.push({ ...patch, id: oldNetboxVm.id, $cluster: cluster.id })
} else {
netboxVms[vm.uuid] = oldNetboxVm
}
}
}
// Deduplicate VM names
vmsToCreate.forEach((vm, i) => {
const name = vm.name
let nameIndex = 1
while (
find(netboxVms, netboxVm => netboxVm.cluster.id === vm.cluster && netboxVm.name === vm.name) !== undefined ||
find(
vmsToCreate,
(vmToCreate, j) => vmToCreate.cluster === vm.cluster && vmToCreate.name === vm.name && i !== j
) !== undefined
) {
if (nameIndex >= 1e3) {
throw new Error(`Cannot deduplicate name of VM ${name}`)
}
vm.name = indexName(name, nameIndex++)
}
})
vmsToUpdate.forEach((vm, i) => {
const name = vm.name
if (name === undefined) {
delete vm.$cluster
return
}
let nameIndex = 1
while (
find(netboxVms, netboxVm => netboxVm.cluster.id === vm.$cluster && netboxVm.name === vm.name) !== undefined ||
find(vmsToCreate, vmToCreate => vmToCreate.cluster === vm.$cluster && vmToCreate.name === vm.name) !==
undefined ||
find(
vmsToUpdate,
(vmToUpdate, j) => vmToUpdate.$cluster === vm.$cluster && vmToUpdate.name === vm.name && i !== j
) !== undefined
) {
if (nameIndex >= 1e3) {
throw new Error(`Cannot deduplicate name of VM ${name}`)
}
vm.name = indexName(name, nameIndex++)
}
delete vm.$cluster
})
const vmsToDelete = Object.values(oldNetboxVms).map(vm => ({ id: vm.id }))
Object.assign(
netboxVms,
keyBy(
flatten(
(
await Promise.all([
vmsToDelete.length !== 0 &&
(await this.#makeRequest('/virtualization/virtual-machines/', 'DELETE', vmsToDelete)),
vmsToCreate.length === 0
? []
: await this.#makeRequest('/virtualization/virtual-machines/', 'POST', vmsToCreate),
vmsToUpdate.length === 0
? []
: await this.#makeRequest('/virtualization/virtual-machines/', 'PATCH', vmsToUpdate),
])
).slice(1)
),
'custom_fields.uuid'
)
)
// Interfaces
// { vmUuid: { ifName: if } }
const oldInterfaces = mapValues(
groupBy(
flatten(
await Promise.all(
pools.map(poolId =>
this.#makeRequest(`/virtualization/interfaces/?cluster_id=${clusters[poolId].id}`, 'GET')
)
)
),
'virtual_machine.id'
),
interfaces => keyBy(interfaces, 'name')
)
const interfaces = {} // VIF UUID → interface
const interfacesToCreateByVif = {} // VIF UUID → interface
const interfacesToUpdateByVif = {} // VIF UUID → interface
for (const [vmUuid, vifs] of Object.entries(vifsByVm)) {
const netboxVmId = netboxVms[vmUuid].id
const vmInterfaces = oldInterfaces[netboxVmId] ?? {}
for (const vifId of vifs) {
const vif = xo.getObject(vifId)
const name = `eth${vif.device}`
const oldInterface = vmInterfaces[name]
delete vmInterfaces[name]
const updatedInterface = {
name,
mac_address: vif.MAC.toUpperCase(),
virtual_machine: netboxVmId,
}
if (oldInterface === undefined) {
interfacesToCreateByVif[vif.uuid] = updatedInterface
} else {
const patch = diff(updatedInterface, {
...oldInterface,
virtual_machine: oldInterface.virtual_machine.id,
})
if (patch !== undefined) {
interfacesToUpdateByVif[vif.uuid] = { ...patch, id: oldInterface.id }
} else {
interfaces[vif.uuid] = oldInterface
}
}
}
}
const interfacesToDelete = flatten(
Object.values(oldInterfaces).map(oldInterfacesByName =>
Object.values(oldInterfacesByName).map(oldInterface => ({ id: oldInterface.id }))
)
)
;(
await Promise.all([
interfacesToDelete.length !== 0 &&
this.#makeRequest('/virtualization/interfaces/', 'DELETE', interfacesToDelete),
isEmpty(interfacesToCreateByVif)
? {}
: this.#makeRequest(
'/virtualization/interfaces/',
'POST',
Object.values(interfacesToCreateByVif)
).then(interfaces => zipObject(Object.keys(interfacesToCreateByVif), interfaces)),
isEmpty(interfacesToUpdateByVif)
? {}
: this.#makeRequest(
'/virtualization/interfaces/',
'PATCH',
Object.values(interfacesToUpdateByVif)
).then(interfaces => zipObject(Object.keys(interfacesToUpdateByVif), interfaces)),
])
)
.slice(1)
.forEach(newInterfaces => Object.assign(interfaces, newInterfaces))
// IPs
const [oldNetboxIps, prefixes] = await Promise.all([
this.#makeRequest('/ipam/ip-addresses/', 'GET').then(addresses => groupBy(addresses, 'assigned_object_id')),
this.#makeRequest('/ipam/prefixes/', 'GET'),
])
const ipsToDelete = []
const ipsToCreate = []
const ignoredIps = []
for (const [vmUuid, vifs] of Object.entries(vifsByVm)) {
const vmIpsByDevice = ipsByDeviceByVm[vmUuid]
if (vmIpsByDevice === undefined) {
continue
}
for (const vifId of vifs) {
const vif = xo.getObject(vifId)
const vifIps = vmIpsByDevice[vif.device]
if (vifIps === undefined) {
continue
}
const interface_ = interfaces[vif.uuid]
const interfaceOldIps = oldNetboxIps[interface_.id] ?? []
for (const ip of vifIps) {
// FIXME: Should we compare the IPs with their range? ie: can 2 IPs
// look identical but belong to 2 different ranges?
const netboxIpIndex = interfaceOldIps.findIndex(netboxIp => netboxIp.address.split('/')[0] === ip)
if (netboxIpIndex >= 0) {
interfaceOldIps.splice(netboxIpIndex, 1)
} else {
const prefix = prefixes.find(({ prefix }) => isInSubnet(ip, prefix))
if (prefix === undefined) {
ignoredIps.push(ip)
continue
}
ipsToCreate.push({
address: `${ip}/${prefix.prefix.split('/')[1]}`,
assigned_object_type: 'virtualization.vminterface',
assigned_object_id: interface_.id,
})
}
}
ipsToDelete.push(...interfaceOldIps.map(oldIp => ({ id: oldIp.id })))
}
}
if (ignoredIps.length > 0) {
log.warn('Could not find prefix for some IPs: ignoring them.', { ips: ignoredIps })
}
await Promise.all([
ipsToDelete.length !== 0 && this.#makeRequest('/ipam/ip-addresses/', 'DELETE', ipsToDelete),
ipsToCreate.length !== 0 && this.#makeRequest('/ipam/ip-addresses/', 'POST', ipsToCreate),
])
log.debug('synchronized')
}
async test() {
const randomSuffix = Math.random().toString(36).slice(2)
const name = '[TMP] Xen Orchestra Netbox plugin test - ' + randomSuffix
await this.#makeRequest('/virtualization/cluster-types/', 'POST', {
name,
slug: 'xo-test-' + randomSuffix,
description:
"This type has been created by Xen Orchestra's Netbox plugin test. If it hasn't been properly deleted, you may delete it manually.",
})
const clusterTypes = await this.#makeRequest(
`/virtualization/cluster-types/?name=${encodeURIComponent(name)}`,
'GET'
)
if (clusterTypes.length !== 1) {
throw new Error('Could not properly write and read Netbox')
}
await this.#makeRequest('/virtualization/cluster-types/', 'DELETE', [{ id: clusterTypes[0].id }])
}
}
/**
 * JSON-schema describing the plugin's configuration form.
 *
 * @param {{ xo: { apiMethods: object } }} context - injected by xo-server when
 *   building the configuration UI (apiMethods is currently unused).
 * @returns {object} the configuration schema
 */
export const configurationSchema = ({ xo: { apiMethods } }) => {
  const schema = {
    description:
      'Synchronize pools managed by Xen Orchestra with Netbox. Configuration steps: https://xen-orchestra.com/docs/advanced.html#netbox.',
    type: 'object',
    properties: {
      endpoint: {
        type: 'string',
        title: 'Endpoint',
        description: 'Netbox URI',
      },
      token: {
        type: 'string',
        title: 'Token',
        description: 'Generate a token with write permissions from your Netbox interface',
      },
      pools: {
        type: 'array',
        title: 'Pools',
        description: 'Pools to synchronize with Netbox',
        items: {
          type: 'string',
          $type: 'pool',
        },
      },
      syncInterval: {
        type: 'number',
        title: 'Interval',
        description: 'Synchronization interval in hours - leave empty to disable auto-sync',
      },
    },
    required: ['endpoint', 'token', 'pools'],
  }
  return schema
}
export default opts => new Netbox(opts)

View File

@@ -75,7 +75,7 @@
"helmet": "^3.9.0",
"highland": "^2.11.1",
"http-proxy": "^1.16.2",
"http-request-plus": "^0.8.0",
"http-request-plus": "^0.10.0",
"http-server-plus": "^0.11.0",
"human-format": "^0.11.0",
"iterable-backoff": "^0.1.0",

View File

@@ -290,6 +290,8 @@ async function setUpPassport(express, xo, { authentication: authCfg, http: { coo
// ===================================================================
const logPlugin = createLogger('xo:plugin')
// See https://github.com/nodejs/help/issues/3380
const requireResolve = createRequire(import.meta.url).resolve
@@ -297,7 +299,11 @@ async function registerPlugin(pluginPath, pluginName) {
const plugin = (await import(requireResolve(pluginPath))).default
const { description, version = 'unknown' } = await fse
.readFile(pluginPath + '/package.json')
.then(JSON.stringify, error => ({}))
.then(JSON.parse)
.catch(error => {
logPlugin.warn('reading package.json', { error })
return {}
})
// Supports both “normal” CommonJS and Babel's ES2015 modules.
let { default: factory = plugin, configurationSchema, configurationPresets, testSchema } = plugin
@@ -334,8 +340,6 @@ async function registerPlugin(pluginPath, pluginName) {
)
}
const logPlugin = createLogger('xo:plugin')
function registerPluginWrapper(pluginPath, pluginName) {
logPlugin.info(`register ${pluginName}`)

View File

@@ -460,6 +460,7 @@ export default class BackupNg {
return Task.run(
{
data: {
backupId: id,
jobId: metadata.jobId,
srId,
time: metadata.timestamp,

View File

@@ -794,6 +794,8 @@ const messages = {
setpoolMaster: 'Master',
syslogRemoteHost: 'Remote syslog host',
defaultMigrationNetwork: 'Default migration network',
syncNetbox: 'Synchronize with Netbox',
syncNetboxWarning: 'Are you sure you want to synchronize with Netbox?',
// ----- Pool host tab -----
hostNameLabel: 'Name',
hostDescription: 'Description',

View File

@@ -3001,3 +3001,12 @@ export const synchronizeLdapGroups = () =>
body: _('syncLdapGroupsWarning'),
icon: 'refresh',
}).then(() => _call('ldap.synchronizeGroups')::tap(subscribeGroups.forceRefresh), noop)
// Netbox plugin ---------------------------------------------------------------
export const synchronizeNetbox = pools =>
confirm({
title: _('syncNetbox'),
body: _('syncNetboxWarning'),
icon: 'refresh',
}).then(() => _call('netbox.synchronize', { pools: resolveIds(pools) }))

View File

@@ -37,7 +37,12 @@ export default class NewSshKeyModalBody extends BaseComponent {
<SingleLineRow>
<Col size={4}>{_('key')}</Col>
<Col size={8}>
<textarea className='form-control' onChange={this._onKeyChange} rows={10} value={key || ''} />
<textarea
className='form-control text-monospace'
onChange={this._onKeyChange}
rows={10}
value={key || ''}
/>
</Col>
</SingleLineRow>
</div>

View File

@@ -13,6 +13,11 @@ $brand-info: #044b7f;
@import "../../../node_modules/bootstrap/scss/bootstrap";
// imported from https://github.com/twbs/bootstrap/blob/d64466a2488bbaac9a1005db3a199a8bc6846e3e/scss/_variables.scss#L420
.text-monospace {
font-family: SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace;
}
// -------------------------------------------------------------------
$fa-font-path: "./";

View File

@@ -38,7 +38,7 @@ const InstallCertificateModal = decorate([
</Col>
<Col mediumSize={8}>
<DebounceTextarea
className='form-control'
className='form-control text-monospace'
id={state.inputCertificateId}
name='certificate'
onChange={effects.onChange}
@@ -55,7 +55,7 @@ const InstallCertificateModal = decorate([
</Col>
<Col mediumSize={8}>
<DebounceTextarea
className='form-control'
className='form-control text-monospace'
id={state.inputPrivateKeyId}
name='privateKey'
onChange={effects.onChange}
@@ -72,7 +72,7 @@ const InstallCertificateModal = decorate([
</Col>
<Col mediumSize={8}>
<DebounceTextarea
className='form-control'
className='form-control text-monospace'
id={state.inputCertificateChainId}
name='certificateChain'
onChange={effects.onChange}

View File

@@ -1142,7 +1142,7 @@ export default class NewVm extends BaseComponent {
{_('newVmUserConfigLabel')}
<br />
<DebounceTextarea
className='form-control'
className='form-control text-monospace'
disabled={installMethod !== 'customConfig'}
onChange={this._linkState('customConfig')}
rows={7}
@@ -1156,7 +1156,7 @@ export default class NewVm extends BaseComponent {
{_('newVmNetworkConfigLabel')} <NetworkConfigInfo />
<br />
<DebounceTextarea
className='form-control'
className='form-control text-monospace'
disabled={installMethod !== 'customConfig'}
onChange={this._linkState('networkConfig')}
rows={7}
@@ -1246,7 +1246,7 @@ export default class NewVm extends BaseComponent {
<label>{_('newVmCloudConfig')}</label>{' '}
{!coreOsDefaultTemplateError ? (
<DebounceTextarea
className='form-control'
className='form-control text-monospace'
onChange={this._linkState('cloudConfig')}
rows={7}
value={cloudConfig}

View File

@@ -7,8 +7,9 @@ import Component from 'base-component'
import Icon from 'icon'
import renderXoItem, { Network } from 'render-xo-item'
import SelectFiles from 'select-files'
import TabButton from 'tab-button'
import Upgrade from 'xoa-upgrade'
import { connectStore } from 'utils'
import { addSubscriptions, connectStore } from 'utils'
import { Container, Row, Col } from 'grid'
import { CustomFields } from 'custom-fields'
import { injectIntl } from 'react-intl'
@@ -28,6 +29,8 @@ import {
setPoolMaster,
setRemoteSyslogHost,
setRemoteSyslogHosts,
subscribePlugins,
synchronizeNetbox,
} from 'xo'
@connectStore(() => ({
@@ -65,6 +68,9 @@ class PoolMaster extends Component {
migrationNetwork: createGetObject((_, { pool }) => pool.otherConfig['xo:migrationNetwork']),
}
})
@addSubscriptions({
plugins: subscribePlugins,
})
export default class TabAdvanced extends Component {
_getMigrationNetworkPredicate = createSelector(
createCollectionWrapper(
@@ -84,6 +90,11 @@ export default class TabAdvanced extends Component {
networkIds => network => networkIds.has(network.id)
)
_isNetboxPluginLoaded = createSelector(
() => this.props.plugins,
plugins => plugins !== undefined && plugins.some(plugin => plugin.name === 'netbox' && plugin.loaded)
)
_onChangeMigrationNetwork = migrationNetwork => editPool(this.props.pool, { migrationNetwork: migrationNetwork.id })
_removeMigrationNetwork = () => editPool(this.props.pool, { migrationNetwork: null })
@@ -101,6 +112,19 @@ export default class TabAdvanced extends Component {
return (
<div>
<Container>
{this._isNetboxPluginLoaded() && (
<Row>
<Col className='text-xs-right'>
<TabButton
btnStyle='primary'
handler={synchronizeNetbox}
handlerParam={[pool]}
icon='refresh'
labelId='syncNetbox'
/>
</Col>
</Row>
)}
<Row>
<Col>
<h3>{_('xenSettingsLabel')}</h3>

View File

@@ -119,7 +119,7 @@ export default decorate([
</label>{' '}
<AvailableTemplateVars />
<DebounceTextarea
className='form-control'
className='form-control text-monospace'
id={state.inputTemplateId}
name='template'
onChange={effects.setInputValue}

View File

@@ -3,6 +3,7 @@ import ActionButton from 'action-button'
import decorate from 'apply-decorators'
import Icon from 'icon'
import React from 'react'
import Tooltip from 'tooltip'
import { addSubscriptions, resolveId } from 'utils'
import { alert, confirm } from 'modal'
import { createRemote, editRemote, subscribeRemotes } from 'xo'
@@ -11,7 +12,7 @@ import { format } from 'xo-remote-parser'
import { generateId, linkState } from 'reaclette-utils'
import { injectState, provideState } from 'reaclette'
import { map, some, trimStart } from 'lodash'
import { Password, Number } from 'form'
import { Password, Number, Toggle } from 'form'
import { SelectProxy } from 'select-objects'
const remoteTypes = {
@@ -39,6 +40,7 @@ export default decorate([
username: undefined,
directory: undefined,
bucket: undefined,
protocol: undefined,
region: undefined,
}),
effects: {
@@ -61,6 +63,7 @@ export default decorate([
proxyId = remote.proxy,
type = remote.type,
username = remote.username,
protocol = remote.protocol || 'https',
region = remote.region,
} = state
let { path = remote.path } = state
@@ -78,7 +81,7 @@ export default decorate([
port: port || undefined,
type,
username,
protocol: 'https',
protocol,
region,
}),
options: options !== '' ? options : null,
@@ -137,6 +140,9 @@ export default decorate([
setSecretKey(_, { target: { value } }) {
this.state.password = value
},
setInsecure(_, value) {
this.state.protocol = value ? 'http' : 'https'
},
},
computed: {
formId: generateId,
@@ -153,6 +159,7 @@ export default decorate([
name = remote.name || '',
options = remote.options || '',
password = remote.password || '',
protocol = remote.protocol || 'https',
region = remote.region || '',
parsedPath,
path = parsedPath || '',
@@ -332,7 +339,11 @@ export default decorate([
{type === 's3' && (
<fieldset className='form-group form-group'>
<div className='input-group form-group'>
<em className='text-warning'>HTTP support has been removed, only HTTPS is supported</em>
<span className='input-group-addon'>
<Tooltip content={formatMessage(messages.remoteS3TooltipProtocol)}>
<Toggle iconSize={1} onChange={effects.setInsecure} value={protocol === 'http'} />
</Tooltip>
</span>
<input
className='form-control'
name='host'

View File

@@ -34,7 +34,7 @@ class SendToClipboard extends Component {
return (
<div>
<textarea
className='form-control'
className='form-control text-monospace'
onChange={this.linkState('value')}
ref={this._selectContent}
rows={10}

View File

@@ -1,4 +1,5 @@
.*
gitignore-
/benchmark/
/benchmarks/

1526
yarn.lock

File diff suppressed because it is too large Load Diff