Compare commits

..

3 Commits

Author SHA1 Message Date
Pierre Donias
87dc362f10 Fallback to uname 2023-09-28 09:56:24 +02:00
Pierre Donias
918a577bb0 Handle bad versions 2023-09-27 16:47:52 +02:00
Pierre Donias
8ab7f033e0 feat(netbox): platform: append major version to OS name 2023-09-27 09:15:07 +02:00
45 changed files with 95 additions and 687 deletions

View File

@@ -22,7 +22,7 @@
"fuse-native": "^2.2.6",
"lru-cache": "^7.14.0",
"promise-toolbox": "^0.21.0",
"vhd-lib": "^4.6.0"
"vhd-lib": "^4.5.0"
},
"scripts": {
"postversion": "npm publish --access public"

View File

@@ -7,7 +7,7 @@
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"dependencies": {
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/backups": "^0.42.1",
"@xen-orchestra/backups": "^0.42.0",
"@xen-orchestra/fs": "^4.1.0",
"filenamify": "^6.0.0",
"getopts": "^2.2.5",

View File

@@ -681,13 +681,11 @@ export class RemoteAdapter {
}
}
async outputStream(path, input, { checksum = true, maxStreamLength, streamLength, validator = noop } = {}) {
async outputStream(path, input, { checksum = true, validator = noop } = {}) {
const container = watchStreamSize(input)
await this._handler.outputStream(path, input, {
checksum,
dirMode: this._dirMode,
maxStreamLength,
streamLength,
async validator() {
await input.task
return validator.apply(this, arguments)
@@ -744,15 +742,8 @@ export class RemoteAdapter {
}
}
async readFullVmBackup(metadata) {
const xvaPath = resolve('/', dirname(metadata._filename), metadata.xva)
const stream = await this._handler.createReadStream(xvaPath)
try {
stream.length = await this._handler.getSize(xvaPath)
} catch (error) {
warn(`Can't compute length of xva file`, { xvaPath, error })
}
return stream
readFullVmBackup(metadata) {
return this._handler.createReadStream(resolve('/', dirname(metadata._filename), metadata.xva))
}
async readVmBackupMetadata(path) {

View File

@@ -29,8 +29,6 @@ export const FullRemote = class FullRemoteVmBackupRunner extends AbstractRemote
writer =>
writer.run({
stream: forkStreamUnpipe(stream),
// stream is copied and transformed, it's not safe to attach additional properties to it
streamLength: stream.length,
timestamp: metadata.timestamp,
vm: metadata.vm,
vmSnapshot: metadata.vmSnapshot,

View File

@@ -35,22 +35,13 @@ export const FullXapi = class FullXapiVmBackupRunner extends AbstractXapi {
useSnapshot: false,
})
)
const vdis = await exportedVm.$getDisks()
let maxStreamLength = 1024 * 1024 // Ovf file and tar headers are a few KB, let's stay safe
vdis.forEach(vdiRef => {
const vdi = this._xapi.getObject(vdiRef)
maxStreamLength += vdi.physical_utilisation ?? 0 // at most the xva will take the physical usage of the disk
// it can be smaller due to the smaller block size for xva than vhd, and compression of xcp-ng
})
const sizeContainer = watchStreamSize(stream)
const timestamp = Date.now()
await this._callWriters(
writer =>
writer.run({
maxStreamLength,
sizeContainer,
stream: forkStreamUnpipe(stream),
timestamp,

View File

@@ -24,7 +24,7 @@ export class FullRemoteWriter extends MixinRemoteWriter(AbstractFullWriter) {
)
}
async _run({ maxStreamLength, timestamp, sizeContainer, stream, streamLength, vm, vmSnapshot }) {
async _run({ timestamp, sizeContainer, stream, vm, vmSnapshot }) {
const settings = this._settings
const job = this._job
const scheduleId = this._scheduleId
@@ -65,8 +65,6 @@ export class FullRemoteWriter extends MixinRemoteWriter(AbstractFullWriter) {
await Task.run({ name: 'transfer' }, async () => {
await adapter.outputStream(dataFilename, stream, {
maxStreamLength,
streamLength,
validator: tmpPath => adapter.isValidXva(tmpPath),
})
return { size: sizeContainer.size }

View File

@@ -1,9 +1,9 @@
import { AbstractWriter } from './_AbstractWriter.mjs'
export class AbstractFullWriter extends AbstractWriter {
async run({ maxStreamLength, timestamp, sizeContainer, stream, streamLength, vm, vmSnapshot }) {
async run({ timestamp, sizeContainer, stream, vm, vmSnapshot }) {
try {
return await this._run({ maxStreamLength, timestamp, sizeContainer, stream, streamLength, vm, vmSnapshot })
return await this._run({ timestamp, sizeContainer, stream, vm, vmSnapshot })
} finally {
// ensure stream is properly closed
stream.destroy()

View File

@@ -8,7 +8,7 @@
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"version": "0.42.1",
"version": "0.42.0",
"engines": {
"node": ">=14.18"
},
@@ -44,7 +44,7 @@
"proper-lockfile": "^4.1.2",
"tar": "^6.1.15",
"uuid": "^9.0.0",
"vhd-lib": "^4.6.0",
"vhd-lib": "^4.5.0",
"xen-api": "^1.3.6",
"yazl": "^2.5.1"
},

View File

@@ -25,7 +25,6 @@
"@aws-sdk/lib-storage": "^3.54.0",
"@aws-sdk/middleware-apply-body-checksum": "^3.58.0",
"@aws-sdk/node-http-handler": "^3.54.0",
"@aws-sdk/s3-request-presigner": "^3.421.0",
"@sindresorhus/df": "^3.1.1",
"@vates/async-each": "^1.0.0",
"@vates/coalesce-calls": "^0.1.0",

View File

@@ -189,7 +189,7 @@ export default class RemoteHandlerAbstract {
* @param {number} [options.dirMode]
* @param {(this: RemoteHandlerAbstract, path: string) => Promise<undefined>} [options.validator] Function that will be called before the data is committed to the remote, if it fails, file should not exist
*/
async outputStream(path, input, { checksum = true, dirMode, maxStreamLength, streamLength, validator } = {}) {
async outputStream(path, input, { checksum = true, dirMode, validator } = {}) {
path = normalizePath(path)
let checksumStream
@@ -201,8 +201,6 @@ export default class RemoteHandlerAbstract {
}
await this._outputStream(path, input, {
dirMode,
maxStreamLength,
streamLength,
validator,
})
if (checksum) {

View File

@@ -5,7 +5,6 @@ import {
CreateMultipartUploadCommand,
DeleteObjectCommand,
GetObjectCommand,
GetObjectLockConfigurationCommand,
HeadObjectCommand,
ListObjectsV2Command,
PutObjectCommand,
@@ -18,7 +17,7 @@ import { getApplyMd5BodyChecksumPlugin } from '@aws-sdk/middleware-apply-body-ch
import { Agent as HttpAgent } from 'http'
import { Agent as HttpsAgent } from 'https'
import { createLogger } from '@xen-orchestra/log'
import { PassThrough, Transform, pipeline } from 'stream'
import { PassThrough, pipeline } from 'stream'
import { parse } from 'xo-remote-parser'
import copyStreamToBuffer from './_copyStreamToBuffer.js'
import guessAwsRegion from './_guessAwsRegion.js'
@@ -31,8 +30,6 @@ import { pRetry } from 'promise-toolbox'
// limits: https://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html
const MAX_PART_SIZE = 1024 * 1024 * 1024 * 5 // 5GB
const MAX_PART_NUMBER = 10000
const MIN_PART_SIZE = 5 * 1024 * 1024
const { warn } = createLogger('xo:fs:s3')
export default class S3Handler extends RemoteHandlerAbstract {
@@ -74,6 +71,9 @@ export default class S3Handler extends RemoteHandlerAbstract {
}),
})
// Workaround for https://github.com/aws/aws-sdk-js-v3/issues/2673
this.#s3.middlewareStack.use(getApplyMd5BodyChecksumPlugin(this.#s3.config))
const parts = split(path)
this.#bucket = parts.shift()
this.#dir = join(...parts)
@@ -223,41 +223,18 @@ export default class S3Handler extends RemoteHandlerAbstract {
}
}
async _outputStream(path, input, { maxStreamLength, streamLength, validator }) {
const maxInputLength = streamLength ?? maxStreamLength
let partSize
if (maxInputLength === undefined) {
warn(`Writing ${path} to a S3 remote without a max size set will cut it to 50GB`, { path })
partSize = MIN_PART_SIZE // min size for S3
} else {
partSize = Math.min(Math.max(Math.ceil(maxInputLength / MAX_PART_NUMBER), MIN_PART_SIZE), MAX_PART_SIZE)
}
// ensure we don't try to upload a stream too big for this part size
let readCounter = 0
const streamCutter = new Transform({
transform(chunk, encoding, callback) {
const MAX_SIZE = MAX_PART_NUMBER * partSize
readCounter += chunk.length
if (readCounter > MAX_SIZE) {
callback(new Error(`read ${readCounter} bytes, maximum size allowed is ${MAX_SIZE} `))
} else {
callback(null, chunk)
}
},
})
async _outputStream(path, input, { validator }) {
// Workaround for "ReferenceError: ReadableStream is not defined"
// https://github.com/aws/aws-sdk-js-v3/issues/2522
const Body = new PassThrough()
pipeline(input, streamCutter, Body, () => {})
pipeline(input, Body, () => {})
const upload = new Upload({
client: this.#s3,
params: {
...this.#createParams(path),
Body,
},
partSize,
leavePartsOnError: false,
})
await upload.done()
@@ -441,21 +418,6 @@ export default class S3Handler extends RemoteHandlerAbstract {
async _closeFile(fd) {}
async _sync() {
await super._sync()
try {
const res = await this.#s3.send(new GetObjectLockConfigurationCommand({ Bucket: this.#bucket }))
if (res.ObjectLockConfiguration?.ObjectLockEnabled === 'Enabled') {
// Workaround for https://github.com/aws/aws-sdk-js-v3/issues/2673
// increases memory consumption in outputStream as it buffers the streams
this.#s3.middlewareStack.use(getApplyMd5BodyChecksumPlugin(this.#s3.config))
}
} catch (error) {
if (error.Code !== 'ObjectLockConfigurationNotFoundError') {
throw error
}
}
}
useVhdDirectory() {
return true
}

View File

@@ -1,258 +0,0 @@
import fs from 'fs/promises'
import { getSignedUrl } from "@aws-sdk/s3-request-presigner"
import { createHash } from "crypto";
import {
CompleteMultipartUploadCommand,
CreateMultipartUploadCommand,
GetObjectLockConfigurationCommand,
PutObjectCommand,
S3Client,
UploadPartCommand,
} from '@aws-sdk/client-s3'
import { NodeHttpHandler } from '@aws-sdk/node-http-handler'
import { Agent as HttpAgent } from 'http'
import { Agent as HttpsAgent } from 'https'
import { parse } from 'xo-remote-parser'
import { join, split } from './dist/path.js'
import guessAwsRegion from './dist/_guessAwsRegion.js'
import { PassThrough } from 'stream'
import { readChunk } from '@vates/read-chunk'
import { pFromCallback } from 'promise-toolbox'
async function v2(url, inputStream){
const {
allowUnauthorized,
host,
path,
username,
password,
protocol,
region = guessAwsRegion(host),
} = parse(url)
const client = new S3Client({
apiVersion: '2006-03-01',
endpoint: `${protocol}://s3.us-east-2.amazonaws.com`,
forcePathStyle: true,
credentials: {
accessKeyId: username,
secretAccessKey: password,
},
region,
requestHandler: new NodeHttpHandler({
socketTimeout: 600000,
httpAgent: new HttpAgent({
keepAlive: true,
}),
httpsAgent: new HttpsAgent({
rejectUnauthorized: !allowUnauthorized,
keepAlive: true,
}),
}),
})
const pathParts = split(path)
const bucket = pathParts.shift()
const dir = join(...pathParts)
const command = new CreateMultipartUploadCommand({
Bucket: bucket, Key: join(dir, 'flov2')
})
const multipart = await client.send(command)
console.log({multipart})
const parts = []
// monitor memory usage
const intervalMonitorMemoryUsage = setInterval(()=>console.log(Math.round(process.memoryUsage().rss/1024/1024)), 2000)
const CHUNK_SIZE = Math.ceil(5*1024*1024*1024*1024/10000) // smallest chunk allowing 5TB upload
async function read(inputStream, maxReadSize){
if(maxReadSize === 0){
return null
}
process.stdout.write('+')
const chunk = await readChunk(inputStream, maxReadSize)
process.stdout.write('@')
return chunk
}
async function write(data, chunkStream, remainingBytes){
const ready = chunkStream.write(data)
if(!ready){
process.stdout.write('.')
await pFromCallback(cb=> chunkStream.once('drain', cb))
process.stdout.write('@')
}
remainingBytes -= data.length
process.stdout.write(remainingBytes+' ')
return remainingBytes
}
async function uploadChunk(inputStream){
const PartNumber = parts.length +1
let done = false
let remainingBytes = CHUNK_SIZE
const maxChunkPartSize = Math.round(CHUNK_SIZE / 1000)
const chunkStream = new PassThrough()
console.log({maxChunkPartSize,CHUNK_SIZE})
let data
let chunkBuffer = []
const hash = createHash('md5');
try{
while((data = await read(inputStream, Math.min(remainingBytes, maxChunkPartSize))) !== null){
chunkBuffer.push(data)
hash.update(data)
remainingBytes -= data.length
//remainingBytes = await write(data, chunkStream, remainingBytes)
}
console.log('data put')
const fullBuffer = Buffer.alloc(maxChunkPartSize,0)
done = remainingBytes > 0
// add padding at the end of the file (not a problem for tar-like formats: xva/ova)
// if not content length will not match and we'll have UND_ERR_REQ_CONTENT_LENGTH_MISMATCH error
console.log('full padding')
while(remainingBytes > maxChunkPartSize){
chunkBuffer.push(fullBuffer)
hash.update(fullBuffer)
remainingBytes -= maxChunkPartSize
//remainingBytes = await write(fullBuffer,chunkStream, remainingBytes)
}
console.log('full padding done ')
chunkBuffer.push(Buffer.alloc(remainingBytes,0))
hash.update(Buffer.alloc(remainingBytes,0))
console.log('md5 ok ')
//await write(Buffer.alloc(remainingBytes,0),chunkStream, remainingBytes)
// wait for the end of the upload
const command = new UploadPartCommand({
...multipart,
PartNumber,
ContentLength:CHUNK_SIZE,
Body: chunkStream,
ContentMD5 : hash.digest('base64')
})
const promise = client.send(command)
for (const buffer of chunkBuffer){
await write(buffer, chunkStream, remainingBytes)
}
chunkStream.on('error', err => console.error(err))
const res = await promise
console.log({res, headers : res.headers })
parts.push({ ETag:/*res.headers.get('etag') */res.ETag, PartNumber })
}catch(err){
console.error(err)
throw err
}
return done
}
while(!await uploadChunk(inputStream)){
console.log('uploaded one chunk', parts.length)
}
// mark the upload as complete and ask S3 to glue the chunks together
const completRes = await client.send(
new CompleteMultipartUploadCommand({
...multipart,
MultipartUpload: { Parts: parts },
})
)
console.log({completRes})
clearInterval(intervalMonitorMemoryUsage)
}
async function simplePut(url , inputStream){
const {
allowUnauthorized,
host,
path,
username,
password,
protocol,
region = guessAwsRegion(host),
} = parse(url)
const client = new S3Client({
apiVersion: '2006-03-01',
endpoint: `${protocol}://s3.us-east-2.amazonaws.com`,
forcePathStyle: true,
credentials: {
accessKeyId: username,
secretAccessKey: password,
},
region,
requestHandler: new NodeHttpHandler({
socketTimeout: 600000,
httpAgent: new HttpAgent({
keepAlive: true,
}),
httpsAgent: new HttpsAgent({
rejectUnauthorized: !allowUnauthorized,
keepAlive: true,
}),
}),
})
const pathParts = split(path)
const bucket = pathParts.shift()
const dir = join(...pathParts)
//const hasObjectLock = await client.send(new GetObjectLockConfigurationCommand({Bucket: bucket}))
//console.log(hasObjectLock.ObjectLockConfiguration?.ObjectLockEnabled === 'Enabled')
const md5 = await createMD5('/tmp/1g')
console.log({md5})
const command = new PutObjectCommand({
Bucket: bucket, Key: join(dir, 'simple'),
ContentMD5: md5,
ContentLength: 1024*1024*1024,
Body: inputStream
})
const intervalMonitorMemoryUsage = setInterval(()=>console.log(Math.round(process.memoryUsage().rss/1024/1024)), 2000)
const res = await client.send(command)
/*
const presignedUrl = await getSignedUrl(client, command,{ expiresIn: 3600 });
const res = await fetch(presignedUrl, {
method: 'PUT',
body:inputStream,
duplex: "half",
headers:{
"x-amz-decoded-content-length": 1024*1024*1024,
"content-md5" : md5
}
})*/
clearInterval(intervalMonitorMemoryUsage)
console.log(res)
}
async function createMD5(filePath) {
const input = await fs.open(filePath) // big ass file
return new Promise((res, rej) => {
const hash = createHash('md5');
const rStream = input.createReadStream(filePath);
rStream.on('data', (data) => {
hash.update(data);
});
rStream.on('end', () => {
res(hash.digest('base64'));
});
})
}
const input = await fs.open('/tmp/1g') // big ass file
const inputStream = input.createReadStream()
const remoteUrl = ""
v2(remoteUrl,inputStream)
//simplePut(remoteUrl,inputStream)

View File

@@ -4,7 +4,6 @@
- Ability to migrate selected VMs to another host (PR [#7040](https://github.com/vatesfr/xen-orchestra/pull/7040))
- Ability to snapshot selected VMs (PR [#7021](https://github.com/vatesfr/xen-orchestra/pull/7021))
- Add Patches to Pool Dashboard (PR [#6709](https://github.com/vatesfr/xen-orchestra/pull/6709))
## **0.1.3** (2023-09-01)

View File

@@ -1,71 +0,0 @@
<template>
<UiCardSpinner v-if="!areSomeLoaded" />
<UiTable v-else class="hosts-patches-table" :class="{ desktop: isDesktop }">
<tr v-for="patch in sortedPatches" :key="patch.$id">
<th>{{ patch.name }}</th>
<td>
<div class="version">
{{ patch.version }}
<template v-if="hasMultipleHosts">
<UiSpinner v-if="!areAllLoaded" />
<UiCounter
v-else
v-tooltip="{
placement: 'left',
content: $t('n-hosts-awaiting-patch', {
n: patch.$hostRefs.size,
}),
}"
:value="patch.$hostRefs.size"
class="counter"
color="error"
/>
</template>
</div>
</td>
</tr>
</UiTable>
</template>
<script lang="ts" setup>
import UiCardSpinner from "@/components/ui/UiCardSpinner.vue";
import UiCounter from "@/components/ui/UiCounter.vue";
import UiSpinner from "@/components/ui/UiSpinner.vue";
import UiTable from "@/components/ui/UiTable.vue";
import type { XenApiPatchWithHostRefs } from "@/composables/host-patches.composable";
import { vTooltip } from "@/directives/tooltip.directive";
import { useUiStore } from "@/stores/ui.store";
import { computed } from "vue";
const props = defineProps<{
patches: XenApiPatchWithHostRefs[];
hasMultipleHosts: boolean;
areAllLoaded: boolean;
areSomeLoaded: boolean;
}>();
const sortedPatches = computed(() =>
[...props.patches].sort(
(patch1, patch2) => patch1.changelog.date - patch2.changelog.date
)
);
const { isDesktop } = useUiStore();
</script>
<style lang="postcss" scoped>
.hosts-patches-table.desktop {
max-width: 45rem;
}
.version {
display: flex;
gap: 1rem;
justify-content: flex-end;
align-items: center;
}
.counter {
font-size: 1rem;
}
</style>

View File

@@ -1,41 +0,0 @@
<template>
<UiCard>
<UiCardTitle class="patches-title">
{{ $t("patches") }}
<template v-if="areAllLoaded" #right>
{{ $t("n-missing", { n: count }) }}
</template>
</UiCardTitle>
<div class="table-container">
<HostPatches
:are-all-loaded="areAllLoaded"
:are-some-loaded="areSomeLoaded"
:has-multiple-hosts="hosts.length > 1"
:patches="patches"
/>
</div>
</UiCard>
</template>
<script lang="ts" setup>
import HostPatches from "@/components/HostPatchesTable.vue";
import UiCard from "@/components/ui/UiCard.vue";
import UiCardTitle from "@/components/ui/UiCardTitle.vue";
import { useHostPatches } from "@/composables/host-patches.composable";
import { useHostCollection } from "@/stores/xen-api/host.store";
const { records: hosts } = useHostCollection();
const { count, patches, areSomeLoaded, areAllLoaded } = useHostPatches(hosts);
</script>
<style lang="postcss" scoped>
.patches-title {
--section-title-right-color: var(--color-red-vates-base);
}
.table-container {
max-height: 40rem;
overflow: auto;
}
</style>

View File

@@ -34,9 +34,7 @@ const areSomeVmsSnapshoting = computed(() =>
vms.value.some((vm) => isOperationPending(vm, VM_OPERATION.SNAPSHOT))
);
const isDisabled = computed(
() => vms.value.length === 0 || areSomeVmsSnapshoting.value
);
const isDisabled = computed(() => vms.value.length === 0 || areSomeVmsSnapshoting.value);
const handleSnapshot = () => {
const vmRefsToSnapshot = Object.fromEntries(

View File

@@ -1,95 +0,0 @@
import type { XenApiHost } from "@/libs/xen-api/xen-api.types";
import { useHostStore } from "@/stores/xen-api/host.store";
import type { XenApiPatch } from "@/types/xen-api";
import { type Pausable, useTimeoutPoll, watchArray } from "@vueuse/core";
import { computed, type MaybeRefOrGetter, reactive, toValue } from "vue";
export type XenApiPatchWithHostRefs = XenApiPatch & { $hostRefs: Set<string> };
type HostConfig = {
timeoutPoll: Pausable;
patches: XenApiPatch[];
isLoaded: boolean;
};
export const useHostPatches = (hosts: MaybeRefOrGetter<XenApiHost[]>) => {
const hostStore = useHostStore();
const configByHost = reactive(new Map<string, HostConfig>());
const fetchHostPatches = async (hostRef: XenApiHost["$ref"]) => {
if (!configByHost.has(hostRef)) {
return;
}
const config = configByHost.get(hostRef)!;
config.patches = await hostStore.fetchMissingPatches(hostRef);
config.isLoaded = true;
};
const registerHost = (hostRef: XenApiHost["$ref"]) => {
if (configByHost.has(hostRef)) {
return;
}
const timeoutPoll = useTimeoutPoll(() => fetchHostPatches(hostRef), 10000, {
immediate: true,
});
configByHost.set(hostRef, {
timeoutPoll,
patches: [],
isLoaded: false,
});
};
const unregisterHost = (hostRef: string) => {
configByHost.get(hostRef)?.timeoutPoll.pause();
configByHost.delete(hostRef);
};
watchArray(
() => toValue(hosts).map((host) => host.$ref),
(_n, _p, addedRefs, removedRefs) => {
addedRefs.forEach((ref) => registerHost(ref));
removedRefs?.forEach((ref) => unregisterHost(ref));
},
{ immediate: true }
);
const patches = computed(() => {
const records = new Map<string, XenApiPatchWithHostRefs>();
configByHost.forEach(({ patches }, hostRef) => {
patches.forEach((patch) => {
const record = records.get(patch.$id);
if (record !== undefined) {
return record.$hostRefs.add(hostRef);
}
records.set(patch.$id, {
...patch,
$hostRefs: new Set([hostRef]),
});
});
});
return Array.from(records.values());
});
const count = computed(() => patches.value.length);
const areAllLoaded = computed(() =>
Array.from(configByHost.values()).every((config) => config.isLoaded)
);
const areSomeLoaded = computed(
() =>
areAllLoaded.value ||
Array.from(configByHost.values()).some((config) => config.isLoaded)
);
return { patches, count, areAllLoaded, areSomeLoaded };
};

View File

@@ -621,7 +621,7 @@ export interface XenApiBond extends XenApiRecord<"bond"> {
export type XenApiEvent<
RelationType extends ObjectType,
XRecord extends ObjectTypeToRecord<RelationType>,
XRecord extends ObjectTypeToRecord<RelationType>
> = {
id: string;
class: RelationType;

View File

@@ -87,8 +87,6 @@
"login": "Login",
"migrate": "Migrate",
"migrate-n-vms": "Migrate 1 VM | Migrate {n} VMs",
"n-hosts-awaiting-patch": "{n} host is awaiting this patch | {n} hosts are awaiting this patch",
"n-missing": "{n} missing",
"n-vms": "1 VM | {n} VMs",
"name": "Name",
"network": "Network",
@@ -109,7 +107,6 @@
"page-not-found": "This page is not to be found…",
"password": "Password",
"password-invalid": "Password invalid",
"patches": "Patches",
"pause": "Pause",
"please-confirm": "Please confirm",
"pool-cpu-usage": "Pool CPU Usage",

View File

@@ -87,8 +87,6 @@
"login": "Connexion",
"migrate": "Migrer",
"migrate-n-vms": "Migrer 1 VM | Migrer {n} VMs",
"n-hosts-awaiting-patch": "{n} hôte attend ce patch | {n} hôtes attendent ce patch",
"n-missing": "{n} manquant | {n} manquants",
"n-vms": "1 VM | {n} VMs",
"name": "Nom",
"network": "Réseau",
@@ -109,7 +107,6 @@
"page-not-found": "Cette page est introuvable…",
"password": "Mot de passe",
"password-invalid": "Mot de passe incorrect",
"patches": "Patches",
"pause": "Pause",
"please-confirm": "Veuillez confirmer",
"pool-cpu-usage": "Utilisation CPU du Pool",

View File

@@ -4,7 +4,6 @@ import type { XenApiHost } from "@/libs/xen-api/xen-api.types";
import { useXenApiStore } from "@/stores/xen-api.store";
import { createUseCollection } from "@/stores/xen-api/create-use-collection";
import { useHostMetricsStore } from "@/stores/xen-api/host-metrics.store";
import type { XenApiPatch } from "@/types/xen-api";
import { defineStore } from "pinia";
import { computed } from "vue";
@@ -43,36 +42,10 @@ export const useHostStore = defineStore("xen-api-host", () => {
});
}) as GetStats<XenApiHost>;
const fetchMissingPatches = async (
hostRef: XenApiHost["$ref"]
): Promise<XenApiPatch[]> => {
const xenApiStore = useXenApiStore();
const rawPatchesAsString = await xenApiStore
.getXapi()
.call<string>("host.call_plugin", [
hostRef,
"updater.py",
"check_update",
{},
]);
const rawPatches = JSON.parse(rawPatchesAsString) as Omit<
XenApiPatch,
"$id"
>[];
return rawPatches.map((rawPatch) => ({
...rawPatch,
$id: `${rawPatch.name}-${rawPatch.version}`,
}));
};
return {
...context,
runningHosts,
getStats,
fetchMissingPatches,
};
});

View File

@@ -21,19 +21,3 @@ export interface XenApiAlarm<RelationType extends RawObjectType>
triggerLevel: number;
type: XenApiAlarmType;
}
export type XenApiPatch = {
$id: string;
name: string;
description: string;
license: string;
release: string;
size: number;
url: string;
version: string;
changelog: {
date: number;
description: string;
author: string;
};
};

View File

@@ -3,7 +3,7 @@
<UiCardGroup>
<PoolDashboardStatus />
<PoolDashboardAlarms class="alarms" />
<PoolDashboardHostsPatches />
<UiCardComingSoon title="Patches" />
</UiCardGroup>
<UiCardGroup>
<UiCardGroup>
@@ -36,12 +36,12 @@ import PoolDashboardTasks from "@/components/pool/dashboard/PoolDashboardTasks.v
import PoolCpuUsageChart from "@/components/pool/dashboard/cpuUsage/PoolCpuUsageChart.vue";
import PoolDashboardCpuProvisioning from "@/components/pool/dashboard/PoolDashboardCpuProvisioning.vue";
import PoolDashboardCpuUsage from "@/components/pool/dashboard/PoolDashboardCpuUsage.vue";
import PoolDashboardHostsPatches from "@/components/pool/dashboard/PoolDashboardHostsPatches.vue";
import PoolDashboardNetworkChart from "@/components/pool/dashboard/PoolDashboardNetworkChart.vue";
import PoolDashboardRamUsage from "@/components/pool/dashboard/PoolDashboardRamUsage.vue";
import PoolDashboardStatus from "@/components/pool/dashboard/PoolDashboardStatus.vue";
import PoolDashboardStorageUsage from "@/components/pool/dashboard/PoolDashboardStorageUsage.vue";
import PoolDashboardRamUsageChart from "@/components/pool/dashboard/ramUsage/PoolRamUsage.vue";
import UiCardComingSoon from "@/components/ui/UiCardComingSoon.vue";
import UiCardGroup from "@/components/ui/UiCardGroup.vue";
import { useHostCollection } from "@/stores/xen-api/host.store";
import { useVmCollection } from "@/stores/xen-api/vm.store";

View File

@@ -1,7 +1,7 @@
{
"private": true,
"name": "@xen-orchestra/proxy",
"version": "0.26.34",
"version": "0.26.33",
"license": "AGPL-3.0-or-later",
"description": "XO Proxy used to remotely execute backup jobs",
"keywords": [
@@ -32,7 +32,7 @@
"@vates/decorate-with": "^2.0.0",
"@vates/disposable": "^0.1.4",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/backups": "^0.42.1",
"@xen-orchestra/backups": "^0.42.0",
"@xen-orchestra/fs": "^4.1.0",
"@xen-orchestra/log": "^0.6.0",
"@xen-orchestra/mixin": "^0.1.0",

View File

@@ -43,7 +43,7 @@
"pw": "^0.0.4",
"xdg-basedir": "^4.0.0",
"xo-lib": "^0.11.1",
"xo-vmdk-to-vhd": "^2.5.6"
"xo-vmdk-to-vhd": "^2.5.5"
},
"devDependencies": {
"@babel/cli": "^7.0.0",

View File

@@ -10,7 +10,7 @@
"@xen-orchestra/log": "^0.6.0",
"lodash": "^4.17.21",
"node-fetch": "^3.3.0",
"vhd-lib": "^4.6.0"
"vhd-lib": "^4.5.0"
},
"engines": {
"node": ">=14"

View File

@@ -34,7 +34,7 @@
"json-rpc-protocol": "^0.13.2",
"lodash": "^4.17.15",
"promise-toolbox": "^0.21.0",
"vhd-lib": "^4.6.0",
"vhd-lib": "^4.5.0",
"xo-common": "^0.8.0"
},
"private": false,

View File

@@ -134,23 +134,22 @@ class Vdi {
if (stream.length === undefined) {
throw new Error('Trying to import a VDI without a length field. Please report this error to Xen Orchestra.')
}
const vdi = await this.getRecord('VDI', ref)
const sr = await this.getRecord('SR', vdi.SR)
try {
await this.putResource(cancelToken, stream, '/import_raw_vdi/', {
query: {
format,
vdi: ref,
},
task: await this.task_create(`Importing content into VDI ${vdi.name_label} on SR ${sr.name_label}`),
task: await this.task_create(`Importing content into VDI ${await this.getField('VDI', ref, 'name_label')}`),
})
} catch (error) {
// augment the error with as much relevant info as possible
const poolMaster = await this.getRecord('host', this.pool.master)
const [poolMaster, vdi] = await Promise.all([
this.getRecord('host', this.pool.master),
this.getRecord('VDI', ref),
])
error.pool_master = poolMaster
error.SR = sr
error.SR = await this.getRecord('SR', vdi.SR)
error.VDI = vdi
throw error
}

View File

@@ -1,40 +1,5 @@
# ChangeLog
## **next**
### Enhancements
- [Netbox] Don't delete VMs that have been created manually in XO-synced cluster [Forum#7639](https://xcp-ng.org/forum/topic/7639) (PR [#7008](https://github.com/vatesfr/xen-orchestra/pull/7008))
- [Kubernetes] _Search domains_ field is now optional [#7028](https://github.com/vatesfr/xen-orchestra/pull/7028)
- [Patches] Support new XenServer Updates system. See [our documentation](https://xen-orchestra.com/docs/updater.html#xenserver-updates). (PR [#7044](https://github.com/vatesfr/xen-orchestra/pull/7044))
- [REST API] Hosts' audit and system logs can be downloaded [#3968](https://github.com/vatesfr/xen-orchestra/issues/3968) (PR [#7048](https://github.com/vatesfr/xen-orchestra/pull/7048))
- [Host/Advanced] New button to download system logs [#3968](https://github.com/vatesfr/xen-orchestra/issues/3968) (PR [#7048](https://github.com/vatesfr/xen-orchestra/pull/7048))
- [Home/Hosts, Pools] Display host brand and version (PR [#7027](https://github.com/vatesfr/xen-orchestra/pull/7027))
- [SR] Ability to reclaim space [#1204](https://github.com/vatesfr/xen-orchestra/issues/1204) (PR [#7054](https://github.com/vatesfr/xen-orchestra/pull/7054))
- [XOA] New button to restart XO Server directly from the UI (PR [#7056](https://github.com/vatesfr/xen-orchestra/pull/7056))
### Bug fixes
- [Backup/Restore] Fix `Cannot read properties of undefined (reading 'id')` error when restoring via an XO Proxy (PR [#7026](https://github.com/vatesfr/xen-orchestra/pull/7026))
- [Google/GitHub Auth] Fix `Internal Server Error` (xo-server: `Cannot read properties of undefined (reading 'id')`) when logging in with Google or GitHub [Forum#7729](https://xcp-ng.org/forum/topic/7729) (PRs [#7031](https://github.com/vatesfr/xen-orchestra/pull/7031) [#7032](https://github.com/vatesfr/xen-orchestra/pull/7032))
- [Jobs] Fix schedules not being displayed on first load [#6968](https://github.com/vatesfr/xen-orchestra/issues/6968) (PR [#7034](https://github.com/vatesfr/xen-orchestra/pull/7034))
- [OVA Export] Fix support of disks with more than 8.2GiB of content (PR [#7047](https://github.com/vatesfr/xen-orchestra/pull/7047))
- [Backup] Fix `VHDFile implementation is not compatible with encrypted remote` when using VHD directory with encryption (PR [#7045](https://github.com/vatesfr/xen-orchestra/pull/7045))
- [Backup/Mirror] Fix `xo:fs:local WARN lock compromised` when mirroring a Backup Repository to a local/NFS/SMB repository ([#7043](https://github.com/vatesfr/xen-orchestra/pull/7043))
- [Ova import] Fix importing VM with collision in disk position (PR [#7051](https://github.com/vatesfr/xen-orchestra/pull/7051)) (issue [7046](https://github.com/vatesfr/xen-orchestra/issues/7046))
### Released packages
- vhd-lib 4.6.0
- @xen-orchestra/backups 0.42.1
- @xen-orchestra/proxy 0.26.34
- xo-vmdk-to-vhd 2.5.6
- xo-server 5.123.0
- xo-server-auth-github 0.3.1
- xo-server-auth-google 0.3.1
- xo-server-netbox 1.3.0
- xo-web 5.125.0
## **5.86.1** (2023-09-07)
<img id="latest" src="https://badgen.net/badge/channel/latest/yellow" alt="Channel: latest" />

View File

@@ -7,13 +7,26 @@
> Users must be able to say: “Nice enhancement, I'm eager to test it”
- [Netbox] Don't delete VMs that have been created manually in XO-synced cluster [Forum#7639](https://xcp-ng.org/forum/topic/7639) (PR [#7008](https://github.com/vatesfr/xen-orchestra/pull/7008))
- [Kubernetes] _Search domains_ field is now optional [#7028](https://github.com/vatesfr/xen-orchestra/pull/7028)
- [Patches] Support new XenServer Updates system. See [our documentation](https://xen-orchestra.com/docs/updater.html#xenserver-updates). (PR [#7044](https://github.com/vatesfr/xen-orchestra/pull/7044))
- [REST API] Hosts' audit and system logs can be downloaded [#3968](https://github.com/vatesfr/xen-orchestra/issues/3968) (PR [#7048](https://github.com/vatesfr/xen-orchestra/pull/7048))
- [Host/Advanced] New button to download system logs [#3968](https://github.com/vatesfr/xen-orchestra/issues/3968) (PR [#7048](https://github.com/vatesfr/xen-orchestra/pull/7048))
- [Home/Hosts, Pools] Display host brand and version (PR [#7027](https://github.com/vatesfr/xen-orchestra/pull/7027))
- [SR] Ability to reclaim space [#1204](https://github.com/vatesfr/xen-orchestra/issues/1204) (PR [#7054](https://github.com/vatesfr/xen-orchestra/pull/7054))
- [XOA] New button to restart XO Server directly from the UI (PR [#7056](https://github.com/vatesfr/xen-orchestra/pull/7056))
### Bug fixes
> Users must be able to say: “I had this issue, happy to know it's fixed”
- [Backup/Mirror] Fix backup report not being sent (PR [#7049](https://github.com/vatesfr/xen-orchestra/pull/7049))
- [New VM] Only add MBR to cloud-init drive on Windows VMs to avoid booting issues (e.g. with Talos) (PR [#7050](https://github.com/vatesfr/xen-orchestra/pull/7050))
- [VDI Import] Add the SR name to the corresponding XAPI task (PR [#6979](https://github.com/vatesfr/xen-orchestra/pull/6979))
- [Backup/Restore] Fix `Cannot read properties of undefined (reading 'id')` error when restoring via an XO Proxy (PR [#7026](https://github.com/vatesfr/xen-orchestra/pull/7026))
- [Google/GitHub Auth] Fix `Internal Server Error` (xo-server: `Cannot read properties of undefined (reading 'id')`) when logging in with Google or GitHub [Forum#7729](https://xcp-ng.org/forum/topic/7729) (PRs [#7031](https://github.com/vatesfr/xen-orchestra/pull/7031) [#7032](https://github.com/vatesfr/xen-orchestra/pull/7032))
- [Jobs] Fix schedules not being displayed on first load [#6968](https://github.com/vatesfr/xen-orchestra/issues/6968) (PR [#7034](https://github.com/vatesfr/xen-orchestra/pull/7034))
- [OVA Export] Fix support of disks with more than 8.2GiB of content (PR [#7047](https://github.com/vatesfr/xen-orchestra/pull/7047))
- [Backup] Fix `VHDFile implementation is not compatible with encrypted remote` when using VHD directory with encryption (PR [#7045](https://github.com/vatesfr/xen-orchestra/pull/7045))
- [Backup/Mirror] Fix `xo:fs:local WARN lock compromised` when mirroring a Backup Repository to a local/NFS/SMB repository ([#7043](https://github.com/vatesfr/xen-orchestra/pull/7043))
- [OVA Import] Fix importing VMs with colliding disk positions (PR [#7051](https://github.com/vatesfr/xen-orchestra/pull/7051)) (issue [#7046](https://github.com/vatesfr/xen-orchestra/issues/7046))
### Packages to release
@@ -31,9 +44,13 @@
<!--packages-start-->
- @xen-orchestra/xapi minor
- xo-server-backup-reports patch
- xo-server patch
- xo-web patch
- @xen-orchestra/backups patch
- vhd-lib minor
- xo-vmdk-to-vhd patch
- xo-server minor
- xo-server-auth-github patch
- xo-server-auth-google patch
- xo-server-netbox minor
- xo-web minor
<!--packages-end-->

View File

@@ -31,7 +31,7 @@
"lodash": "^4.17.21",
"promise-toolbox": "^0.21.0",
"uuid": "^9.0.0",
"vhd-lib": "^4.6.0"
"vhd-lib": "^4.5.0"
},
"scripts": {
"postversion": "npm publish",

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "vhd-lib",
"version": "4.6.0",
"version": "4.5.0",
"license": "AGPL-3.0-or-later",
"description": "Primitives for VHD file handling",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/vhd-lib",

View File

@@ -8,6 +8,6 @@
"promise-toolbox": "^0.19.2",
"readable-stream": "^3.1.1",
"throttle": "^1.0.3",
"vhd-lib": "^4.6.0"
"vhd-lib": "^4.5.0"
}
}

View File

@@ -1,6 +1,6 @@
{
"name": "xo-server-auth-github",
"version": "0.3.1",
"version": "0.3.0",
"license": "AGPL-3.0-or-later",
"description": "GitHub authentication plugin for XO-Server",
"keywords": [

View File

@@ -1,6 +1,6 @@
{
"name": "xo-server-auth-google",
"version": "0.3.1",
"version": "0.3.0",
"license": "AGPL-3.0-or-later",
"description": "Google authentication plugin for XO-Server",
"keywords": [

View File

@@ -249,7 +249,7 @@ class BackupReportsXoPlugin {
}),
])
if (job.type === 'backup' || job.type === 'mirrorBackup') {
if (job.type === 'backup') {
return this._ngVmHandler(log, job, schedule, force)
} else if (job.type === 'metadataBackup') {
return this._metadataHandler(log, job, schedule, force)

View File

@@ -1,6 +1,6 @@
{
"name": "xo-server-netbox",
"version": "1.3.0",
"version": "1.2.0",
"license": "AGPL-3.0-or-later",
"description": "Synchronizes pools managed by Xen Orchestra with Netbox",
"keywords": [

View File

@@ -336,8 +336,20 @@ class Netbox {
tags: [],
}
const distro = xoVm.os_version?.distro
if (distro != null) {
let distro = xoVm.os_version?.distro
if (distro !== undefined) {
// In some cases, the version isn't properly parsed by XAPI and
// os_version.major returns X.Y.Z instead of X
const majorVersionMatch = xoVm.os_version.major?.match(/^(\d+)(?:\.\d+){0,2}$/)
if (majorVersionMatch != null) {
distro += ` ${majorVersionMatch[1]}`
} else {
const unameMatch = xoVm.os_version.uname?.match(/^(\d)+/)
if (unameMatch != null) {
distro += ` ${unameMatch[1]}`
}
}
const slug = slugify(distro)
let nbPlatform = find(nbPlatforms, { slug })
if (nbPlatform === undefined) {

View File

@@ -1,7 +1,7 @@
{
"private": true,
"name": "xo-server",
"version": "5.123.0",
"version": "5.122.0",
"license": "AGPL-3.0-or-later",
"description": "Server part of Xen-Orchestra",
"keywords": [
@@ -41,7 +41,7 @@
"@vates/predicates": "^1.1.0",
"@vates/read-chunk": "^1.2.0",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/backups": "^0.42.1",
"@xen-orchestra/backups": "^0.42.0",
"@xen-orchestra/cron": "^1.0.6",
"@xen-orchestra/defined": "^0.0.1",
"@xen-orchestra/emit-async": "^1.0.0",
@@ -128,7 +128,7 @@
"unzipper": "^0.10.5",
"uuid": "^9.0.0",
"value-matcher": "^0.2.0",
"vhd-lib": "^4.6.0",
"vhd-lib": "^4.5.0",
"ws": "^8.2.3",
"xdg-basedir": "^5.1.0",
"xen-api": "^1.3.6",
@@ -136,7 +136,7 @@
"xo-collection": "^0.5.0",
"xo-common": "^0.8.0",
"xo-remote-parser": "^0.9.2",
"xo-vmdk-to-vhd": "^2.5.6"
"xo-vmdk-to-vhd": "^2.5.5"
},
"devDependencies": {
"@babel/cli": "^7.0.0",

View File

@@ -119,7 +119,7 @@ set.resolve = {
// FIXME: set force to false per default when correctly implemented in
// UI.
export async function restart({ bypassBackupCheck = false, host, force = false, suspendResidentVms }) {
export async function restart({ bypassBackupCheck = false, host, force = true, suspendResidentVms }) {
if (bypassBackupCheck) {
log.warn('host.restart with argument "bypassBackupCheck" set to true', { hostId: host.id })
} else {

View File

@@ -884,8 +884,6 @@ export const getAllUnhealthyVdiChainsLength = debounceWithKey(function getAllUnh
return unhealthyVdiChainsLengthBySr
}, 60e3)
getAllUnhealthyVdiChainsLength.permission = 'admin'
// -------------------------------------------------------------------
export function getVdiChainsInfo({ sr }) {

View File

@@ -1328,10 +1328,7 @@ export default class Xapi extends XapiBase {
)
),
])
// only add the MBR for windows VM
if (vm.platform.viridian === 'true') {
buffer = addMbr(buffer)
}
buffer = addMbr(buffer)
const vdi = await this._getOrWaitObject(
await this.VDI_create({
name_label: 'XO CloudConfigDrive',

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "xo-vmdk-to-vhd",
"version": "2.5.6",
"version": "2.5.5",
"license": "AGPL-3.0-or-later",
"description": "JS lib reading and writing .vmdk and .ova files",
"keywords": [
@@ -26,7 +26,7 @@
"pako": "^2.0.4",
"promise-toolbox": "^0.21.0",
"tar-stream": "^2.2.0",
"vhd-lib": "^4.6.0",
"vhd-lib": "^4.5.0",
"xml2js": "^0.4.23"
},
"devDependencies": {

View File

@@ -1,7 +1,7 @@
{
"private": true,
"name": "xo-web",
"version": "5.125.0",
"version": "5.124.1",
"license": "AGPL-3.0-or-later",
"description": "Web interface client for Xen-Orchestra",
"keywords": [
@@ -137,7 +137,7 @@
"xo-common": "^0.8.0",
"xo-lib": "^0.11.1",
"xo-remote-parser": "^0.9.2",
"xo-vmdk-to-vhd": "^2.5.6"
"xo-vmdk-to-vhd": "^2.5.5"
},
"scripts": {
"build": "GIT_HEAD=$(git rev-parse HEAD) NODE_ENV=production gulp build",

View File

@@ -241,8 +241,7 @@ const messages = {
closeTunnel: 'Close tunnel',
createSupportTicket: 'Create a support ticket',
restartXoServer: 'Restart XO Server',
restartXoServerConfirm:
'Restarting XO Server will interrupt any backup job or XO task that is currently running. Xen Orchestra will also be unavailable for a few seconds. Are you sure you want to restart XO Server?',
restartXoServerConfirm: 'Restarting XO Server will interrupt any backup job or XO task that is currently running. Xen Orchestra will also be unavailable for a few seconds. Are you sure you want to restart XO Server?',
openTunnel: 'Open tunnel',
supportCommunity: 'The XOA check and the support tunnel are available in XOA.',
supportTunnel: 'Support tunnel',
@@ -2198,10 +2197,11 @@ const messages = {
pwdChangeError: 'Incorrect password',
pwdChangeErrorBody: 'The old password provided is incorrect. Your password has not been changed.',
changePasswordOk: 'OK',
forgetTokens: 'Forget all authentication tokens',
forgetTokensExplained: 'This prevents authenticating with existing tokens but the one used by the current session',
forgetTokensSuccess: 'Successfully forgot authentication tokens',
forgetTokensError: 'Error while forgetting authentication tokens',
forgetTokens: 'Forget all connection tokens',
forgetTokensExplained:
'This will prevent other clients from authenticating with existing tokens but will not kill active sessions',
forgetTokensSuccess: 'Successfully forgot connection tokens',
forgetTokensError: 'Error while forgetting connection tokens',
sshKeys: 'SSH keys',
newAuthToken: 'New token',
newSshKey: 'New SSH key',