Compare commits


10 Commits

Author SHA1 Message Date
Florent BEAUCHAMP
4d10e261f8 feat(s3): compute sensible chunk size for s3 upload 2023-10-10 14:45:38 +02:00
Florent BEAUCHAMP
84252c3abe feat(fs/s3): compute md5 only when needed 2023-10-10 11:44:55 +02:00
Florent BEAUCHAMP
4fb48e01fa fix(@xen-orchestra/fs): compute md5 only when needed 2023-10-10 11:33:56 +02:00
Florent BEAUCHAMP
516fc3f6ff feat: object lock mode needs content md5
also, uploading an object part with a precalculated md5 and size doesn't consume additional memory compared to presigned + raw upload
2023-10-04 14:26:59 +02:00
Florent BEAUCHAMP
676851ea82 feat(s3): test upload without sdk 2023-10-02 14:49:21 +02:00
Florent BEAUCHAMP
a7a64f4281 fix(fs/s3): throw an error if upload >50GB 2023-10-02 14:31:01 +02:00
Gabriel Gunullu
2e1abad255 feat(xapi/VDI_importContent): add SR name_label to task name_label (#6979) 2023-09-28 16:10:29 +02:00
Julien Fontanet
c7d5b4b063 fix(xo-web/messages): clarify *forget tokens* description
Introduced by c7df11cc6
2023-09-28 15:41:10 +02:00
Julien Fontanet
cc5f4b0996 fix(xo-web/messages): connection token → authentication token
Uniformize naming.
2023-09-28 15:41:06 +02:00
Julien Fontanet
55f627ed83 chore: fix formatting
Introduced by 869f7ffab
2023-09-28 15:37:45 +02:00
32 changed files with 461 additions and 857 deletions

View File

@@ -681,11 +681,13 @@ export class RemoteAdapter {
}
}
async outputStream(path, input, { checksum = true, validator = noop } = {}) {
async outputStream(path, input, { checksum = true, maxStreamLength, streamLength, validator = noop } = {}) {
const container = watchStreamSize(input)
await this._handler.outputStream(path, input, {
checksum,
dirMode: this._dirMode,
maxStreamLength,
streamLength,
async validator() {
await input.task
return validator.apply(this, arguments)
@@ -742,8 +744,15 @@ export class RemoteAdapter {
}
}
readFullVmBackup(metadata) {
return this._handler.createReadStream(resolve('/', dirname(metadata._filename), metadata.xva))
async readFullVmBackup(metadata) {
const xvaPath = resolve('/', dirname(metadata._filename), metadata.xva)
const stream = await this._handler.createReadStream(xvaPath)
try {
stream.length = await this._handler.getSize(xvaPath)
} catch (error) {
warn(`Can't compute length of xva file`, { xvaPath, error })
}
return stream
}
async readVmBackupMetadata(path) {
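
A minimal sketch of how a caller is expected to feed these new options, assuming the adapter and handler above (the path and variable wiring are illustrative):

// exact size known: pass streamLength so the S3 handler can size its parts precisely
const stream = await handler.createReadStream(xvaPath)
stream.length = await handler.getSize(xvaPath)
await adapter.outputStream('xo-vm-backups/example.xva', stream, {
  streamLength: stream.length, // exact length, takes precedence over maxStreamLength
  // maxStreamLength: pass an upper bound instead when only an estimate is available
})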

View File

@@ -29,6 +29,8 @@ export const FullRemote = class FullRemoteVmBackupRunner extends AbstractRemote
writer =>
writer.run({
stream: forkStreamUnpipe(stream),
// stream is copied and transformed, it's not safe to attach additional properties to it
streamLength: stream.length,
timestamp: metadata.timestamp,
vm: metadata.vm,
vmSnapshot: metadata.vmSnapshot,
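
The comment above is why streamLength travels as a separate option: forkStreamUnpipe pipes the source into a fresh stream, and ad-hoc properties do not survive the copy. A minimal illustration with plain Node streams (not the actual helper):

import { PassThrough } from 'stream'

const source = new PassThrough()
source.length = 1234 // custom property attached to the source
const fork = source.pipe(new PassThrough())
console.log(fork.length) // undefined: the fork is a different object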

View File

@@ -35,13 +35,22 @@ export const FullXapi = class FullXapiVmBackupRunner extends AbstractXapi {
useSnapshot: false,
})
)
const vdis = await exportedVm.$getDisks()
let maxStreamLength = 1024 * 1024 // OVF file and tar headers are a few KB, let's stay safe
vdis.forEach(vdiRef => {
const vdi = this._xapi.getObject(vdiRef)
maxStreamLength += vdi.physical_utilisation ?? 0 // at most, the XVA will take the physical usage of the disk
// it can be smaller due to the smaller block size of XVA compared to VHD, and XCP-ng compression
})
const sizeContainer = watchStreamSize(stream)
const timestamp = Date.now()
await this._callWriters(
writer =>
writer.run({
maxStreamLength,
sizeContainer,
stream: forkStreamUnpipe(stream),
timestamp,
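
The estimate above is deliberately an upper bound: each disk contributes at most its physical_utilisation, plus roughly 1 MiB of headroom for the OVF descriptor and tar headers. The same computation as a standalone sketch (vdis is assumed to be an array of resolved VDI records):

const TAR_OVERHEAD = 1024 * 1024 // OVF file + tar headers are a few KB
const maxStreamLength = vdis.reduce(
  (total, vdi) => total + (vdi.physical_utilisation ?? 0),
  TAR_OVERHEAD
)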

View File

@@ -24,7 +24,7 @@ export class FullRemoteWriter extends MixinRemoteWriter(AbstractFullWriter) {
)
}
async _run({ timestamp, sizeContainer, stream, vm, vmSnapshot }) {
async _run({ maxStreamLength, timestamp, sizeContainer, stream, streamLength, vm, vmSnapshot }) {
const settings = this._settings
const job = this._job
const scheduleId = this._scheduleId
@@ -65,6 +65,8 @@ export class FullRemoteWriter extends MixinRemoteWriter(AbstractFullWriter) {
await Task.run({ name: 'transfer' }, async () => {
await adapter.outputStream(dataFilename, stream, {
maxStreamLength,
streamLength,
validator: tmpPath => adapter.isValidXva(tmpPath),
})
return { size: sizeContainer.size }

View File

@@ -1,9 +1,9 @@
import { AbstractWriter } from './_AbstractWriter.mjs'
export class AbstractFullWriter extends AbstractWriter {
async run({ timestamp, sizeContainer, stream, vm, vmSnapshot }) {
async run({ maxStreamLength, timestamp, sizeContainer, stream, streamLength, vm, vmSnapshot }) {
try {
return await this._run({ timestamp, sizeContainer, stream, vm, vmSnapshot })
return await this._run({ maxStreamLength, timestamp, sizeContainer, stream, streamLength, vm, vmSnapshot })
} finally {
// ensure stream is properly closed
stream.destroy()

View File

@@ -25,6 +25,7 @@
"@aws-sdk/lib-storage": "^3.54.0",
"@aws-sdk/middleware-apply-body-checksum": "^3.58.0",
"@aws-sdk/node-http-handler": "^3.54.0",
"@aws-sdk/s3-request-presigner": "^3.421.0",
"@sindresorhus/df": "^3.1.1",
"@vates/async-each": "^1.0.0",
"@vates/coalesce-calls": "^0.1.0",

View File

@@ -189,7 +189,7 @@ export default class RemoteHandlerAbstract {
* @param {number} [options.dirMode]
* @param {(this: RemoteHandlerAbstract, path: string) => Promise<undefined>} [options.validator] Function that will be called before the data is committed to the remote; if it fails, the file should not exist
*/
async outputStream(path, input, { checksum = true, dirMode, validator } = {}) {
async outputStream(path, input, { checksum = true, dirMode, maxStreamLength, streamLength, validator } = {}) {
path = normalizePath(path)
let checksumStream
@@ -201,6 +201,8 @@ export default class RemoteHandlerAbstract {
}
await this._outputStream(path, input, {
dirMode,
maxStreamLength,
streamLength,
validator,
})
if (checksum) {

View File

@@ -5,6 +5,7 @@ import {
CreateMultipartUploadCommand,
DeleteObjectCommand,
GetObjectCommand,
GetObjectLockConfigurationCommand,
HeadObjectCommand,
ListObjectsV2Command,
PutObjectCommand,
@@ -17,7 +18,7 @@ import { getApplyMd5BodyChecksumPlugin } from '@aws-sdk/middleware-apply-body-ch
import { Agent as HttpAgent } from 'http'
import { Agent as HttpsAgent } from 'https'
import { createLogger } from '@xen-orchestra/log'
import { PassThrough, pipeline } from 'stream'
import { PassThrough, Transform, pipeline } from 'stream'
import { parse } from 'xo-remote-parser'
import copyStreamToBuffer from './_copyStreamToBuffer.js'
import guessAwsRegion from './_guessAwsRegion.js'
@@ -30,6 +31,8 @@ import { pRetry } from 'promise-toolbox'
// limits: https://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html
const MAX_PART_SIZE = 1024 * 1024 * 1024 * 5 // 5GB
const MAX_PART_NUMBER = 10000
const MIN_PART_SIZE = 5 * 1024 * 1024
const { warn } = createLogger('xo:fs:s3')
export default class S3Handler extends RemoteHandlerAbstract {
@@ -71,9 +74,6 @@ export default class S3Handler extends RemoteHandlerAbstract {
}),
})
// Workaround for https://github.com/aws/aws-sdk-js-v3/issues/2673
this.#s3.middlewareStack.use(getApplyMd5BodyChecksumPlugin(this.#s3.config))
const parts = split(path)
this.#bucket = parts.shift()
this.#dir = join(...parts)
@@ -223,18 +223,41 @@ export default class S3Handler extends RemoteHandlerAbstract {
}
}
async _outputStream(path, input, { validator }) {
async _outputStream(path, input, { maxStreamLength, streamLength, validator }) {
const maxInputLength = streamLength ?? maxStreamLength
let partSize
if (maxInputLength === undefined) {
warn(`Writing ${path} to an S3 remote without a max size set will cap it at 50GB`, { path })
partSize = MIN_PART_SIZE // min size for S3
} else {
partSize = Math.min(Math.max(Math.ceil(maxInputLength / MAX_PART_NUMBER), MIN_PART_SIZE), MAX_PART_SIZE)
}
// ensure we don't try to upload a stream too big for this part size
let readCounter = 0
const streamCutter = new Transform({
transform(chunk, encoding, callback) {
const MAX_SIZE = MAX_PART_NUMBER * partSize
readCounter += chunk.length
if (readCounter > MAX_SIZE) {
callback(new Error(`read ${readCounter} bytes, maximum size allowed is ${MAX_SIZE} `))
} else {
callback(null, chunk)
}
},
})
// Workaround for "ReferenceError: ReadableStream is not defined"
// https://github.com/aws/aws-sdk-js-v3/issues/2522
const Body = new PassThrough()
pipeline(input, Body, () => {})
pipeline(input, streamCutter, Body, () => {})
const upload = new Upload({
client: this.#s3,
params: {
...this.#createParams(path),
Body,
},
partSize,
leavePartsOnError: false,
})
await upload.done()
@@ -418,6 +441,21 @@ export default class S3Handler extends RemoteHandlerAbstract {
async _closeFile(fd) {}
async _sync() {
await super._sync()
try {
const res = await this.#s3.send(new GetObjectLockConfigurationCommand({ Bucket: this.#bucket }))
if (res.ObjectLockConfiguration?.ObjectLockEnabled === 'Enabled') {
// Workaround for https://github.com/aws/aws-sdk-js-v3/issues/2673
// increases memory consumption in outputStream as it buffers the streams
this.#s3.middlewareStack.use(getApplyMd5BodyChecksumPlugin(this.#s3.config))
}
} catch (error) {
if (error.Code !== 'ObjectLockConfigurationNotFoundError') {
throw error
}
}
}
useVhdDirectory() {
return true
}
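
The part-size logic above follows directly from the S3 multipart limits (at most 10,000 parts of 5 MiB to 5 GiB each): pick the smallest part size that fits the expected input into 10,000 parts, falling back to the 5 MiB minimum, which caps unknown-length uploads at about 50 GB. A standalone sketch of the same arithmetic:

const MAX_PART_NUMBER = 10000
const MIN_PART_SIZE = 5 * 1024 * 1024 // 5 MiB, S3 minimum part size
const MAX_PART_SIZE = 5 * 1024 * 1024 * 1024 // 5 GiB, S3 maximum part size

function computePartSize(maxInputLength) {
  if (maxInputLength === undefined) {
    return MIN_PART_SIZE // unknown length: capped at 10,000 × 5 MiB ≈ 50 GB
  }
  return Math.min(Math.max(Math.ceil(maxInputLength / MAX_PART_NUMBER), MIN_PART_SIZE), MAX_PART_SIZE)
}

// e.g. a 2 TiB stream: ceil(2 TiB / 10,000) ≈ 210 MiB per part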

View File

@@ -0,0 +1,258 @@
import fs from 'fs/promises'
import { getSignedUrl } from "@aws-sdk/s3-request-presigner"
import { createHash } from "crypto";
import {
CompleteMultipartUploadCommand,
CreateMultipartUploadCommand,
GetObjectLockConfigurationCommand,
PutObjectCommand,
S3Client,
UploadPartCommand,
} from '@aws-sdk/client-s3'
import { NodeHttpHandler } from '@aws-sdk/node-http-handler'
import { Agent as HttpAgent } from 'http'
import { Agent as HttpsAgent } from 'https'
import { parse } from 'xo-remote-parser'
import { join, split } from './dist/path.js'
import guessAwsRegion from './dist/_guessAwsRegion.js'
import { PassThrough } from 'stream'
import { readChunk } from '@vates/read-chunk'
import { pFromCallback } from 'promise-toolbox'
async function v2(url, inputStream){
const {
allowUnauthorized,
host,
path,
username,
password,
protocol,
region = guessAwsRegion(host),
} = parse(url)
const client = new S3Client({
apiVersion: '2006-03-01',
endpoint: `${protocol}://s3.us-east-2.amazonaws.com`,
forcePathStyle: true,
credentials: {
accessKeyId: username,
secretAccessKey: password,
},
region,
requestHandler: new NodeHttpHandler({
socketTimeout: 600000,
httpAgent: new HttpAgent({
keepAlive: true,
}),
httpsAgent: new HttpsAgent({
rejectUnauthorized: !allowUnauthorized,
keepAlive: true,
}),
}),
})
const pathParts = split(path)
const bucket = pathParts.shift()
const dir = join(...pathParts)
const command = new CreateMultipartUploadCommand({
Bucket: bucket, Key: join(dir, 'flov2')
})
const multipart = await client.send(command)
console.log({multipart})
const parts = []
// monitor memory usage
const intervalMonitorMemoryUsage = setInterval(()=>console.log(Math.round(process.memoryUsage().rss/1024/1024)), 2000)
const CHUNK_SIZE = Math.ceil(5*1024*1024*1024*1024/10000) // smallest chunk allowing 5TB upload
async function read(inputStream, maxReadSize){
if(maxReadSize === 0){
return null
}
process.stdout.write('+')
const chunk = await readChunk(inputStream, maxReadSize)
process.stdout.write('@')
return chunk
}
async function write(data, chunkStream, remainingBytes){
const ready = chunkStream.write(data)
if(!ready){
process.stdout.write('.')
await pFromCallback(cb=> chunkStream.once('drain', cb))
process.stdout.write('@')
}
remainingBytes -= data.length
process.stdout.write(remainingBytes+' ')
return remainingBytes
}
async function uploadChunk(inputStream){
const PartNumber = parts.length +1
let done = false
let remainingBytes = CHUNK_SIZE
const maxChunkPartSize = Math.round(CHUNK_SIZE / 1000)
const chunkStream = new PassThrough()
console.log({maxChunkPartSize,CHUNK_SIZE})
let data
let chunkBuffer = []
const hash = createHash('md5');
try{
while((data = await read(inputStream, Math.min(remainingBytes, maxChunkPartSize))) !== null){
chunkBuffer.push(data)
hash.update(data)
remainingBytes -= data.length
//remainingBytes = await write(data, chunkStream, remainingBytes)
}
console.log('data put')
const fullBuffer = Buffer.alloc(maxChunkPartSize,0)
done = remainingBytes > 0
// add padding at the end of the file (not a problem for tar-like formats: xva/ova)
// otherwise the content length will not match and we'll get an UND_ERR_REQ_CONTENT_LENGTH_MISMATCH error
console.log('full padding')
while(remainingBytes > maxChunkPartSize){
chunkBuffer.push(fullBuffer)
hash.update(fullBuffer)
remainingBytes -= maxChunkPartSize
//remainingBytes = await write(fullBuffer,chunkStream, remainingBytes)
}
console.log('full padding done ')
chunkBuffer.push(Buffer.alloc(remainingBytes,0))
hash.update(Buffer.alloc(remainingBytes,0))
console.log('md5 ok ')
//await write(Buffer.alloc(remainingBytes,0),chunkStream, remainingBytes)
// wait for the end of the upload
const command = new UploadPartCommand({
...multipart,
PartNumber,
ContentLength:CHUNK_SIZE,
Body: chunkStream,
ContentMD5 : hash.digest('base64')
})
const promise = client.send(command)
for (const buffer of chunkBuffer){
await write(buffer, chunkStream, remainingBytes)
}
chunkStream.on('error', err => console.error(err))
const res = await promise
console.log({res, headers : res.headers })
parts.push({ ETag:/*res.headers.get('etag') */res.ETag, PartNumber })
}catch(err){
console.error(err)
throw err
}
return done
}
while(!await uploadChunk(inputStream)){
console.log('uploaded one chunk', parts.length)
}
// mark the upload as complete and ask S3 to glue the chunks together
const completRes = await client.send(
new CompleteMultipartUploadCommand({
...multipart,
MultipartUpload: { Parts: parts },
})
)
console.log({completRes})
clearInterval(intervalMonitorMemoryUsage)
}
async function simplePut(url , inputStream){
const {
allowUnauthorized,
host,
path,
username,
password,
protocol,
region = guessAwsRegion(host),
} = parse(url)
const client = new S3Client({
apiVersion: '2006-03-01',
endpoint: `${protocol}://s3.us-east-2.amazonaws.com`,
forcePathStyle: true,
credentials: {
accessKeyId: username,
secretAccessKey: password,
},
region,
requestHandler: new NodeHttpHandler({
socketTimeout: 600000,
httpAgent: new HttpAgent({
keepAlive: true,
}),
httpsAgent: new HttpsAgent({
rejectUnauthorized: !allowUnauthorized,
keepAlive: true,
}),
}),
})
const pathParts = split(path)
const bucket = pathParts.shift()
const dir = join(...pathParts)
//const hasObjectLock = await client.send(new GetObjectLockConfigurationCommand({Bucket: bucket}))
//console.log(hasObjectLock.ObjectLockConfiguration?.ObjectLockEnabled === 'Enabled')
const md5 = await createMD5('/tmp/1g')
console.log({md5})
const command = new PutObjectCommand({
Bucket: bucket, Key: join(dir, 'simple'),
ContentMD5: md5,
ContentLength: 1024*1024*1024,
Body: inputStream
})
const intervalMonitorMemoryUsage = setInterval(()=>console.log(Math.round(process.memoryUsage().rss/1024/1024)), 2000)
const res = await client.send(command)
/*
const presignedUrl = await getSignedUrl(client, command,{ expiresIn: 3600 });
const res = await fetch(presignedUrl, {
method: 'PUT',
body:inputStream,
duplex: "half",
headers:{
"x-amz-decoded-content-length": 1024*1024*1024,
"content-md5" : md5
}
})*/
clearInterval(intervalMonitorMemoryUsage)
console.log(res)
}
async function createMD5(filePath) {
const input = await fs.open(filePath) // big ass file
return new Promise((res, rej) => {
const hash = createHash('md5');
const rStream = input.createReadStream(); // FileHandle.createReadStream takes options, not a path
rStream.on('data', (data) => {
hash.update(data);
});
rStream.on('end', () => {
res(hash.digest('base64'));
});
})
}
const input = await fs.open('/tmp/1g') // big ass file
const inputStream = input.createReadStream()
const remoteUrl = ""
v2(remoteUrl,inputStream)
//simplePut(remoteUrl,inputStream)

View File

@@ -46,7 +46,7 @@ watchEffect(() => {
color: var(--color-blue-scale-500);
border-radius: 0.5em;
background-color: var(--color-blue-scale-100);
z-index: 5;
z-index: 2;
}
.triangle {

View File

@@ -1,10 +1,10 @@
<template>
<MenuItem
v-tooltip="
!areAllVmsAllowedToMigrate && $t('some-selected-vms-can-not-be-migrated')
!areAllVmsMigratable && $t('some-selected-vms-can-not-be-migrated')
"
:busy="isMigrating"
:disabled="isParentDisabled || !areAllVmsAllowedToMigrate"
:disabled="isParentDisabled || !areAllVmsMigratable"
:icon="faRoute"
@click="openModal"
>
@@ -12,69 +12,33 @@
</MenuItem>
<UiModal v-model="isModalOpen">
<FormModalLayout
:disabled="!isReady || isMigrating"
@submit.prevent="handleMigrate"
>
<FormModalLayout :disabled="isMigrating" @submit.prevent="handleMigrate">
<template #title>
{{ $t("migrate-n-vms", { n: selectedRefs.length }) }}
</template>
<div>
<FormInputWrapper :label="$t('select-destination-host')" light>
<FormSelect v-model="selectedHostRef">
<option :value="undefined">{{ $t("please-select") }}</option>
<FormSelect v-model="selectedHost">
<option :value="undefined">
{{ $t("select-destination-host") }}
</option>
<option
v-for="host in availableHosts"
:key="host.$ref"
:value="host.$ref"
:value="host"
>
{{ host.name_label }}
</option>
</FormSelect>
</FormInputWrapper>
<FormInputWrapper
v-if="selectedHostRef !== undefined"
:label="$t('select-optional-migration-network')"
light
>
<FormSelect v-model="selectedMigrationNetworkRef">
<option :value="undefined">{{ $t("please-select") }}</option>
<option
v-for="network in availableNetworks"
:key="network.$ref"
:value="network.$ref"
>
{{ network.name_label }}
</option>
</FormSelect>
</FormInputWrapper>
<FormInputWrapper
v-if="selectedMigrationNetworkRef !== undefined"
:label="$t('select-destination-sr')"
light
>
<FormSelect v-model="selectedSrRef">
<option :value="undefined">{{ $t("please-select") }}</option>
<option v-for="sr in availableSrs" :key="sr.$ref" :value="sr.$ref">
{{ sr.name_label }}
</option>
</FormSelect>
</FormInputWrapper>
</div>
<template #buttons>
<UiButton outlined @click="closeModal" :disabled="false">
<UiButton outlined @click="closeModal">
{{ isMigrating ? $t("close") : $t("cancel") }}
</UiButton>
<UiButton
:busy="isMigrating"
:disabled="!canExecuteMigration"
v-tooltip="notMigratableReason ?? false"
type="submit"
>
<UiButton :busy="isMigrating" :disabled="!isValid" type="submit">
{{ $t("migrate-n-vms", { n: selectedRefs.length }) }}
</UiButton>
</template>
@@ -96,7 +60,6 @@ import { DisabledContext } from "@/context";
import { vTooltip } from "@/directives/tooltip.directive";
import type { XenApiVm } from "@/libs/xen-api/xen-api.types";
import { faRoute } from "@fortawesome/free-solid-svg-icons";
import { useI18n } from "vue-i18n";
const props = defineProps<{
selectedRefs: XenApiVm["$ref"][];
@@ -104,40 +67,29 @@ const props = defineProps<{
const isParentDisabled = useContext(DisabledContext);
const { t } = useI18n();
const {
open: openModal,
isOpen: isModalOpen,
close: closeModal,
} = useModal({
confirmClose: () => {
if (!isMigrating.value) {
return true;
}
return confirm(t("migration-close-warning"));
},
onClose: () => (selectedHostRef.value = undefined),
onClose: () => (selectedHost.value = undefined),
});
const {
isReady,
selectedHostRef,
selectedMigrationNetworkRef,
selectedSrRef,
selectedHost,
availableHosts,
availableNetworks,
availableSrs,
isValid,
migrate,
isMigrating,
canExecuteMigration,
notMigratableReason,
areAllVmsAllowedToMigrate,
areAllVmsMigratable,
} = useVmMigration(() => props.selectedRefs);
const handleMigrate = async () => {
await migrate();
closeModal();
try {
await migrate();
closeModal();
} catch (e) {
console.error("Error while migrating", e);
}
};
</script>

View File

@@ -95,14 +95,13 @@
import MenuItem from "@/components/menu/MenuItem.vue";
import PowerStateIcon from "@/components/PowerStateIcon.vue";
import UiIcon from "@/components/ui/icon/UiIcon.vue";
import { VM_OPERATION, VM_POWER_STATE } from "@/libs/xen-api/xen-api.enums";
import type { XenApiHost, XenApiVm } from "@/libs/xen-api/xen-api.types";
import { useXenApiStore } from "@/stores/xen-api.store";
import { useHostMetricsCollection } from "@/stores/xen-api/host-metrics.store";
import { useHostCollection } from "@/stores/xen-api/host.store";
import { useHostMetricsCollection } from "@/stores/xen-api/host-metrics.store";
import { usePoolCollection } from "@/stores/xen-api/pool.store";
import { useVmCollection } from "@/stores/xen-api/vm.store";
import type { MaybeArray } from "@/types";
import type { XenApiHost, XenApiVm } from "@/libs/xen-api/xen-api.types";
import { VM_POWER_STATE, VM_OPERATION } from "@/libs/xen-api/xen-api.enums";
import { useXenApiStore } from "@/stores/xen-api.store";
import {
faCirclePlay,
faMoon,
@@ -149,7 +148,7 @@ const areVmsPaused = computed(() =>
vms.value.every((vm) => vm.power_state === VM_POWER_STATE.PAUSED)
);
const areOperationsPending = (operation: MaybeArray<VM_OPERATION>) =>
const areOperationsPending = (operation: VM_OPERATION | VM_OPERATION[]) =>
vms.value.some((vm) => isOperationPending(vm, operation));
const areVmsBusyToStart = computed(() =>

View File

@@ -1,329 +1,82 @@
import { areCollectionsReady, sortRecordsByNameLabel } from "@/libs/utils";
import { VBD_TYPE, VM_OPERATION } from "@/libs/xen-api/xen-api.enums";
import type {
XenApiHost,
XenApiNetwork,
XenApiSr,
XenApiVdi,
XenApiVm,
} from "@/libs/xen-api/xen-api.types";
import { sortRecordsByNameLabel } from "@/libs/utils";
import { VM_OPERATION } from "@/libs/xen-api/xen-api.enums";
import type { XenApiHost, XenApiVm } from "@/libs/xen-api/xen-api.types";
import { useXenApiStore } from "@/stores/xen-api.store";
import { useHostCollection } from "@/stores/xen-api/host.store";
import { useNetworkCollection } from "@/stores/xen-api/network.store";
import { usePbdCollection } from "@/stores/xen-api/pbd.store";
import { usePifCollection } from "@/stores/xen-api/pif.store";
import { usePoolCollection } from "@/stores/xen-api/pool.store";
import { useSrCollection } from "@/stores/xen-api/sr.store";
import { useVbdCollection } from "@/stores/xen-api/vbd.store";
import { useVdiCollection } from "@/stores/xen-api/vdi.store";
import { useVmCollection } from "@/stores/xen-api/vm.store";
import type { MaybeArray } from "@/types";
import type { VmMigrationData } from "@/types/xen-api";
import { useMemoize } from "@vueuse/core";
import { castArray } from "lodash-es";
import type { MaybeRefOrGetter } from "vue";
import { computed, ref, toValue, watch } from "vue";
import { useI18n } from "vue-i18n";
import { computed, ref, toValue } from "vue";
export const useVmMigration = (
vmRefsToMigrate: MaybeRefOrGetter<MaybeArray<XenApiVm["$ref"]>>
vmRefs: MaybeRefOrGetter<XenApiVm["$ref"] | XenApiVm["$ref"][]>
) => {
const xapi = useXenApiStore().getXapi();
const poolCollection = usePoolCollection();
const hostCollection = useHostCollection();
const vmCollection = useVmCollection();
const vbdCollection = useVbdCollection();
const vdiCollection = useVdiCollection();
const srCollection = useSrCollection();
const networkCollection = useNetworkCollection();
const pbdCollection = usePbdCollection();
const pifCollection = usePifCollection();
const isReady = areCollectionsReady(
poolCollection,
hostCollection,
vmCollection,
vbdCollection,
vdiCollection,
srCollection,
networkCollection,
pbdCollection,
pifCollection
);
const { pool } = poolCollection;
const { getByOpaqueRef: getHost, records: hosts } = hostCollection;
const {
getByOpaqueRefs: getVms,
isOperationPending,
isOperationAllowed,
} = vmCollection;
const { getByOpaqueRefs: getVbds } = vbdCollection;
const { getByOpaqueRef: getVdi } = vdiCollection;
const { getByOpaqueRef: getSr } = srCollection;
const {
getByOpaqueRef: getNetwork,
getByOpaqueRefs: getNetworks,
getByUuid: getNetworkByUuid,
} = networkCollection;
const { getByOpaqueRefs: getPbds } = pbdCollection;
const { getByOpaqueRefs: getPifs } = pifCollection;
const selectedHostRef = ref<XenApiHost["$ref"]>();
const selectedHost = computed(() => getHost(selectedHostRef.value));
const selectedMigrationNetworkRef = ref<XenApiNetwork["$ref"]>();
const selectedMigrationNetwork = computed(() =>
getNetwork(selectedMigrationNetworkRef.value)
);
const selectedSrRef = ref<XenApiSr["$ref"]>();
const selectedSr = computed(() => getSr(selectedSrRef.value));
const isSimpleMigration = computed(
() => selectedMigrationNetworkRef.value === undefined
);
const availableHosts = computed(() =>
hosts.value
.filter((host) =>
vmsToMigrate.value.some((vm) => vm.resident_on !== host.$ref)
)
.sort(sortRecordsByNameLabel)
);
const getPifsForSelectedHost = () =>
getPifs(selectedHost.value!.PIFs).filter((pif) => pif.IP);
const availableNetworks = computed(() => {
if (!selectedHost.value) {
return [];
}
return getNetworks(getPifsForSelectedHost().map((pif) => pif.network));
});
const availableSrs = computed(() => {
if (!selectedHost.value) {
return [];
}
const srs = new Set<XenApiSr>();
getPbds(selectedHost.value!.PBDs).forEach((pbd) => {
const sr = getSr(pbd.SR);
if (
sr !== undefined &&
sr.content_type !== "iso" &&
sr.physical_size > 0
) {
srs.add(sr);
}
});
return Array.from(srs);
});
const $isMigrating = ref(false);
const selectedHost = ref<XenApiHost>();
const { getByOpaqueRef: getVm } = useVmCollection();
const { records: hosts } = useHostCollection();
const vmsToMigrate = computed(() =>
getVms(castArray(toValue(vmRefsToMigrate)))
const vms = computed(
() =>
castArray(toValue(vmRefs))
.map((vmRef) => getVm(vmRef))
.filter((vm) => vm !== undefined) as XenApiVm[]
);
const getVmVbds = (vm: XenApiVm) =>
getVms(vm.snapshots).reduce(
(acc, vm) => acc.concat(getVbds(vm.VBDs)),
getVbds(vm.VBDs)
);
const getVmVdis = (
vmToMigrate: XenApiVm,
destinationHost: XenApiHost,
forcedSr?: XenApiSr
) =>
getVmVbds(vmToMigrate).reduce(
(acc, vbd) => {
if (vbd.type !== VBD_TYPE.DISK) {
return acc;
}
const vdi = getVdi(vbd.VDI);
if (vdi === undefined || vdi.snapshot_of !== "OpaqueRef:NULL") {
return acc;
}
acc[vdi.$ref] = isSrConnected(vdi.SR, destinationHost)
? vdi.SR
: forcedSr !== undefined
? forcedSr.$ref
: getDefaultSr().$ref;
return acc;
},
{} as Record<XenApiVdi["$ref"], XenApiSr["$ref"]>
);
const isSrConnected = useMemoize(
(srRef: XenApiSr["$ref"], destinationHost: XenApiHost) =>
getSr(srRef)?.PBDs.some((pbdRef) =>
destinationHost.PBDs.includes(pbdRef)
) ?? false
);
const getDefaultMigrationNetwork = () => {
if (selectedHost.value === undefined) {
return undefined;
}
const migrationNetworkUuid = pool.value!.other_config[
"xo:migrationNetwork"
] as XenApiNetwork["uuid"];
const migrationNetwork = getNetworkByUuid(migrationNetworkUuid);
if (migrationNetwork === undefined) {
return undefined;
}
if (
getPifsForSelectedHost().some(
(pif) => pif.network === migrationNetwork.$ref
)
) {
return migrationNetwork;
}
return undefined;
};
const getDefaultSr = () => {
const defaultSr = getSr(pool.value?.default_SR);
if (defaultSr === undefined) {
throw new Error(
`This operation requires a default SR to be set on the pool ${
pool.value!.name_label
}`
);
}
return defaultSr;
};
watch(selectedHost, (host) => {
if (host === undefined) {
selectedMigrationNetworkRef.value = undefined;
return;
}
selectedMigrationNetworkRef.value = getDefaultMigrationNetwork()?.$ref;
});
watch(selectedMigrationNetworkRef, (networkRef) => {
if (networkRef === undefined) {
selectedSrRef.value = undefined;
return;
}
selectedSrRef.value = getDefaultSr().$ref;
});
const isMigrating = computed(
() =>
$isMigrating.value ||
isOperationPending(vmsToMigrate.value, [
VM_OPERATION.MIGRATE_SEND,
VM_OPERATION.POOL_MIGRATE,
])
vms.value.some((vm) =>
Object.values(vm.current_operations).some(
(operation) => operation === VM_OPERATION.POOL_MIGRATE
)
)
);
const areAllVmsAllowedToMigrate = computed(() =>
isOperationAllowed(
vmsToMigrate.value,
isSimpleMigration.value
? VM_OPERATION.POOL_MIGRATE
: VM_OPERATION.MIGRATE_SEND
const availableHosts = computed(() => {
return hosts.value
.filter((host) => vms.value.some((vm) => vm.resident_on !== host.$ref))
.sort(sortRecordsByNameLabel);
});
const areAllVmsMigratable = computed(() =>
vms.value.every((vm) =>
vm.allowed_operations.includes(VM_OPERATION.POOL_MIGRATE)
)
);
const { t } = useI18n();
const notMigratableReason = computed(() => {
if (isMigrating.value) {
return t("vms-migration-error.already-being-migrated");
}
if (!areAllVmsAllowedToMigrate.value) {
return t("vms-migration-error.not-allowed");
}
if (selectedHost.value === undefined) {
return t("vms-migration-error.no-destination-host");
}
if (isSimpleMigration.value) {
return undefined;
}
if (selectedMigrationNetwork.value === undefined) {
return t("vms-migration-error.no-migration-network");
}
if (selectedSr.value === undefined) {
return t("vms-migration-error.no-destination-sr");
}
return undefined;
});
const canExecuteMigration = computed(
() => notMigratableReason.value === undefined
const isValid = computed(
() =>
!isMigrating.value &&
vms.value.length > 0 &&
selectedHost.value !== undefined
);
const migrateSimple = () =>
xapi.vm.migrate(
vmsToMigrate.value.map((vm) => vm.$ref),
selectedHostRef.value!
);
const migrateComplex = () => {
const vmsMigrationMap: Record<XenApiVm["$ref"], VmMigrationData> = {};
vmsToMigrate.value.forEach((vm) => {
vmsMigrationMap[vm.$ref] = {
destinationHost: selectedHostRef.value!,
destinationSr: selectedSrRef.value!,
migrationNetwork: selectedMigrationNetworkRef.value!,
vdisMap: getVmVdis(vm, selectedHost.value!, selectedSr.value!),
};
});
return xapi.vm.migrateComplex(vmsMigrationMap);
};
const migrate = async () => {
if (!canExecuteMigration.value) {
if (!isValid.value) {
return;
}
try {
$isMigrating.value = true;
isSimpleMigration.value ? await migrateSimple() : await migrateComplex();
const hostRef = selectedHost.value!.$ref;
const xapi = useXenApiStore().getXapi();
await xapi.vm.migrate(
vms.value.map((vm) => vm.$ref),
hostRef
);
} finally {
$isMigrating.value = false;
}
};
return {
isReady,
isMigrating,
areAllVmsAllowedToMigrate,
canExecuteMigration,
notMigratableReason,
availableHosts,
availableNetworks,
availableSrs,
selectedHostRef,
selectedMigrationNetworkRef,
selectedSrRef,
selectedHost,
areAllVmsMigratable,
isValid,
migrate,
};
};

View File

@@ -14,20 +14,10 @@ export const useXenApiStoreBaseContext = <
const lastError = ref<string>();
const hasError = computed(() => lastError.value !== undefined);
const getByOpaqueRef = (opaqueRef: XRecord["$ref"] | undefined) => {
if (opaqueRef === undefined) {
return undefined;
}
const getByOpaqueRef = (opaqueRef: XRecord["$ref"]) => {
return recordsByOpaqueRef.get(opaqueRef);
};
const getByOpaqueRefs = (opaqueRefs: XRecord["$ref"][]) => {
return opaqueRefs
.map((opaqueRef) => recordsByOpaqueRef.get(opaqueRef))
.filter((record) => record !== undefined) as XRecord[];
};
const getByUuid = (uuid: XRecord["uuid"]) => {
return recordsByUuid.get(uuid);
};
@@ -59,7 +49,6 @@ export const useXenApiStoreBaseContext = <
lastError,
records,
getByOpaqueRef,
getByOpaqueRefs,
getByUuid,
hasUuid,
add,

View File

@@ -1,11 +1,9 @@
import type { MaybeArray } from "@/types";
import type { Filter } from "@/types/filter";
import { faSquareCheck } from "@fortawesome/free-regular-svg-icons";
import { faFont, faHashtag, faList } from "@fortawesome/free-solid-svg-icons";
import { utcParse } from "d3-time-format";
import humanFormat from "human-format";
import { find, forEach, round, size, sum } from "lodash-es";
import { computed, type Ref } from "vue";
export function sortRecordsByNameLabel(
record1: { name_label: string },
@@ -142,9 +140,5 @@ export function parseRamUsage(
};
}
export const getFirst = <T>(value: MaybeArray<T>): T | undefined =>
export const getFirst = <T>(value: T | T[]): T | undefined =>
Array.isArray(value) ? value[0] : value;
export const areCollectionsReady = (
...collections: { isReady: Ref<boolean> }[]
) => computed(() => collections.every(({ isReady }) => isReady.value));

View File

@@ -16,13 +16,6 @@ import type {
XenApiVm,
} from "@/libs/xen-api/xen-api.types";
import { buildXoObject, typeToRawType } from "@/libs/xen-api/xen-api.utils";
import type { MaybeArray } from "@/types";
import type {
VmRefsWithMigration,
VmRefsWithNameLabel,
VmRefsWithPowerState,
XenApiMigrationParams,
} from "@/types/xen-api";
import { JSONRPCClient } from "json-rpc-2.0";
import { castArray } from "lodash-es";
@@ -274,6 +267,8 @@ export default class XenApi {
return;
}
await new Promise((resolve) => setTimeout(resolve, 2000));
if (this.listenedTypes.length === 0) {
void this.watch();
return;
@@ -282,7 +277,11 @@ export default class XenApi {
const result: {
token: string;
events: XenApiEvent<ObjectType, XenApiRecord<any>>[];
} = await this.call("event.from", [this.listenedTypes, this.fromToken, 60]);
} = await this.call("event.from", [
this.listenedTypes,
this.fromToken,
5.001,
]);
this.fromToken = result.token;
@@ -292,31 +291,35 @@ export default class XenApi {
}
get vm() {
type VmRefs = XenApiVm["$ref"] | XenApiVm["$ref"][];
type VmRefsWithPowerState = Record<
XenApiVm["$ref"],
XenApiVm["power_state"]
>;
type VmRefsWithNameLabel = Record<XenApiVm["$ref"], string>;
return {
delete: (vmRefs: MaybeArray<XenApiVm["$ref"]>) =>
delete: (vmRefs: VmRefs) =>
Promise.all(
castArray(vmRefs).map((vmRef) => this.call("VM.destroy", [vmRef]))
),
start: (vmRefs: MaybeArray<XenApiVm["$ref"]>) =>
start: (vmRefs: VmRefs) =>
Promise.all(
castArray(vmRefs).map((vmRef) =>
this.call("VM.start", [vmRef, false, false])
)
),
startOn: (
vmRefs: MaybeArray<XenApiVm["$ref"]>,
hostRef: XenApiHost["$ref"]
) =>
startOn: (vmRefs: VmRefs, hostRef: XenApiHost["$ref"]) =>
Promise.all(
castArray(vmRefs).map((vmRef) =>
this.call("VM.start_on", [vmRef, hostRef, false, false])
)
),
pause: (vmRefs: MaybeArray<XenApiVm["$ref"]>) =>
pause: (vmRefs: VmRefs) =>
Promise.all(
castArray(vmRefs).map((vmRef) => this.call("VM.pause", [vmRef]))
),
suspend: (vmRefs: MaybeArray<XenApiVm["$ref"]>) => {
suspend: (vmRefs: VmRefs) => {
return Promise.all(
castArray(vmRefs).map((vmRef) => this.call("VM.suspend", [vmRef]))
);
@@ -334,14 +337,14 @@ export default class XenApi {
})
);
},
reboot: (vmRefs: MaybeArray<XenApiVm["$ref"]>, force = false) => {
reboot: (vmRefs: VmRefs, force = false) => {
return Promise.all(
castArray(vmRefs).map((vmRef) =>
this.call(`VM.${force ? "hard" : "clean"}_reboot`, [vmRef])
)
);
},
shutdown: (vmRefs: MaybeArray<XenApiVm["$ref"]>, force = false) => {
shutdown: (vmRefs: VmRefs, force = false) => {
return Promise.all(
castArray(vmRefs).map((vmRef) =>
this.call(`VM.${force ? "hard" : "clean"}_shutdown`, [vmRef])
@@ -357,10 +360,7 @@ export default class XenApi {
)
);
},
migrate: (
vmRefs: MaybeArray<XenApiVm["$ref"]>,
destinationHostRef: XenApiHost["$ref"]
) => {
migrate: (vmRefs: VmRefs, destinationHostRef: XenApiHost["$ref"]) => {
return Promise.all(
castArray(vmRefs).map((vmRef) =>
this.call("VM.pool_migrate", [
@@ -371,49 +371,6 @@ export default class XenApi {
)
);
},
migrateComplex: (vmRefsToMigrate: VmRefsWithMigration) => {
const vmRefs = Object.keys(vmRefsToMigrate) as XenApiVm["$ref"][];
return Promise.all(
vmRefs.map(async (vmRef) => {
const migrateData = vmRefsToMigrate[vmRef];
const params: XenApiMigrationParams = [
vmRef,
await this.call("host.migrate_receive", [
migrateData.destinationHost,
migrateData.migrationNetwork,
{},
]),
true, // Live migration
migrateData.vdisMap,
{}, // vifsMap,
{
force: migrateData.force ? "true" : "false",
},
];
if (!migrateData.bypassAssert) {
await this.call("VM.assert_can_migrate", params);
}
const doMigration = async () => {
try {
await this.call("VM.migrate_send", params);
} catch (error: any) {
if (error?.code === "TOO_MANY_STORAGE_MIGRATES") {
await new Promise((resolve) => setTimeout(resolve, 1000));
await doMigration();
} else {
throw error;
}
}
};
await doMigration();
})
);
},
snapshot: (vmRefsToSnapshot: VmRefsWithNameLabel) => {
const vmRefs = Object.keys(vmRefsToSnapshot) as XenApiVm["$ref"][];

View File

@@ -1,14 +1,9 @@
import type {
AFTER_APPLY_GUIDANCE,
ALLOCATION_ALGORITHM,
BOND_MODE,
CERTIFICATE_TYPE,
DOMAIN_TYPE,
HOST_ALLOWED_OPERATION,
HOST_DISPLAY,
IP_CONFIGURATION_MODE,
IPV6_CONFIGURATION_MODE,
LATEST_SYNCED_UPDATES_APPLIED_STATE,
NETWORK_DEFAULT_LOCKING_MODE,
NETWORK_OPERATION,
NETWORK_PURPOSE,
@@ -19,15 +14,10 @@ import type {
PERSISTENCE_BACKEND,
PGPU_DOM0_ACCESS,
PIF_IGMP_STATUS,
POOL_ALLOWED_OPERATION,
PRIMARY_ADDRESS_TYPE,
SRIOV_CONFIGURATION_MODE,
STORAGE_OPERATION,
TELEMETRY_FREQUENCY,
TUNNEL_PROTOCOL,
UPDATE_AFTER_APPLY_GUIDANCE,
UPDATE_GUIDANCE,
UPDATE_SYNC_FREQUENCY,
VBD_MODE,
VBD_OPERATION,
VBD_TYPE,
@@ -69,12 +59,6 @@ type ObjectTypeToRecordMapping = {
vm: XenApiVm;
vm_guest_metrics: XenApiVmGuestMetrics;
vm_metrics: XenApiVmMetrics;
vbd: XenApiVbd;
vdi: XenApiVdi;
vif: XenApiVif;
pif: XenApiPif;
network: XenApiNetwork;
pbd: XenApiPbd;
};
export type ObjectTypeToRecord<Type extends ObjectType> =
@@ -112,255 +96,26 @@ export type RawXenApiRecord<T extends XenApiRecord<ObjectType>> = Omit<
>;
export interface XenApiPool extends XenApiRecord<"pool"> {
allowed_operations: POOL_ALLOWED_OPERATION[];
blobs: Record<string, XenApiBlob["$ref"]>;
client_certificate_auth_enabled: boolean;
client_certificate_auth_name: string;
coordinator_bias: boolean;
cpu_info: Record<string, string> & { cpu_count: string };
crash_dump_SR: XenApiSr["$ref"];
current_operations: Record<string, POOL_ALLOWED_OPERATION>;
default_SR: XenApiSr["$ref"];
guest_agent_config: Record<string, string>;
gui_config: Record<string, string>;
ha_allow_overcommit: boolean;
ha_cluster_stack: string;
ha_configuration: Record<string, string>;
ha_enabled: boolean;
ha_host_failures_to_tolerate: number;
ha_overcommitted: boolean;
ha_plan_exists_for: number;
ha_statefiles: string[];
health_check_config: Record<string, string>;
igmp_snooping_enabled: boolean;
is_psr_pending: boolean;
last_update_sync: string;
live_patching_disabled: boolean;
cpu_info: {
cpu_count: string;
};
master: XenApiHost["$ref"];
metadata_VDIs: XenApiVdi["$ref"][];
migration_compression: boolean;
name_description: string;
name_label: string;
other_config: Record<string, string>;
policy_no_vendor_device: boolean;
redo_log_enabled: boolean;
redo_log_vdi: XenApiVdi["$ref"];
repositories: XenApiRepository["$ref"][];
repository_proxy_password: XenApiSecret["$ref"];
repository_proxy_url: string;
repository_proxy_username: string;
restrictions: Record<string, string>;
suspend_image_SR: XenApiSr["$ref"];
tags: string[];
telemetry_frequency: TELEMETRY_FREQUENCY;
telemetry_next_collection: string;
telemetry_uuid: XenApiSecret["$ref"];
tls_verification_enabled: boolean;
uefi_certificates: string;
update_sync_day: number;
update_sync_enabled: boolean;
update_sync_frequency: UPDATE_SYNC_FREQUENCY;
vswitch_controller: string;
wlb_enabled: boolean;
wlb_url: string;
wlb_username: string;
wlb_verify_cert: boolean;
}
export interface XenApiSecret extends XenApiRecord<"secret"> {
other_config: Record<string, string>;
value: string;
}
export interface XenApiRepository extends XenApiRecord<"repository"> {
binary_url: string;
gpgkey_path: string;
hash: string;
name_description: string;
name_label: string;
source_url: string;
up_to_date: boolean;
update: boolean;
}
export interface XenApiHost extends XenApiRecord<"host"> {
API_version_major: number;
API_version_minor: number;
API_version_vendor: string;
API_version_vendor_implementation: Record<string, string>;
PBDs: XenApiPbd["$ref"][];
PCIs: XenApiPci["$ref"][];
PGPUs: XenApiPgpu["$ref"][];
PIFs: XenApiPif["$ref"][];
PUSBs: XenApiPusb["$ref"][];
address: string;
allowed_operations: HOST_ALLOWED_OPERATION[];
bios_strings: Record<string, string>;
blobs: Record<string, XenApiBlob["$ref"]>;
capabilities: string[];
certificates: XenApiCertificate["$ref"][];
chipset_info: Record<string, string>;
control_domain: XenApiVm["$ref"];
cpu_configuration: Record<string, string>;
cpu_info: Record<string, string> & { cpu_count: string };
crash_dump_sr: XenApiSr["$ref"];
crashdumps: XenApiHostCrashdump["$ref"][];
current_operations: Record<string, HOST_ALLOWED_OPERATION>;
display: HOST_DISPLAY;
edition: string;
editions: string[];
enabled: boolean;
external_auth_configuration: Record<string, string>;
external_auth_service_name: string;
external_auth_type: string;
features: XenApiFeature["$ref"][];
guest_VCPUs_params: Record<string, string>;
ha_network_peers: string[];
ha_statefiles: string[];
host_CPUs: XenApiHostCpu["$ref"][];
hostname: string;
https_only: boolean;
iscsi_iqn: string;
last_software_update: string;
latest_synced_updates_applied: LATEST_SYNCED_UPDATES_APPLIED_STATE;
license_params: Record<string, string>;
license_server: Record<string, string>;
local_cache_sr: XenApiSr["$ref"];
logging: Record<string, string>;
memory_overhead: number;
name_label: string;
metrics: XenApiHostMetrics["$ref"];
multipathing: boolean;
name_description: string;
name_label: string;
other_config: Record<string, string>;
patches: XenApiHostPatch["$ref"][];
pending_guidances: UPDATE_GUIDANCE[];
power_on_config: Record<string, string>;
power_on_mode: string;
resident_VMs: XenApiVm["$ref"][];
sched_policy: string;
software_version: Record<string, string> & { product_version: string };
ssl_legacy: boolean;
supported_bootloaders: string[];
suspend_image_sr: XenApiSr["$ref"];
tags: string[];
tls_verification_enabled: boolean;
uefi_certificates: string;
updates: XenApiPoolUpdate["$ref"][];
updates_requiring_reboot: XenApiPoolUpdate["$ref"][];
virtual_hardware_platform_versions: number[];
}
export interface XenApiCertificate extends XenApiRecord<"certificate"> {
fingerprint: string;
host: XenApiHost["$ref"];
name: string;
not_after: string;
not_before: string;
type: CERTIFICATE_TYPE;
}
export interface XenApiHostCrashdump extends XenApiRecord<"host_crashdump"> {
host: XenApiHost["$ref"];
other_config: Record<string, string>;
size: number;
timestamp: string;
}
export interface XenApiFeature extends XenApiRecord<"feature"> {
enabled: boolean;
experimental: boolean;
host: XenApiHost["$ref"];
name_description: string;
name_label: string;
version: string;
}
export interface XenApiHostCpu extends XenApiRecord<"host_cpu"> {
family: number;
features: string;
flags: string;
host: XenApiHost["$ref"];
model: number;
modelname: string;
number: number;
other_config: Record<string, string>;
speed: number;
stepping: string;
utilisation: number;
vendor: string;
}
export interface XenApiPbd extends XenApiRecord<"pbd"> {
SR: XenApiSr["$ref"];
currently_attached: boolean;
device_config: Record<string, string>;
host: XenApiHost["$ref"];
other_config: Record<string, string>;
}
export interface XenApiPoolUpdate extends XenApiRecord<"pool_update"> {
after_apply_guidance: UPDATE_AFTER_APPLY_GUIDANCE[];
enforce_homogeneity: boolean;
hosts: XenApiHost["$ref"][];
installation_size: number;
key: string;
name_description: string;
name_label: string;
other_config: Record<string, string>;
vdi: XenApiVdi["$ref"];
version: string;
}
export interface XenApiHostPatch extends XenApiRecord<"host_patch"> {
applied: boolean;
host: XenApiHost["$ref"];
name_description: string;
name_label: string;
other_config: Record<string, string>;
pool_patch: XenApiPoolPatch["$ref"];
size: number;
timestamp_applied: string;
version: string;
}
export interface XenApiPoolPatch extends XenApiRecord<"pool_patch"> {
after_apply_guidance: AFTER_APPLY_GUIDANCE[];
host_patches: XenApiHostPatch["$ref"][];
name_description: string;
name_label: string;
other_config: Record<string, string>;
pool_applied: boolean;
pool_update: XenApiPoolUpdate["$ref"];
size: number;
version: string;
cpu_info: { cpu_count: string };
software_version: { product_version: string };
}
export interface XenApiSr extends XenApiRecord<"sr"> {
PBDs: XenApiPbd["$ref"][];
VDIs: XenApiVdi["$ref"][];
allowed_operations: STORAGE_OPERATION[];
blobs: Record<string, XenApiBlob["$ref"]>;
clustered: boolean;
content_type: string;
current_operations: Record<string, STORAGE_OPERATION>;
introduced_by: XenApiDrTask["$ref"];
is_tools_sr: boolean;
local_cache_enabled: boolean;
name_description: string;
name_label: string;
other_config: Record<string, string>;
physical_size: number;
physical_utilisation: number;
shared: boolean;
sm_config: Record<string, string>;
tags: string[];
type: string;
virtual_allocation: number;
}
export interface XenApiDrTask extends XenApiRecord<"dr_task"> {
introduced_SRs: XenApiSr["$ref"][];
}
export interface XenApiVm extends XenApiRecord<"vm"> {

View File

@@ -25,7 +25,6 @@ export const XEN_API_OBJECT_TYPES = {
pvs_proxy: "PVS_proxy",
pvs_server: "PVS_server",
pvs_site: "PVS_site",
repository: "repository",
sdn_controller: "SDN_controller",
sm: "SM",
sr: "SR",

View File

@@ -87,7 +87,6 @@
"login": "Login",
"migrate": "Migrate",
"migrate-n-vms": "Migrate 1 VM | Migrate {n} VMs",
"migration-close-warning": "Warning: If you close this window, failed migration attempts will not be retried.",
"n-hosts-awaiting-patch": "{n} host is awaiting this patch | {n} hosts are awaiting this patch",
"n-missing": "{n} missing",
"n-vms": "1 VM | {n} VMs",
@@ -113,7 +112,6 @@
"patches": "Patches",
"pause": "Pause",
"please-confirm": "Please confirm",
"please-select": "Please select",
"pool-cpu-usage": "Pool CPU Usage",
"pool-ram-usage": "Pool RAM Usage",
"power-on-for-console": "Power on your VM to access its console",
@@ -136,8 +134,6 @@
"resume": "Resume",
"save": "Save",
"select-destination-host": "Select a destination host",
"select-destination-sr": "Select a destination SR",
"select-optional-migration-network": "Select a migration network (optional)",
"selected-vms-in-execution": "Some selected VMs are running",
"send-us-feedback": "Send us feedback",
"settings": "Settings",
@@ -177,12 +173,5 @@
"vcpus-used": "vCPUs used",
"version": "Version",
"vms": "VMs",
"vms-migration-error": {
"already-being-migrated": "At least one selected VM is already being migrated",
"not-allowed": "Some VMs are not allowed to be migrated",
"no-destination-host": "No destination host has been selected",
"no-migration-network": "No migration network has been selected",
"no-destination-sr": "No destination SR has been selected"
},
"xo-lite-under-construction": "XOLite is under construction"
}

View File

@@ -87,7 +87,6 @@
"login": "Connexion",
"migrate": "Migrer",
"migrate-n-vms": "Migrer 1 VM | Migrer {n} VMs",
"migration-close-warning": "Attention : Si vous fermez cette fenêtre, les tentatives de migration échouées ne seront pas réessayées.",
"n-hosts-awaiting-patch": "{n} hôte attend ce patch | {n} hôtes attendent ce patch",
"n-missing": "{n} manquant | {n} manquants",
"n-vms": "1 VM | {n} VMs",
@@ -113,7 +112,6 @@
"patches": "Patches",
"pause": "Pause",
"please-confirm": "Veuillez confirmer",
"please-select": "Veuillez sélectionner",
"pool-cpu-usage": "Utilisation CPU du Pool",
"pool-ram-usage": "Utilisation RAM du Pool",
"power-on-for-console": "Allumez votre VM pour accéder à sa console",
@@ -175,12 +173,5 @@
"vcpus-used": "vCPUs utilisés",
"version": "Version",
"vms": "VMs",
"vms-migration-error": {
"already-being-migrated": "Au moins une VM sélectionnée est déjà en cours de migration",
"not-allowed": "Certaines VM ne sont pas autorisées à être migrées",
"no-destination-host": "Aucun hôte de destination n'a été sélectionné",
"no-migration-network": "Aucun réseau de migration n'a été sélectionné",
"no-destination-sr": "Aucun SR de destination n'a été sélectionné"
},
"xo-lite-under-construction": "XOLite est en construction"
}

View File

@@ -1,9 +0,0 @@
import { useXenApiStoreSubscribableContext } from "@/composables/xen-api-store-subscribable-context.composable";
import { createUseCollection } from "@/stores/xen-api/create-use-collection";
import { defineStore } from "pinia";
export const useNetworkStore = defineStore("xen-api-network", () => {
return useXenApiStoreSubscribableContext("network");
});
export const useNetworkCollection = createUseCollection(useNetworkStore);

View File

@@ -1,9 +0,0 @@
import { useXenApiStoreSubscribableContext } from "@/composables/xen-api-store-subscribable-context.composable";
import { createUseCollection } from "@/stores/xen-api/create-use-collection";
import { defineStore } from "pinia";
export const usePbdStore = defineStore("xen-api-pbd", () => {
return useXenApiStoreSubscribableContext("pbd");
});
export const usePbdCollection = createUseCollection(usePbdStore);

View File

@@ -1,9 +0,0 @@
import { useXenApiStoreSubscribableContext } from "@/composables/xen-api-store-subscribable-context.composable";
import { createUseCollection } from "@/stores/xen-api/create-use-collection";
import { defineStore } from "pinia";
export const usePifStore = defineStore("xen-api-pif", () => {
return useXenApiStoreSubscribableContext("pif");
});
export const usePifCollection = createUseCollection(usePifStore);

View File

@@ -1,9 +0,0 @@
import { useXenApiStoreSubscribableContext } from "@/composables/xen-api-store-subscribable-context.composable";
import { createUseCollection } from "@/stores/xen-api/create-use-collection";
import { defineStore } from "pinia";
export const useVbdStore = defineStore("xen-api-vbd", () => {
return useXenApiStoreSubscribableContext("vbd");
});
export const useVbdCollection = createUseCollection(useVbdStore);

View File

@@ -1,9 +0,0 @@
import { useXenApiStoreSubscribableContext } from "@/composables/xen-api-store-subscribable-context.composable";
import { createUseCollection } from "@/stores/xen-api/create-use-collection";
import { defineStore } from "pinia";
export const useVdiStore = defineStore("xen-api-vdi", () => {
return useXenApiStoreSubscribableContext("vdi");
});
export const useVdiCollection = createUseCollection(useVdiStore);

View File

@@ -2,15 +2,14 @@ import type { GetStats } from "@/composables/fetch-stats.composable";
import { useXenApiStoreSubscribableContext } from "@/composables/xen-api-store-subscribable-context.composable";
import { sortRecordsByNameLabel } from "@/libs/utils";
import type { VmStats } from "@/libs/xapi-stats";
import type { XenApiHost, XenApiVm } from "@/libs/xen-api/xen-api.types";
import {
type VM_OPERATION,
VM_POWER_STATE,
} from "@/libs/xen-api/xen-api.enums";
import type { XenApiHost, XenApiVm } from "@/libs/xen-api/xen-api.types";
import { useXenApiStore } from "@/stores/xen-api.store";
import { createUseCollection } from "@/stores/xen-api/create-use-collection";
import { useHostStore } from "@/stores/xen-api/host.store";
import type { MaybeArray } from "@/types";
import { castArray } from "lodash-es";
import { defineStore } from "pinia";
import { computed } from "vue";
@@ -26,29 +25,16 @@ export const useVmStore = defineStore("xen-api-vm", () => {
.sort(sortRecordsByNameLabel)
);
const hasOperation = (
vms: MaybeArray<XenApiVm>,
operations: MaybeArray<VM_OPERATION>,
operationType: "current_operations" | "allowed_operations"
) => {
return castArray(vms).some((vm) => {
const currentOperations = Object.values(vm[operationType]);
return castArray(operations).some((operation) =>
currentOperations.includes(operation)
);
});
};
const isOperationPending = (
vms: XenApiVm | XenApiVm[],
operations: MaybeArray<VM_OPERATION>
) => hasOperation(vms, operations, "current_operations");
vm: XenApiVm,
operations: VM_OPERATION[] | VM_OPERATION
) => {
const currentOperations = Object.values(vm.current_operations);
const isOperationAllowed = (
vms: MaybeArray<XenApiVm>,
operations: MaybeArray<VM_OPERATION>
) => hasOperation(vms, operations, "allowed_operations");
return castArray(operations).some((operation) =>
currentOperations.includes(operation)
);
};
const runningVms = computed(() =>
records.value.filter((vm) => vm.power_state === VM_POWER_STATE.RUNNING)
@@ -106,7 +92,6 @@ export const useVmStore = defineStore("xen-api-vm", () => {
...context,
records,
isOperationPending,
isOperationAllowed,
runningVms,
recordsByHostRef,
getStats,

View File

@@ -1,3 +1 @@
export type Color = "info" | "error" | "warning" | "success";
export type MaybeArray<T> = T | T[];

View File

@@ -1,11 +1,6 @@
import type {
RawObjectType,
XenApiHost,
XenApiMessage,
XenApiNetwork,
XenApiSr,
XenApiVdi,
XenApiVm,
} from "@/libs/xen-api/xen-api.types";
export type XenApiAlarmType =
@@ -42,32 +37,3 @@ export type XenApiPatch = {
author: string;
};
};
export type XenApiMigrationToken = Record<string, string>;
export type XenApiMigrationParams = [
XenApiVm["$ref"],
XenApiMigrationToken,
boolean,
Record<XenApiVdi["$ref"], XenApiSr["$ref"]>,
Record<any, never>,
{ force: "true" | "false" },
];
export type VmRefsWithPowerState = Record<
XenApiVm["$ref"],
XenApiVm["power_state"]
>;
export type VmRefsWithNameLabel = Record<XenApiVm["$ref"], string>;
export type VmMigrationData = {
destinationHost: XenApiHost["$ref"];
migrationNetwork: XenApiNetwork["$ref"];
destinationSr: XenApiSr["$ref"];
vdisMap: Record<XenApiVdi["$ref"], XenApiSr["$ref"]>;
force?: boolean;
bypassAssert?: boolean;
};
export type VmRefsWithMigration = Record<XenApiVm["$ref"], VmMigrationData>;

View File

@@ -81,10 +81,7 @@ const selectedVmsRefs = ref([]);
titleStore.setCount(() => selectedVmsRefs.value.length);
const isMigrating = (vm: XenApiVm) =>
isOperationPending(vm, [
VM_OPERATION.POOL_MIGRATE,
VM_OPERATION.MIGRATE_SEND,
]);
isOperationPending(vm, VM_OPERATION.POOL_MIGRATE);
</script>
<style lang="postcss" scoped>

View File

@@ -134,22 +134,23 @@ class Vdi {
if (stream.length === undefined) {
throw new Error('Trying to import a VDI without a length field. Please report this error to Xen Orchestra.')
}
const vdi = await this.getRecord('VDI', ref)
const sr = await this.getRecord('SR', vdi.SR)
try {
await this.putResource(cancelToken, stream, '/import_raw_vdi/', {
query: {
format,
vdi: ref,
},
task: await this.task_create(`Importing content into VDI ${await this.getField('VDI', ref, 'name_label')}`),
task: await this.task_create(`Importing content into VDI ${vdi.name_label} on SR ${sr.name_label}`),
})
} catch (error) {
// augment the error with as much relevant info as possible
const [poolMaster, vdi] = await Promise.all([
this.getRecord('host', this.pool.master),
this.getRecord('VDI', ref),
])
const poolMaster = await this.getRecord('host', this.pool.master)
error.pool_master = poolMaster
error.SR = await this.getRecord('SR', vdi.SR)
error.SR = sr
error.VDI = vdi
throw error
}

View File

@@ -13,6 +13,7 @@
- [Backup/Mirror] Fix backup report not being sent (PR [#7049](https://github.com/vatesfr/xen-orchestra/pull/7049))
- [New VM] Only add MBR to cloud-init drive on Windows VMs to avoid booting issues (e.g. with Talos) (PR [#7050](https://github.com/vatesfr/xen-orchestra/pull/7050))
- [VDI Import] Add the SR name to the corresponding XAPI task (PR [#6979](https://github.com/vatesfr/xen-orchestra/pull/6979))
### Packages to release
@@ -30,7 +31,9 @@
<!--packages-start-->
- @xen-orchestra/xapi minor
- xo-server-backup-reports patch
- xo-server patch
- xo-web patch
<!--packages-end-->

View File

@@ -241,7 +241,8 @@ const messages = {
closeTunnel: 'Close tunnel',
createSupportTicket: 'Create a support ticket',
restartXoServer: 'Restart XO Server',
restartXoServerConfirm: 'Restarting XO Server will interrupt any backup job or XO task that is currently running. Xen Orchestra will also be unavailable for a few seconds. Are you sure you want to restart XO Server?',
restartXoServerConfirm:
'Restarting XO Server will interrupt any backup job or XO task that is currently running. Xen Orchestra will also be unavailable for a few seconds. Are you sure you want to restart XO Server?',
openTunnel: 'Open tunnel',
supportCommunity: 'The XOA check and the support tunnel are available in XOA.',
supportTunnel: 'Support tunnel',
@@ -2197,11 +2198,10 @@ const messages = {
pwdChangeError: 'Incorrect password',
pwdChangeErrorBody: 'The old password provided is incorrect. Your password has not been changed.',
changePasswordOk: 'OK',
forgetTokens: 'Forget all connection tokens',
forgetTokensExplained:
'This will prevent other clients from authenticating with existing tokens but will not kill active sessions',
forgetTokensSuccess: 'Successfully forgot connection tokens',
forgetTokensError: 'Error while forgetting connection tokens',
forgetTokens: 'Forget all authentication tokens',
forgetTokensExplained: 'This prevents authenticating with any existing token except the one used by the current session',
forgetTokensSuccess: 'Successfully forgot authentication tokens',
forgetTokensError: 'Error while forgetting authentication tokens',
sshKeys: 'SSH keys',
newAuthToken: 'New token',
newSshKey: 'New SSH key',