diff --git a/@xen-orchestra/xva/.npmignore b/@xen-orchestra/xva/.npmignore
new file mode 120000
index 000000000..008d1b9b9
--- /dev/null
+++ b/@xen-orchestra/xva/.npmignore
@@ -0,0 +1 @@
+../../scripts/npmignore
\ No newline at end of file
diff --git a/@xen-orchestra/xva/_isNotEmptyRef.mjs b/@xen-orchestra/xva/_isNotEmptyRef.mjs
new file mode 100644
index 000000000..7659a61f6
--- /dev/null
+++ b/@xen-orchestra/xva/_isNotEmptyRef.mjs
@@ -0,0 +1,5 @@
+export function isNotEmptyRef(val) {
+ const EMPTY = 'OpaqueRef:NULL'
+ const PREFIX = 'OpaqueRef:'
+ return val !== EMPTY && typeof val === 'string' && val.startsWith(PREFIX)
+}
diff --git a/@xen-orchestra/xva/_toOvaXml.mjs b/@xen-orchestra/xva/_toOvaXml.mjs
new file mode 100644
index 000000000..343b909fb
--- /dev/null
+++ b/@xen-orchestra/xva/_toOvaXml.mjs
@@ -0,0 +1,42 @@
+// from package xml-escape
+function escape(string) {
+ if (string === null || string === undefined) return
+ if (typeof string === 'number') {
+ return string
+ }
+  const map = {
+    '>': '&gt;',
+    '<': '&lt;',
+    "'": '&apos;',
+    '"': '&quot;',
+    '&': '&amp;',
+  }
+
+ const pattern = '([&"<>\'])'
+ return string.replace(new RegExp(pattern, 'g'), function (str, item) {
+ return map[item]
+ })
+}
+
+function formatDate(d) {
+ return d.toISOString().replaceAll('-', '').replace('.000Z', 'Z')
+}
+
+export default function toOvaXml(obj) {
+  if (Array.isArray(obj)) {
+    return `<value><array><data>${obj.map(val => toOvaXml(val)).join('')}</data></array></value>`
+  }
+
+  if (typeof obj === 'object') {
+    if (obj instanceof Date) {
+      return `<value><dateTime.iso8601>${escape(formatDate(obj))}</dateTime.iso8601></value>`
+    }
+    return `<value><struct>${Object.entries(obj)
+      .map(([key, value]) => `<member><name>${escape(key)}</name>${toOvaXml(value)}</member>`)
+      .join('')}</struct></value>`
+  }
+  if (typeof obj === 'boolean') {
+    return `<value><boolean>${obj ? 1 : 0}</boolean></value>`
+  }
+  return `<value>${escape(obj)}</value>`
+}
diff --git a/@xen-orchestra/xva/_writeDisk.mjs b/@xen-orchestra/xva/_writeDisk.mjs
new file mode 100644
index 000000000..cbf939d3c
--- /dev/null
+++ b/@xen-orchestra/xva/_writeDisk.mjs
@@ -0,0 +1,39 @@
+import { fromCallback } from 'promise-toolbox'
+import { readChunkStrict } from '@vates/read-chunk'
+import { XXHash64 } from 'xxhash'
+
+async function writeBlock(pack, data, name) {
+ await fromCallback.call(pack, pack.entry, { name }, data)
+ const hasher = new XXHash64(0)
+ hasher.update(data)
+ // weirdly, ocaml and xxhash return the bytes in reverse order to each other
+ const hash = hasher.digest().reverse().toString('hex').toUpperCase()
+ await fromCallback.call(pack, pack.entry, { name: `${name}.xxhash` }, Buffer.from(hash, 'utf8'))
+}
+export default async function addDisk(pack, vhd, basePath) {
+ let counter = 0
+ let written
+ const chunk_length = 1024 * 1024
+ const empty = Buffer.alloc(chunk_length, 0)
+ const stream = await vhd.rawContent()
+ let lastBlockLength
+ const diskSize = vhd.footer.currentSize
+ let remaining = diskSize
+ while (remaining > 0) {
+ const data = await readChunkStrict(stream, Math.min(chunk_length, remaining))
+ lastBlockLength = data.length
+ remaining -= lastBlockLength
+
+ if (counter === 0 || !data.equals(empty)) {
+ written = true
+ await writeBlock(pack, data, `${basePath}/${('' + counter).padStart(8, '0')}`)
+ } else {
+ written = false
+ }
+ counter++
+ }
+  if (!written) {
+    // the last block must be present: await it (a floating promise could race pack.finalize)
+    // and name it with the same zero-padded index scheme as the loop (last index is counter - 1)
+    await writeBlock(pack, empty.slice(0, lastBlockLength), `${basePath}/${('' + (counter - 1)).padStart(8, '0')}`)
+  }
+}
diff --git a/@xen-orchestra/xva/_writeOvaXml.mjs b/@xen-orchestra/xva/_writeOvaXml.mjs
new file mode 100644
index 000000000..068826574
--- /dev/null
+++ b/@xen-orchestra/xva/_writeOvaXml.mjs
@@ -0,0 +1,154 @@
+import assert from 'node:assert'
+
+import { fromCallback } from 'promise-toolbox'
+import { v4 as uuid } from 'uuid'
+import defaultsDeep from 'lodash.defaultsdeep'
+
+import { DEFAULT_VBD } from './templates/vbd.mjs'
+import { DEFAULT_VDI } from './templates/vdi.mjs'
+import { DEFAULT_VIF } from './templates/vif.mjs'
+import { DEFAULT_VM } from './templates/vm.mjs'
+import toOvaXml from './_toOvaXml.mjs'
+
+export default async function writeOvaXml(
+ pack,
+ { memory, networks, nCpus, firmware, vdis, vhds, ...vmSnapshot },
+ { sr, network }
+) {
+ let refId = 0
+ function nextRef() {
+ return 'Ref:' + String(refId++).padStart(3, '0')
+ }
+ const data = {
+ version: {
+ hostname: 'localhost',
+ date: '2022-01-01',
+ product_version: '8.2.1',
+ product_brand: 'XCP-ng',
+ build_number: 'release/yangtze/master/58',
+ xapi_major: 1,
+ xapi_minor: 20,
+ export_vsn: 2,
+ },
+ objects: [],
+ }
+ const vm = defaultsDeep(
+ {
+ id: nextRef(),
+ // you can pass a full snapshot and nothing more to do
+ snapshot: vmSnapshot,
+ },
+ {
+ // some data need a little more work to be usable
+ // if they are not already in vm
+ snapshot: {
+ HVM_boot_params: {
+ firmware,
+ },
+ memory_static_max: memory,
+ memory_static_min: memory,
+ memory_dynamic_max: memory,
+ memory_dynamic_min: memory,
+ other_config: {
+ mac_seed: uuid(),
+ },
+ uuid: uuid(),
+ VCPUs_at_startup: nCpus,
+ VCPUs_max: nCpus,
+ },
+ },
+ DEFAULT_VM
+ )
+
+ data.objects.push(vm)
+ const srObj = defaultsDeep(
+ {
+ class: 'SR',
+ id: nextRef(),
+ snapshot: sr,
+ },
+ {
+ snapshot: {
+ VDIs: [],
+ },
+ }
+ )
+
+ data.objects.push(srObj)
+ assert.strictEqual(vhds.length, vdis.length)
+ for (let index = 0; index < vhds.length; index++) {
+ const userdevice = index + 1
+ const vhd = vhds[index]
+ const vdi = defaultsDeep(
+ {
+ id: nextRef(),
+ // overwrite SR from an opaqref to a ref:
+ snapshot: { ...vdis[index], SR: srObj.id },
+ },
+ {
+ snapshot: {
+ uuid: uuid(),
+ },
+ },
+ DEFAULT_VDI
+ )
+
+ data.objects.push(vdi)
+ srObj.snapshot.VDIs.push(vdi.id)
+ vhd.ref = vdi.id
+
+ const vbd = defaultsDeep(
+ {
+ id: nextRef(),
+ snapshot: {
+ device: `xvd${String.fromCharCode('a'.charCodeAt(0) + index)}`,
+ uuid: uuid(),
+ userdevice,
+ VM: vm.id,
+ VDI: vdi.id,
+ },
+ },
+ DEFAULT_VBD
+ )
+ data.objects.push(vbd)
+ vdi.snapshot.vbds.push(vbd.id)
+ vm.snapshot.VBDs.push(vbd.id)
+ }
+
+ if (network && networks?.length) {
+ const networkObj = defaultsDeep(
+ {
+ class: 'network',
+ id: nextRef(),
+ snapshot: network,
+ },
+ {
+ snapshot: {
+ vifs: [],
+ },
+ }
+ )
+ data.objects.push(networkObj)
+ let vifIndex = 0
+ for (const sourceNetwork of networks) {
+ const vif = defaultsDeep(
+ {
+ id: nextRef(),
+ snapshot: {
+ device: ++vifIndex,
+ MAC: sourceNetwork.macAddress,
+ MAC_autogenerated: sourceNetwork.isGenerated,
+ uuid: uuid(),
+ VM: vm.id,
+ network: networkObj.id,
+ },
+ },
+ DEFAULT_VIF
+ )
+ data.objects.push(vif)
+ networkObj.snapshot.vifs.push(vif.id)
+ }
+ }
+ const xml = toOvaXml(data)
+ await fromCallback.call(pack, pack.entry, { name: `ova.xml` }, xml)
+}
diff --git a/@xen-orchestra/xva/importVdi.mjs b/@xen-orchestra/xva/importVdi.mjs
new file mode 100644
index 000000000..e18533224
--- /dev/null
+++ b/@xen-orchestra/xva/importVdi.mjs
@@ -0,0 +1,32 @@
+import { isNotEmptyRef } from './_isNotEmptyRef.mjs'
+import { importVm } from './importVm.mjs'
+
+export async function importVdi(vdi, vhd, xapi, sr) {
+ // create a fake VM
+ const vmRef = await importVm(
+ {
+ name_label: `[xva-disp-import]${vdi.name_label}`,
+ memory: 1024 * 1024 * 32,
+ nCpus: 1,
+ firmware: 'bios',
+ vdis: [vdi],
+ vhds: [vhd],
+ },
+ xapi,
+ sr
+ )
+ // wait for the VM to be loaded if necessary
+ xapi.getObject(vmRef, undefined) ?? (await xapi.waitObject(vmRef))
+
+ const vbdRefs = await xapi.getField('VM', vmRef, 'VBDs')
+ // get the disk
+ const disks = { __proto__: null }
+ ;(await xapi.getRecords('VBD', vbdRefs)).forEach(vbd => {
+ if (vbd.type === 'Disk' && isNotEmptyRef(vbd.VDI)) {
+ disks[vbd.VDI] = true
+ }
+ })
+ // destroy the VM and VBD
+ await xapi.call('VM.destroy', vmRef)
+ return await xapi.getRecord('VDI', Object.keys(disks)[0])
+}
diff --git a/@xen-orchestra/xva/importVm.mjs b/@xen-orchestra/xva/importVm.mjs
new file mode 100644
index 000000000..e9e9ed6b2
--- /dev/null
+++ b/@xen-orchestra/xva/importVm.mjs
@@ -0,0 +1,33 @@
+import tar from 'tar-stream'
+
+import writeOvaXml from './_writeOvaXml.mjs'
+import writeDisk from './_writeDisk.mjs'
+
+export async function importVm(vm, xapi, sr, network) {
+ const pack = tar.pack()
+ const taskRef = await xapi.task_create('VM import')
+ const query = {
+ sr_id: sr.$ref,
+ }
+
+ const promise = xapi
+ .putResource(pack, '/import/', {
+ query,
+ task: taskRef,
+ })
+ .catch(err => console.error(err))
+
+ await writeOvaXml(pack, vm, { sr, network })
+ for (const vhd of vm.vhds) {
+ await writeDisk(pack, vhd, vhd.ref)
+ }
+ pack.finalize()
+ const str = await promise
+ const matches = /OpaqueRef:[0-9a-z-]+/.exec(str)
+ if (!matches) {
+ const error = new Error('no opaque ref found')
+ error.haystack = str
+ throw error
+ }
+ return matches[0]
+}
diff --git a/@xen-orchestra/xva/package.json b/@xen-orchestra/xva/package.json
new file mode 100644
index 000000000..f5f14758d
--- /dev/null
+++ b/@xen-orchestra/xva/package.json
@@ -0,0 +1,29 @@
+{
+  "name": "@xen-orchestra/xva",
+ "version": "1.0.0",
+ "main": "index.js",
+ "author": "",
+ "license": "ISC",
+ "private": false,
+  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/xva",
+ "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
+ "repository": {
+    "directory": "@xen-orchestra/xva",
+ "type": "git",
+ "url": "https://github.com/vatesfr/xen-orchestra.git"
+ },
+ "engines": {
+ "node": ">=14.0"
+ },
+ "dependencies": {
+ "@vates/read-chunk": "^1.2.0",
+ "lodash.defaultsdeep": "^4.6.1",
+ "promise-toolbox": "^0.21.0",
+ "tar-stream": "^3.1.6",
+ "uuid": "^9.0.0",
+ "xxhash": "^0.3.0"
+ },
+ "scripts": {
+ "postversion": "npm publish --access public"
+ }
+}
diff --git a/@xen-orchestra/xva/templates/vbd.mjs b/@xen-orchestra/xva/templates/vbd.mjs
new file mode 100644
index 000000000..c8f906be3
--- /dev/null
+++ b/@xen-orchestra/xva/templates/vbd.mjs
@@ -0,0 +1,22 @@
+export const DEFAULT_VBD = {
+ class: 'VBD',
+ snapshot: {
+ allowed_operations: [],
+ bootable: true, // @todo : fix it
+ current_operations: {},
+ currently_attached: false,
+ empty: false,
+ metrics: 'OpaqueRef:NULL',
+ mode: 'RW',
+ other_config: {},
+ qos_algorithm_params: {},
+ qos_algorithm_type: '',
+ qos_supported_algorithms: [],
+ runtime_properties: {},
+ status_code: 0,
+ status_detail: '',
+ storage_lock: false,
+ type: 'Disk',
+ unpluggable: false,
+ },
+}
diff --git a/@xen-orchestra/xva/templates/vdi.mjs b/@xen-orchestra/xva/templates/vdi.mjs
new file mode 100644
index 000000000..ca8e9ce26
--- /dev/null
+++ b/@xen-orchestra/xva/templates/vdi.mjs
@@ -0,0 +1,29 @@
+export const DEFAULT_VDI = {
+ class: 'VDI',
+ snapshot: {
+ allow_caching: false,
+ cbt_enabled: false,
+ descriptionLabel: 'description',
+ is_a_snapshot: false,
+ managed: true,
+ metrics: 'OpaqueRef:NULL',
+ missing: false,
+ name_label: 'name_label',
+ on_boot: 'persist',
+ other_config: {},
+ parent: 'OpaqueRef:NULL',
+ physical_utilisation: 1024 * 1024,
+ read_only: false,
+ sharable: false,
+ snapshot_of: 'OpaqueRef:NULL',
+ snapshots: [],
+ SR: 'OpaqueRef:NULL',
+ storage_lock: false,
+ tags: [],
+ type: 'user',
+ uuid: '',
+ vbds: [],
+ virtual_size: 0,
+ xenstore_data: {},
+ },
+}
diff --git a/@xen-orchestra/xva/templates/vif.mjs b/@xen-orchestra/xva/templates/vif.mjs
new file mode 100644
index 000000000..00b73814c
--- /dev/null
+++ b/@xen-orchestra/xva/templates/vif.mjs
@@ -0,0 +1,26 @@
+export const DEFAULT_VIF = {
+ class: 'VIF',
+ snapshot: {
+ allowed_operations: [],
+ currently_attached: false,
+ current_operations: {},
+ ipv4_addresses: [],
+ ipv4_allowed: [],
+ ipv4_configuration_mode: 'None',
+ ipv4_gateway: '',
+ ipv6_addresses: [],
+ ipv6_allowed: [],
+ ipv6_configuration_mode: 'None',
+ ipv6_gateway: '',
+ locking_mode: 'network_default',
+ MTU: 1500,
+ metrics: 'OpaqueRef:NULL',
+ other_config: {},
+ qos_algorithm_params: {},
+ qos_algorithm_type: '',
+ qos_supported_algorithms: [],
+ runtime_properties: {},
+ status_code: 0,
+ status_detail: '',
+ },
+}
diff --git a/@xen-orchestra/xva/templates/vm.mjs b/@xen-orchestra/xva/templates/vm.mjs
new file mode 100644
index 000000000..b1e62e56d
--- /dev/null
+++ b/@xen-orchestra/xva/templates/vm.mjs
@@ -0,0 +1,106 @@
+export const DEFAULT_VM = {
+ class: 'VM',
+ id: null,
+ snapshot: {
+ actions_after_crash: 'restart',
+ actions_after_reboot: 'restart',
+ actions_after_shutdown: 'destroy',
+ affinity: 'Ref:53',
+ allowed_operations: [],
+ // appliance:'OpaqueRef:NULL',
+ attached_PCIs: [],
+ blobs: {},
+ blocked_operations: {},
+ children: [],
+ consoles: [],
+ crash_dumps: [],
+ current_operations: {},
+ domain_type: 'hvm',
+ domarch: '',
+ domid: -1,
+ generation_id: '',
+ guest_metrics: 'Ref:53',
+ ha_always_run: false,
+ ha_restart_priority: '',
+ hardware_platform_version: 0,
+ has_vendor_device: false,
+ HVM_boot_params: {
+ firmware: 'bios',
+ order: 'dcn',
+ },
+ HVM_boot_policy: 'BIOS order',
+ HVM_shadow_multiplier: 1,
+ is_a_template: false,
+ is_control_domain: false,
+ is_default_template: false,
+ is_snapshot_from_vmpp: false,
+ is_vmss_snapshot: false,
+ last_booted_record: '',
+ memory_dynamic_max: 1,
+ memory_dynamic_min: 1,
+ memory_overhead: 11534336,
+ memory_static_max: 1,
+ memory_static_min: 1,
+ memory_target: 0,
+ metrics: 'OpaqueRef:NULL',
+ name_label: 'from xva',
+ NVRAM: {},
+ name_description: ' from xva',
+ order: 0,
+ other_config: {
+ base_template_name: 'Other install media',
+ // mac_seed,
+ 'install-methods': 'cdrom',
+ },
+ parent: 'OpaqueRef:NULL',
+ PCI_bus: '',
+ platform: {
+ timeoffset: 1,
+ 'device-model': 'qemu-upstream-compat',
+ secureboot: 'false',
+ hpet: 'true',
+ nx: 'true',
+ pae: 'true',
+ apic: 'true',
+ viridian: 'true',
+ acpi: 1,
+ },
+ power_state: 'halted',
+ // protection_policy:'OpaqueRef:NULL',
+ PV_args: '',
+ PV_bootloader_args: '',
+ PV_bootloader: '',
+ PV_kernel: '',
+ PV_legacy_args: '',
+ PV_ramdisk: '',
+ recommendations: '',
+ reference_label: 'other-install-media',
+ requires_reboot: false,
+ resident_on: 'Ref:53',
+ // scheduled_to_be_resident_on:'OpaqueRef:NULL',
+ shutdown_delay: 0,
+ // snapshot_schedule: 'OpaqueRef:NULL',
+ snapshot_info: {},
+ snapshot_metadata: '',
+ snapshot_of: 'OpaqueRef:NULL',
+ snapshot_time: new Date(0),
+ snapshots: [],
+ start_delay: 0,
+ // suspend_VDI:'OpaqueRef:NULL',
+ // suspend_SR:'OpaqueRef:NULL',
+ tags: [],
+ transportable_snapshot_id: '',
+ // uuid,
+ user_version: 1,
+ VBDs: [],
+ VCPUs_at_startup: 1,
+ VCPUs_max: 1,
+ VCPUs_params: {},
+ version: 0,
+ VGPUs: [],
+ VIFs: [],
+ VTPMs: [],
+ VUSBs: [],
+ xenstore_data: {},
+ },
+}
diff --git a/CHANGELOG.unreleased.md b/CHANGELOG.unreleased.md
index 1bde40b2b..24b77edb0 100644
--- a/CHANGELOG.unreleased.md
+++ b/CHANGELOG.unreleased.md
@@ -8,6 +8,7 @@
> Users must be able to say: “Nice enhancement, I'm eager to test it”
- [SR] Possibility to create SMB shared SR [#991](https://github.com/vatesfr/xen-orchestra/issues/991) (PR [#7330](https://github.com/vatesfr/xen-orchestra/pull/7330))
+- [Import/VMWare] Speed up import and make all imports thin [#7323](https://github.com/vatesfr/xen-orchestra/issues/7323)
### Bug fixes
@@ -35,6 +36,7 @@
- @xen-orchestra/backups patch
- @xen-orchestra/vmware-explorer minor
+- @xen-orchestra/xva major
- xo-server minor
- xo-web minor
diff --git a/packages/xo-server/src/api/vm.mjs b/packages/xo-server/src/api/vm.mjs
index d5c5b6ff5..d10a96652 100644
--- a/packages/xo-server/src/api/vm.mjs
+++ b/packages/xo-server/src/api/vm.mjs
@@ -1382,19 +1382,9 @@ import_.resolve = {
export { import_ as import }
-export async function importFromEsxi({
- host,
- network,
- password,
- sr,
- sslVerify = true,
- stopSource = false,
- thin = false,
- user,
- vm,
-}) {
+export async function importFromEsxi({ host, network, password, sr, sslVerify = true, stopSource = false, user, vm }) {
const task = await this.tasks.create({ name: `importing vm ${vm}` })
- return task.run(() => this.migrationfromEsxi({ host, user, password, sslVerify, thin, vm, sr, network, stopSource }))
+ return task.run(() => this.migrationfromEsxi({ host, user, password, sslVerify, vm, sr, network, stopSource }))
}
importFromEsxi.params = {
@@ -1404,7 +1394,6 @@ importFromEsxi.params = {
sr: { type: 'string' },
sslVerify: { type: 'boolean', optional: true },
stopSource: { type: 'boolean', optional: true },
- thin: { type: 'boolean', optional: true },
user: { type: 'string' },
vm: { type: 'string' },
}
diff --git a/packages/xo-server/src/xo-mixins/migrate-vm.mjs b/packages/xo-server/src/xo-mixins/migrate-vm.mjs
index 9f6a1d51d..20209a836 100644
--- a/packages/xo-server/src/xo-mixins/migrate-vm.mjs
+++ b/packages/xo-server/src/xo-mixins/migrate-vm.mjs
@@ -4,12 +4,13 @@ import { fromEvent } from 'promise-toolbox'
import { createRunner } from '@xen-orchestra/backups/Backup.mjs'
import { Task } from '@xen-orchestra/mixins/Tasks.mjs'
import { v4 as generateUuid } from 'uuid'
-import { VDI_FORMAT_RAW, VDI_FORMAT_VHD } from '@xen-orchestra/xapi'
+import { VDI_FORMAT_VHD } from '@xen-orchestra/xapi'
import asyncMapSettled from '@xen-orchestra/async-map/legacy.js'
import Esxi from '@xen-orchestra/vmware-explorer/esxi.mjs'
import openDeltaVmdkasVhd from '@xen-orchestra/vmware-explorer/openDeltaVmdkAsVhd.mjs'
import OTHER_CONFIG_TEMPLATE from '../xapi/other-config-template.mjs'
import VhdEsxiRaw from '@xen-orchestra/vmware-explorer/VhdEsxiRaw.mjs'
+import { importVdi as importVdiThroughXva } from '@xen-orchestra/xva/importVdi.mjs'
export default class MigrateVm {
constructor(app) {
@@ -169,7 +170,7 @@ export default class MigrateVm {
@decorateWith(deferrable)
async migrationfromEsxi(
$defer,
- { host, user, password, sslVerify, sr: srId, network: networkId, vm: vmId, thin, stopSource }
+ { host, user, password, sslVerify, sr: srId, network: networkId, vm: vmId, stopSource }
) {
const app = this._app
const esxi = await this.#connectToEsxi(host, user, password, sslVerify)
@@ -220,7 +221,7 @@ export default class MigrateVm {
xapi.VIF_create(
{
device: vifDevices[i],
- network: xapi.getObject(networkId).$ref,
+ network: app.getXapiObject(networkId).$ref,
VM: vm.$ref,
},
{
@@ -231,29 +232,13 @@ export default class MigrateVm {
)
return vm
})
-
$defer.onFailure.call(xapi, 'VM_destroy', vm.$ref)
const vhds = await Promise.all(
Object.keys(chainsByNodes).map(async (node, userdevice) =>
Task.run({ properties: { name: `Cold import of disks ${node}` } }, async () => {
const chainByNode = chainsByNodes[node]
- const vdi = await xapi._getOrWaitObject(
- await xapi.VDI_create({
- name_description: 'fromESXI' + chainByNode[0].descriptionLabel,
- name_label: '[ESXI]' + chainByNode[0].nameLabel,
- SR: sr.$ref,
- virtual_size: chainByNode[0].capacity,
- })
- )
- // it can fail before the vdi is connected to the vm
-
- $defer.onFailure.call(xapi, 'VDI_destroy', vdi.$ref)
-
- await xapi.VBD_create({
- VDI: vdi.$ref,
- VM: vm.$ref,
- })
+ let vdi
let parentVhd, vhd
// if the VM is running we'll transfer everything before the last , which is an active disk
// the esxi api does not allow us to read an active disk
@@ -262,27 +247,37 @@ export default class MigrateVm {
for (let diskIndex = 0; diskIndex < nbColdDisks; diskIndex++) {
// the first one is a RAW disk ( full )
const disk = chainByNode[diskIndex]
- const { fileName, path, datastore, isFull } = disk
+ const { capacity, descriptionLabel, fileName, nameLabel, path, datastore, isFull } = disk
if (isFull) {
- vhd = await VhdEsxiRaw.open(esxi, datastore, path + '/' + fileName, { thin })
- await vhd.readBlockAllocationTable()
+ vhd = await VhdEsxiRaw.open(esxi, datastore, path + '/' + fileName)
+ // we don't need to read the BAT with the importVdiThroughXva process
+ const vdiMetadata = {
+ name_description: 'fromESXI' + descriptionLabel,
+ name_label: '[ESXI]' + nameLabel,
+ SR: sr.$ref,
+ virtual_size: capacity,
+ }
+ vdi = await importVdiThroughXva(vdiMetadata, vhd, xapi, sr)
+ // it can fail before the vdi is connected to the vm
+ $defer.onFailure.call(xapi, 'VDI_destroy', vdi.$ref)
+ await xapi.VBD_create({
+ VDI: vdi.$ref,
+ VM: vm.$ref,
+ })
} else {
- vhd = await openDeltaVmdkasVhd(esxi, datastore, path + '/' + fileName, parentVhd)
+ vhd = await openDeltaVmdkasVhd(esxi, datastore, path + '/' + fileName, parentVhd, {
+ lookMissingBlockInParent: false,
+ })
}
+ vhd.label = fileName
parentVhd = vhd
}
- // it can be empty if the VM don't have a snapshot and is running
- if (vhd !== undefined) {
- if (thin) {
- const stream = vhd.stream()
- await vdi.$importContent(stream, { format: VDI_FORMAT_VHD })
- } else {
- // no transformation when there is no snapshot in thick mode
- const stream = await vhd.rawContent()
- await vdi.$importContent(stream, { format: VDI_FORMAT_RAW })
- }
+ if (nbColdDisks > 1 /* got a cold snapshot chain */) {
+ // it can be empty if the VM don't have a snapshot and is running
+ const stream = vhd.stream()
+ await vdi.$importContent(stream, { format: VDI_FORMAT_VHD })
}
- return { vdi, vhd }
+ return vhd
})
)
)
@@ -296,15 +291,28 @@ export default class MigrateVm {
await Task.run({ properties: { name: `Transfering deltas of ${userdevice}` } }, async () => {
const chainByNode = chainsByNodes[node]
const disk = chainByNode[chainByNode.length - 1]
- const { fileName, path, datastore, isFull } = disk
- const { vdi, vhd: parentVhd } = vhds[userdevice]
+ const { capacity, descriptionLabel, fileName, nameLabel, path, datastore, isFull } = disk
+ let { vdi, vhd: parentVhd } = vhds[userdevice]
let vhd
if (vdi === undefined) {
throw new Error(`Can't import delta of a running VM without its parent vdi`)
}
if (isFull) {
- vhd = await VhdEsxiRaw.open(esxi, datastore, path + '/' + fileName, { thin })
- await vhd.readBlockAllocationTable()
+ vhd = await VhdEsxiRaw.open(esxi, datastore, path + '/' + fileName, { thin: false })
+ // we don't need to read the BAT with the importVdiThroughXva process
+ const vdiMetadata = {
+ name_description: 'fromESXI' + descriptionLabel,
+ name_label: '[ESXI]' + nameLabel,
+ SR: sr.$ref,
+ virtual_size: capacity,
+ }
+ vdi = await importVdiThroughXva(vdiMetadata, vhd, xapi, sr)
+ // it can fail before the vdi is connected to the vm
+ $defer.onFailure.call(xapi, 'VDI_destroy', vdi.$ref)
+ await xapi.VBD_create({
+ VDI: vdi.$ref,
+ VM: vm.$ref,
+ })
} else {
if (parentVhd === undefined) {
throw new Error(`Can't import delta of a running VM without its parent VHD`)
diff --git a/yarn.lock b/yarn.lock
index 78702d7bb..f5ef01a8b 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -14483,6 +14483,11 @@ lodash.debounce@^4.0.8:
resolved "https://registry.yarnpkg.com/lodash.debounce/-/lodash.debounce-4.0.8.tgz#82d79bff30a67c4005ffd5e2515300ad9ca4d7af"
integrity sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow==
+lodash.defaultsdeep@^4.6.1:
+ version "4.6.1"
+ resolved "https://registry.yarnpkg.com/lodash.defaultsdeep/-/lodash.defaultsdeep-4.6.1.tgz#512e9bd721d272d94e3d3a63653fa17516741ca6"
+ integrity sha512-3j8wdDzYuWO3lM3Reg03MuQR957t287Rpcxp1njpEa8oDrikb+FwGdW3n+FELh/A6qib6yPit0j/pv9G/yeAqA==
+
lodash.escape@^3.0.0:
version "3.2.0"
resolved "https://registry.yarnpkg.com/lodash.escape/-/lodash.escape-3.2.0.tgz#995ee0dc18c1b48cc92effae71a10aab5b487698"
@@ -15454,7 +15459,7 @@ mute-stream@0.0.8:
resolved "https://registry.yarnpkg.com/mute-stream/-/mute-stream-0.0.8.tgz#1630c42b2251ff81e2a283de96a5497ea92e5e0d"
integrity sha512-nnbWWOkoWyUsTjKrhgD0dcz22mdkSnpYqbEjIm2nhwhuxlSkpywJmBo8h0ZqJdkp73mb90SssHkN4rsRaBAfAA==
-nan@^2.12.1:
+nan@^2.12.1, nan@^2.13.2:
version "2.18.0"
resolved "https://registry.yarnpkg.com/nan/-/nan-2.18.0.tgz#26a6faae7ffbeb293a39660e88a76b82e30b7554"
integrity sha512-W7tfG7vMOGtD30sHoZSSc/JVYiyDPEyQVso/Zz+/uQd0B0L46gtC+pHha5FFMRpil6fm/AoEcRWyOVi4+E/f8w==
@@ -22422,6 +22427,13 @@ xtend@~2.1.1:
dependencies:
object-keys "~0.4.0"
+xxhash@^0.3.0:
+ version "0.3.0"
+ resolved "https://registry.yarnpkg.com/xxhash/-/xxhash-0.3.0.tgz#d20893a62c5b0f7260597dd55859b12a1e02c559"
+ integrity sha512-1ud2yyPiR1DJhgyF1ZVMt+Ijrn0VNS/wzej1Z8eSFfkNfRPp8abVZNV2u9tYy9574II0ZayZYZgJm8KJoyGLCw==
+ dependencies:
+ nan "^2.13.2"
+
xxhashjs@^0.2.1:
version "0.2.2"
resolved "https://registry.yarnpkg.com/xxhashjs/-/xxhashjs-0.2.2.tgz#8a6251567621a1c46a5ae204da0249c7f8caa9d8"