Compare commits

...

17 Commits

Author SHA1 Message Date
Florent Beauchamp
737b40e668 fix: don't fail when there is no snapshot 2022-12-23 10:05:36 +01:00
Florent Beauchamp
c5e8bee5f0 fix(vmdk-explorer): hide TLS warning 2022-12-23 10:05:03 +01:00
Florent Beauchamp
67fda0bd42 fix import 2022-12-19 16:45:28 +01:00
Florent Beauchamp
f9cd8d1f2b feat: add published package 2022-12-19 15:50:38 +01:00
Florent Beauchamp
f996feb9cb feat(@xen-orchestra/vmware-explorer): 0.0.2 2022-12-19 15:42:38 +01:00
Florent Beauchamp
1caef5f7fc feat:force channel for proxy 2022-12-19 15:25:18 +01:00
Florent Beauchamp
12ad942ff9 debug 2022-12-19 14:42:16 +01:00
Florent Beauchamp
7314521fcb remove debug 2022-12-19 14:42:16 +01:00
Florent Beauchamp
405f1d2bbf fix handle multiple disks per vm 2022-12-19 14:42:16 +01:00
Florent Beauchamp
ac8b03bc11 missing files 2022-12-19 14:42:16 +01:00
Florent Beauchamp
eb06c8a0be debug 2022-12-19 14:42:16 +01:00
Florent Beauchamp
8a8757072b feat: reuse session 2022-12-19 14:42:16 +01:00
Florent Beauchamp
d0c7284d3b stop before deltas 2022-12-19 14:42:16 +01:00
Florent Beauchamp
37b38a5af1 fix 2022-12-19 14:42:16 +01:00
Florent Beauchamp
a609772415 cleanup 2022-12-19 14:42:16 +01:00
Florent Beauchamp
0876de77f5 wip2 2022-12-19 14:42:16 +01:00
Florent Beauchamp
19b0d5f584 wip 2022-12-19 14:42:16 +01:00
14 changed files with 1018 additions and 4 deletions

View File

@@ -0,0 +1 @@
../../scripts/npmignore

View File

@@ -0,0 +1,154 @@
import { notEqual, strictEqual } from 'node:assert'
import { VhdAbstract } from 'vhd-lib'
import { createFooter, createHeader } from 'vhd-lib/_createFooterHeader.js'
import _computeGeometryForSize from 'vhd-lib/_computeGeometryForSize.js'
import { DISK_TYPES, FOOTER_SIZE } from 'vhd-lib/_constants.js'
import { unpackFooter, unpackHeader } from 'vhd-lib/Vhd/_utils.js'
// Exposes a VMware COWD sparse extent (ESXi snapshot delta disk) through the
// VhdAbstract interface, reading data on demand over HTTP range requests.
export default class VhdCowd extends VhdAbstract {
  #esxi // Esxi client used for HTTP range downloads
  #datastore
  #parentFileName
  #path
  #header // synthetic VHD header built from the COWD header
  #footer // synthetic VHD footer built from the COWD header
  #grainDirectory // raw buffer, one Int32LE entry per 2 MiB block

  // Factory: builds the instance and reads/validates the COWD header first.
  // NOTE(review): parentFileName is not forwarded here although the
  // constructor accepts it — callers needing a differencing disk must use
  // the constructor directly. Confirm whether this is intentional.
  static async open(esxi, datastore, path) {
    const vhd = new VhdCowd(esxi, datastore, path)
    await vhd.readHeaderAndFooter()
    return vhd
  }

  constructor(esxi, datastore, path, parentFileName) {
    super()
    this.#esxi = esxi
    this.#path = path
    this.#datastore = datastore
    this.#parentFileName = parentFileName
  }

  get header() {
    return this.#header
  }

  get footer() {
    return this.#footer
  }

  // A grain table addresses 4096 grains of 512 B = 2 MiB, which is exactly one
  // VHD block: a block exists iff its grain directory entry is non-zero
  // (grain tables are not created until data is written).
  containsBlock(blockId) {
    notEqual(this.#grainDirectory, undefined, "bat must be loaded to use contain blocks'")
    return this.#grainDirectory.readInt32LE(blockId * 4) !== 0
  }

  // Downloads the inclusive byte range [start, end] of the extent file.
  async #read(start, end) {
    return (await this.#esxi.download(this.#datastore, this.#path, `${start}-${end}`)).buffer()
  }

  // Validates the COWD magic/version/flags and synthesizes the VHD
  // header/footer from the capacity found in the COWD header.
  async readHeaderAndFooter(checkSecondFooter = true) {
    const buffer = await this.#read(0, 2048)
    strictEqual(buffer.slice(0, 4).toString('ascii'), 'COWD')
    strictEqual(buffer.readInt32LE(4), 1) // version
    strictEqual(buffer.readInt32LE(8), 3) // flags
    const sectorCapacity = buffer.readInt32LE(12)
    // const sectorGrainNumber = buffer.readInt32LE(16)
    strictEqual(buffer.readInt32LE(20), 4) // grain directory position in sectors
    // const nbGrainDirectoryEntries = buffer.readInt32LE(24)
    // const nextFreeSector = buffer.readInt32LE(28)
    const size = sectorCapacity * 512
    // one grain directory entry == one grain table == 4096 grains of 512 B,
    // so the VHD needs ceil(size / 2 MiB) block entries
    this.#header = unpackHeader(createHeader(Math.ceil(size / (4096 * 512))))
    this.#header.parentUnicodeName = this.#parentFileName
    const geometry = _computeGeometryForSize(size)
    const actualSize = geometry.actualSize
    this.#footer = unpackFooter(
      createFooter(
        actualSize,
        Math.floor(Date.now() / 1000),
        geometry,
        FOOTER_SIZE,
        this.#parentFileName ? DISK_TYPES.DIFFERENCING : DISK_TYPES.DYNAMIC
      )
    )
  }

  // Loads the grain directory (starts at sector 4 = byte 2048), which doubles
  // as the VHD block allocation table here.
  async readBlockAllocationTable() {
    const nbBlocks = this.header.maxTableEntries
    this.#grainDirectory = await this.#read(2048, 2048 + nbBlocks * 4 - 1)
  }

  async readBlock(blockId) {
    const sectorOffset = this.#grainDirectory.readInt32LE(blockId * 4)
    if (sectorOffset === 1) {
      // NOTE(review): entry value 1 appears to flag an allocated-but-empty
      // block (all zeroes) — confirm against the COWD spec. Also note this
      // path returns a bare Buffer while every other path returns an
      // { id, bitmap, data, buffer } object; callers expecting the object
      // shape would break here.
      return Promise.resolve(Buffer.alloc(4096 * 512, 0))
    }
    const offset = sectorOffset * 512
    // the grain table for this block: 4096 Int32LE sector offsets
    const graintable = await this.#read(offset, offset + 2048 - 1)
    const buf = Buffer.concat([
      Buffer.alloc(512, 255), // vhd block bitmap, all sectors marked present
      Buffer.alloc(512 * 4096, 0), // empty data
    ])
    // grains have no ordering/contiguity guarantee: build an index of file
    // offsets so reads can be coalesced into ranges and limit HTTP queries
    const fileOffsetToIndexInGrainTable = {}
    let nbNonEmptyGrain = 0
    for (let i = 0; i < graintable.length / 4; i++) {
      const grainOffset = graintable.readInt32LE(i * 4)
      if (grainOffset !== 0) {
        // non empty grain
        fileOffsetToIndexInGrainTable[grainOffset] = i
        nbNonEmptyGrain++
      }
    }
    // grain table exists but only contains empty grains
    if (nbNonEmptyGrain === 0) {
      return {
        id: blockId,
        bitmap: buf.slice(0, this.bitmapSize),
        data: buf.slice(this.bitmapSize),
        buffer: buf,
      }
    }
    const offsets = Object.keys(fileOffsetToIndexInGrainTable).map(offset => parseInt(offset))
    offsets.sort((a, b) => a - b)
    let startOffset = offsets[0]
    const ranges = []
    // grains closer than OVERPROVISION sectors apart are fetched in one range
    const OVERPROVISION = 3
    for (let i = 1; i < offsets.length; i++) {
      if (offsets[i - 1] + OVERPROVISION < offsets[i]) {
        ranges.push({ startOffset, endOffset: offsets[i - 1] })
        startOffset = offsets[i]
      }
    }
    ranges.push({ startOffset, endOffset: offsets[offsets.length - 1] })
    for (const { startOffset, endOffset } of ranges) {
      const startIndex = fileOffsetToIndexInGrainTable[startOffset]
      const startInBlock = startIndex * 512 + 512 /* block bitmap */
      // NOTE(review): [startOffset*512, endOffset*512 - 1] excludes the 512
      // bytes of the grain at endOffset, and a single-grain range
      // (startOffset === endOffset) yields an inverted byte range — verify
      // whether the end bound should be endOffset*512 + 512 - 1.
      const sectors = await this.#read(startOffset * 512, endOffset * 512 - 1)
      // @todo : if overprovision > 1 , it may copy random data from the vmdk
      sectors.copy(buf, startInBlock)
    }
    return {
      id: blockId,
      bitmap: buf.slice(0, 512),
      data: buf.slice(512),
      buffer: buf,
    }
  }
}

View File

@@ -0,0 +1,131 @@
import _computeGeometryForSize from 'vhd-lib/_computeGeometryForSize.js'
import { createFooter, createHeader } from 'vhd-lib/_createFooterHeader.js'
import { DISK_TYPES, FOOTER_SIZE } from 'vhd-lib/_constants.js'
import { readChunk } from '@vates/read-chunk'
import { unpackFooter, unpackHeader } from 'vhd-lib/Vhd/_utils.js'
import { VhdAbstract } from 'vhd-lib'
import assert from 'node:assert'
const VHD_BLOCK_LENGTH = 2 * 1024 * 1024
// Exposes a raw ("-flat.vmdk") ESXi disk file through the VhdAbstract
// interface. The BAT is computed by scanning the whole file once for
// non-empty 2 MiB chunks; blocks are then read through a resumable stream.
export default class VhdEsxiRaw extends VhdAbstract {
  #esxi // Esxi client used for HTTP downloads
  #datastore
  #path
  #bat // Set of non-empty block ids, filled by readBlockAllocationTable()
  #header
  #footer
  #stream // current download stream, kept open across sequential readBlock calls
  #bytesRead = 0 // absolute file offset the stream is positioned at

  // Factory: builds the instance and synthesizes the VHD header/footer from
  // the file size before returning it.
  static async open(esxi, datastore, path) {
    const vhd = new VhdEsxiRaw(esxi, datastore, path)
    await vhd.readHeaderAndFooter()
    return vhd
  }

  get header() {
    return this.#header
  }

  get footer() {
    return this.#footer
  }

  constructor(esxi, datastore, path) {
    super()
    this.#esxi = esxi
    this.#path = path
    this.#datastore = datastore
  }

  // Builds the synthetic VHD header/footer; the disk size comes from the
  // content-length of a full download request.
  async readHeaderAndFooter(checkSecondFooter = true) {
    const res = await this.#esxi.download(this.#datastore, this.#path)
    // content-length is a string: coerce it before doing arithmetic
    const length = Number(res.headers.get('content-length'))
    // round up so a trailing partial block still gets a table entry
    this.#header = unpackHeader(createHeader(Math.ceil(length / VHD_BLOCK_LENGTH)))
    const geometry = _computeGeometryForSize(length)
    const actualSize = geometry.actualSize
    this.#footer = unpackFooter(
      createFooter(actualSize, Math.floor(Date.now() / 1000), geometry, FOOTER_SIZE, DISK_TYPES.DYNAMIC)
    )
  }

  containsBlock(blockId) {
    assert.notEqual(this.#bat, undefined, 'bat is not loaded')
    return this.#bat.has(blockId)
  }

  async readBlock(blockId) {
    const start = blockId * VHD_BLOCK_LENGTH
    // Reuse the open stream only when it is positioned exactly at `start`;
    // otherwise drop it and restart the download from the wanted offset.
    // (The previous code had two duplicated branches — `bytesRead > start`
    // and `start > bytesRead` — doing exactly this.)
    if (this.#stream === undefined || this.#bytesRead !== start) {
      if (this.#stream !== undefined) {
        this.#stream.destroy()
      }
      this.#stream = (
        await this.#esxi.download(this.#datastore, this.#path, `${start}-${this.footer.currentSize}`)
      ).body
      this.#bytesRead = start
    }
    const data = await readChunk(this.#stream, VHD_BLOCK_LENGTH)
    this.#bytesRead += data.length
    const bitmap = Buffer.alloc(512, 255) // all sectors marked present
    return {
      id: blockId,
      bitmap,
      data,
      buffer: Buffer.concat([bitmap, data]),
    }
  }

  // Streams the whole file once and records which 2 MiB chunks contain any
  // non-zero byte. Logs progress every 30 s while scanning.
  async readBlockAllocationTable() {
    const res = await this.#esxi.download(this.#datastore, this.#path)
    const length = Number(res.headers.get('content-length'))
    const stream = res.body
    const empty = Buffer.alloc(VHD_BLOCK_LENGTH, 0)
    let pos = 0
    this.#bat = new Set()
    let nextChunkLength = Math.min(VHD_BLOCK_LENGTH, length)
    const progress = setInterval(() => {
      console.log('reading blocks', pos / VHD_BLOCK_LENGTH, '/', length / VHD_BLOCK_LENGTH)
    }, 30 * 1000)
    try {
      while (nextChunkLength > 0) {
        const chunk = await readChunk(stream, nextChunkLength)
        let isEmpty
        if (nextChunkLength === VHD_BLOCK_LENGTH) {
          isEmpty = empty.equals(chunk)
        } else {
          // last block can be smaller
          isEmpty = Buffer.alloc(nextChunkLength, 0).equals(chunk)
        }
        if (!isEmpty) {
          this.#bat.add(pos / VHD_BLOCK_LENGTH)
        }
        pos += VHD_BLOCK_LENGTH
        nextChunkLength = Math.min(VHD_BLOCK_LENGTH, length - pos)
      }
      console.log('BAT reading done, remaining ', this.#bat.size, '/', Math.ceil(length / VHD_BLOCK_LENGTH))
    } finally {
      // guarantee the progress timer is cleared on success AND on error
      clearInterval(progress)
    }
  }
}

View File

@@ -0,0 +1,315 @@
import { Client } from 'node-vsphere-soap'
import { dirname } from 'node:path'
import { EventEmitter } from 'node:events'
import { strictEqual } from 'node:assert'
import fetch from 'node-fetch'
import parseVmdk from './parsers/vmdk.mjs'
import parseVmsd from './parsers/vmsd.mjs'
import parseVmx from './parsers/vmx.mjs'
import VhdCowd from './VhdEsxiCowd.mjs'
import VhdEsxiRaw from './VhdEsxiRaw.mjs'
const MAX_SCSI = 9
const MAX_ETHERNET = 9
// Thin client over an ESXi host: SOAP (node-vsphere-soap) for inventory
// queries and plain HTTPS for datastore file downloads.
// Emits 'ready' once the SOAP session is established, 'error' on failure.
export default class Esxi extends EventEmitter {
  #client
  #cookies // session cookies captured from download responses, reused afterwards
  #host
  #user
  #password
  #ready = false

  constructor(host, user, password, sslVerify = true) {
    super()
    this.#host = host
    this.#user = user
    this.#password = password
    this.#client = new Client(host, user, password, sslVerify)
    // node-vsphere-soap emits a process-level TLS warning when sslVerify is
    // false: swallow warnings only while the connection is being established
    process.on('warning', this.#eatTlsWarning)
    this.#client.once('ready', () => {
      process.off('warning', this.#eatTlsWarning)
      this.#ready = true
      this.emit('ready')
    })
    this.#client.on('error', err => {
      process.off('warning', this.#eatTlsWarning)
      console.error({
        in: 'ERROR',
        code: err.code,
        message: err.message,
      })
      this.emit('error', err)
    })
  }

  // intentionally empty: used to mute the self-signed-certificate warning
  #eatTlsWarning(/* err */) {
    // console.log('yummy', err.code, err.message)
  }

  // Runs a SOAP command and resolves with its 'result' payload.
  // FIX: the previous implementation tried to clean up with
  // `client.off('error', reject)` / `client.off('result', resolve)`, but
  // neither `reject` nor `resolve` had ever been registered as listeners,
  // so one 'error' listener leaked per call. Keep a reference to the real
  // handler so it can be removed once a result arrives.
  #exec(cmd, args) {
    strictEqual(this.#ready, true)
    const client = this.#client
    return new Promise(function (resolve, reject) {
      function onError(error) {
        reject(error)
      }
      client.once('error', onError)
      client.runCommand(cmd, args).once('result', function () {
        client.off('error', onError)
        resolve(...arguments)
      })
    })
  }

  // Downloads a file (or an inclusive byte range "start-end") from a
  // datastore. First call authenticates with Basic auth; subsequent calls
  // reuse the session cookies returned by the server.
  async download(dataStore, path, range) {
    strictEqual(this.#ready, true)
    const url = `https://${this.#host}/folder/${path}?dsName=${dataStore}`
    const headers = {}
    if (this.#cookies) {
      headers.cookie = this.#cookies
    } else {
      headers.Authorization = 'Basic ' + Buffer.from(this.#user + ':' + this.#password).toString('base64')
    }
    if (range) {
      // NOTE(review): content-type on a bodyless GET is unusual — presumably
      // meant as an Accept hint for the multipart range response; confirm.
      headers['content-type'] = 'multipart/byteranges'
      headers.Range = 'bytes=' + range
    }
    const res = await fetch(url, {
      method: 'GET',
      headers,
      highWaterMark: 10 * 1024 * 1024,
    })
    if (res.status < 200 || res.status >= 300) {
      const error = new Error(res.status + ' ' + res.statusText + ' ' + url)
      error.cause = res
      throw error
    }
    if (res.headers.raw()['set-cookie']) {
      this.#cookies = res.headers.raw()['set-cookie']
    }
    return res
  }

  // Retrieves the given properties of every object of `type` in the
  // inventory; returns { [managedObjectId]: { [property]: value } }.
  async search(type, properties) {
    // get property collector
    const propertyCollector = this.#client.serviceContent.propertyCollector
    // get view manager
    const viewManager = this.#client.serviceContent.viewManager
    // get root folder
    const rootFolder = this.#client.serviceContent.rootFolder
    let result = await this.#exec('CreateContainerView', {
      _this: viewManager,
      container: rootFolder,
      type: [type],
      recursive: true,
    })
    // build all the data structures needed to query all the vm names
    const containerView = result.returnval
    const objectSpec = {
      attributes: { 'xsi:type': 'ObjectSpec' }, // setting attributes xsi:type is important or else the server may mis-recognize types!
      obj: containerView,
      skip: true,
      selectSet: [
        {
          attributes: { 'xsi:type': 'TraversalSpec' },
          name: 'traverseEntities',
          type: 'ContainerView',
          path: 'view',
          skip: false,
        },
      ],
    }
    const propertyFilterSpec = {
      attributes: { 'xsi:type': 'PropertyFilterSpec' },
      propSet: properties.map(p => ({
        attributes: { 'xsi:type': 'PropertySpec' },
        type,
        pathSet: [p],
      })),
      objectSet: [objectSpec],
    }
    result = await this.#exec('RetrievePropertiesEx', {
      _this: propertyCollector,
      specSet: [propertyFilterSpec],
      options: { attributes: { type: 'RetrieveOptions' } },
    })
    const objects = {}
    // the SOAP layer returns a bare object instead of a one-element array
    const returnObj = Array.isArray(result.returnval.objects) ? result.returnval.objects : [result.returnval.objects]
    returnObj.forEach(({ obj, propSet }) => {
      objects[obj.$value] = {}
      propSet = Array.isArray(propSet) ? propSet : [propSet]
      propSet.forEach(({ name, val }) => {
        // don't care about the type for now
        delete val.attributes
        // a scalar value : simplify it
        if (val.$value) {
          objects[obj.$value][name] = val.$value
        } else {
          objects[obj.$value][name] = val
        }
      })
    })
    return objects
  }

  // Downloads and parses a .vmdk descriptor, resolving which datastore the
  // disk actually lives on (it may reference another datastore by URL).
  async #inspectVmdk(dataStores, currentDataStore, currentPath, filePath) {
    let diskDataStore, diskPath
    if (filePath.startsWith('/')) {
      // disk is on another datastore
      Object.keys(dataStores).forEach(dataStoreUrl => {
        if (filePath.startsWith(dataStoreUrl)) {
          diskDataStore = dataStores[dataStoreUrl].name
          diskPath = filePath.substring(dataStoreUrl.length + 1)
        }
      })
    } else {
      diskDataStore = currentDataStore
      diskPath = currentPath + '/' + filePath
    }
    const vmdkRes = await this.download(diskDataStore, diskPath)
    const text = await vmdkRes.text()
    const parsed = parseVmdk(text)
    const { fileName, parentFileName, capacity } = parsed
    return {
      ...parsed,
      datastore: diskDataStore,
      path: dirname(diskPath),
      descriptionLabel: ' from esxi',
      vhd: async () => {
        if (fileName.endsWith('-flat.vmdk')) {
          const vhd = await VhdEsxiRaw.open(this, diskDataStore, dirname(diskPath) + '/' + fileName)
          await vhd.readBlockAllocationTable()
          return vhd.stream()
        }
        // last snapshot only works when vm is powered off
        const vhd = await VhdCowd.open(this, diskDataStore, dirname(diskPath) + '/' + fileName, parentFileName)
        await vhd.readBlockAllocationTable()
        return vhd.stream()
      },
      rawStream: async () => {
        if (!fileName.endsWith('-flat.vmdk')) {
          return
        }
        // @todo : only if vm is powered off
        const stream = (await this.download(diskDataStore, dirname(diskPath) + '/' + fileName)).body
        stream.length = capacity
        return stream
      },
    }
  }

  // Collects everything needed to migrate a VM: config, disks (with lazy
  // vhd/raw stream factories), networks and snapshot history.
  async getTransferableVmMetadata(vmId) {
    const search = await this.search('VirtualMachine', ['name', 'config', 'storage', 'runtime', 'snapshot'])
    if (search[vmId] === undefined) {
      throw new Error(`VM ${vmId} not found `)
    }
    const { config, runtime } = search[vmId]
    const [, dataStore, vmxPath] = config.files.vmPathName.match(/^\[(.*)\] (.+.vmx)$/)
    const res = await this.download(dataStore, vmxPath)
    const vmx = parseVmx(await res.text())
    // list datastores
    const dataStores = {}
    Object.values(await this.search('Datastore', ['summary'])).forEach(({ summary }) => {
      dataStores[summary.url] = summary
    })
    const disks = []
    for (let scsiIndex = 0; scsiIndex < MAX_SCSI; scsiIndex++) {
      const scsiChannel = vmx[`scsi${scsiIndex}`]
      if (scsiChannel === undefined) {
        continue
      }
      for (const diskIndex in Object.values(scsiChannel)) {
        const disk = scsiChannel[diskIndex]
        if (typeof disk !== 'object' || disk.deviceType !== 'scsi-hardDisk') {
          continue
        }
        disks.push({
          ...(await this.#inspectVmdk(dataStores, dataStore, dirname(vmxPath), disk.fileName)),
          node: `scsi${scsiIndex}:${diskIndex}`,
        })
      }
    }
    const networks = []
    for (let ethernetIndex = 0; ethernetIndex < MAX_ETHERNET; ethernetIndex++) {
      const ethernet = vmx[`ethernet${ethernetIndex}`]
      if (ethernet === undefined) {
        continue
      }
      networks.push({
        label: ethernet.networkName,
        macAddress: ethernet.generatedAddress,
        isGenerated: ethernet.addressType === 'generated',
      })
    }
    // FIX: a VM that never had snapshots may not have a .vmsd file at all, in
    // which case the download throws before the `if (vmsd)` guard can help:
    // tolerate a 404 here, rethrow anything else.
    let vmsd
    try {
      vmsd = await (await this.download(dataStore, vmxPath.replace('.vmx', '.vmsd'))).text()
    } catch (error) {
      if (error.cause?.status !== 404) {
        throw error
      }
    }
    let snapshots
    if (vmsd) {
      snapshots = parseVmsd(vmsd)
      for (const snapshotIndex in snapshots?.snapshots) {
        const snapshot = snapshots.snapshots[snapshotIndex]
        for (const diskIndex in snapshot.disks) {
          const fileName = snapshot.disks[diskIndex].fileName
          snapshot.disks[diskIndex] = {
            node: snapshot.disks[diskIndex]?.node, // 'scsi0:0',
            ...(await this.#inspectVmdk(dataStores, dataStore, dirname(vmxPath), fileName)),
          }
        }
      }
    }
    return {
      name_label: config.name,
      memory: parseInt(config.hardware.memoryMB) * 1024 * 1024,
      numCpu: parseInt(config.hardware.numCPU),
      guestToolsInstalled: false,
      firmware: config.firmware, // bios or uefi
      powerState: runtime.powerState,
      snapshots,
      disks: disks.map(({ fileName, rawDiskFileName, datastore, path, parentFileName, ...other }) => {
        return {
          ...other,
          vhd: async () => {
            if (fileName.endsWith('-flat.vmdk')) {
              const vhd = await VhdEsxiRaw.open(this, datastore, path + '/' + fileName)
              await vhd.readBlockAllocationTable()
              return vhd.stream()
            }
            // last snapshot only works when vm is powered off
            const vhd = await VhdCowd.open(this, datastore, path + '/' + fileName, parentFileName)
            await vhd.readBlockAllocationTable()
            return vhd.stream()
          },
          rawStream: async () => {
            if (fileName.endsWith('-flat.vmdk')) {
              return
            }
            // @todo : only if vm is powered off
            const stream = (await this.download(datastore, path + '/' + fileName)).body
            stream.length = other.capacity
            return stream
          },
        }
      }),
      networks,
    }
  }
}

View File

@@ -0,0 +1,14 @@
import Esxi from './esxi.mjs'

// Manual test script: connects to an ESXi host and dumps the transferable
// metadata of VM '4'. Credentials are placeholders for local debugging.
const host = '10.10.0.62'
const user = 'root'
const password = ''
const sslVerify = false

console.log(Esxi)
const esxi = new Esxi(host, user, password, sslVerify)
console.log(esxi)
esxi.on('ready', async function () {
  // FIX: without the try/catch, any failure inside this async listener
  // surfaced as an unhandled promise rejection instead of a clean error
  try {
    const metadata = await esxi.getTransferableVmMetadata('4')
    console.log('metadata', metadata)
  } catch (error) {
    console.error('failed to read VM metadata', error)
    process.exitCode = 1
  }
})

View File

@@ -0,0 +1,30 @@
{
"license": "ISC",
"private": false,
"version": "0.0.2",
"name": "@xen-orchestra/vmware-explorer",
"dependencies": {
"@vates/read-chunk": "^1.0.1",
"lodash": "^4.17.21",
"node-fetch": "^3.3.0",
"node-vsphere-soap": "^0.0.2-5",
"vhd-lib": "^4.2.0"
},
"engines": {
"node": ">=18"
},
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/vmware-explorer",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "@xen-orchestra/vmware-explorer",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"author": {
"name": "Vates SAS",
"url": "https://vates.fr"
},
"scripts": {
"postversion": "npm publish --access public"
}
}

View File

@@ -0,0 +1,45 @@
import { strictEqual } from 'node:assert'
// this file contains the disk metadata
// Parses the text descriptor of a VMDK (disk metadata): "key=value" pairs
// plus extent declarations such as `RW 2048 VMFS "disk-flat.vmdk"`.
// Returns the key/value pairs (keys lowercased) with the single extent
// attached under `extent`.
export function parseDescriptor(text) {
  // drop the NUL padding that follows an embedded descriptor
  const content = text.toString('ascii').replace(/\x00+$/, '') // eslint-disable-line no-control-regex
  strictEqual(content.substr(0, 21), '# Disk DescriptorFile')
  const properties = {}
  const extents = []
  for (const line of content.split(/\r?\n/)) {
    // skip blank lines and comments
    if (line.trim().length === 0 || line[0] === '#') {
      continue
    }
    const parts = line.split('=')
    // the quote test avoids mis-parsing an extent whose file name contains '='
    if (parts.length === 2 && !parts[0].includes('"')) {
      properties[parts[0].toLowerCase()] = parts[1].replace(/['"]+/g, '')
    } else {
      const [, access, sizeSectors, type, name, offset] = line.match(/([A-Z]+) ([0-9]+) ([A-Z]+) "(.*)" ?(.*)$/)
      extents.push({ access, sizeSectors, type, name, offset })
    }
  }
  strictEqual(extents.length, 1, 'only one extent per vmdk is supported')
  return { ...properties, extent: extents[0] }
}
// https://github.com/libyal/libvmdk/blob/main/documentation/VMWare%20Virtual%20Disk%20Format%20(VMDK).asciidoc#5-the-cowd-sparse-extent-data-file
// vmdk file can be only a descriptor, or a
// https://github.com/libyal/libvmdk/blob/main/documentation/VMWare%20Virtual%20Disk%20Format%20(VMDK).asciidoc#5-the-cowd-sparse-extent-data-file
// Parses a VMDK descriptor string into a normalized disk summary.
// A disk is "full" (not a delta) when it has no parent file name hint.
export default function parseVmdk(raw) {
  strictEqual(typeof raw, 'string')
  const descriptor = parseDescriptor(raw)
  const { extent } = descriptor
  return {
    capacity: extent.sizeSectors * 512, // sectors are 512 B
    isFull: !descriptor.parentfilenamehint,
    uid: descriptor.cid,
    fileName: extent.name,
    parentId: descriptor.parentcid,
    parentFileName: descriptor.parentfilenamehint,
    vmdFormat: extent.type,
    nameLabel: extent.name,
  }
}

View File

@@ -0,0 +1,53 @@
// these files contains the snapshot history of the VM
// these files contains the snapshot history of the VM
// Stores `val` at the nested position described by `keyPath`.
// A path component with a numeric suffix (snapshot0, disk1, …) is stored in
// a pluralized array: snapshot0.uid → obj.snapshots[0].uid.
// FIX: the previous regex /^(.+)([0-9])$/ only captured a single trailing
// digit, so snapshot10 was filed under "snapshot1s" — use a lazy prefix with
// a multi-digit suffix instead.
function set(obj, keyPath, val) {
  const [key, ...rest] = keyPath
  const match = key.match(/^(.+?)([0-9]+)$/)
  if (match !== null) {
    // an array entry
    const label = match[1] + 's'
    const index = Number.parseInt(match[2], 10)
    if (!obj[label]) {
      obj[label] = []
    }
    if (rest.length) {
      if (!obj[label][index]) {
        obj[label][index] = {}
      }
      set(obj[label][index], rest, val)
    } else {
      obj[label][index] = val
    }
  } else if (rest.length) {
    // an object
    if (!obj[key]) {
      // first time
      obj[key] = {}
    }
    set(obj[key], rest, val)
  } else {
    // a scalar
    obj[key] = val
  }
}
// Parses a .vmsd file ("key = "value"" lines) into the VM snapshot history.
// Only `snapshot*` keys are relevant; quotes around values are stripped.
// FIX: `Object.values(parsed.snapshots) || []` could never fall back — the
// call threw before `||` was evaluated when no snapshot entry existed, and
// `parsed.snapshot.current` threw on a vmsd with no snapshot keys. Guard
// both so an empty history yields an empty result instead of a TypeError.
export default function parseVmsd(text) {
  const parsed = {}
  text.split('\n').forEach(line => {
    const [key, val] = line.split(' = ')
    if (!key.startsWith('snapshot')) {
      return
    }
    // strip the surrounding quotes from the value
    set(parsed, key.split('.'), val?.substring(1, val.length - 1))
  })
  return {
    lastUID: parsed.snapshot?.current,
    current: parsed.snapshot?.current,
    numSnapshots: parsed.snapshot?.numSnapshots,
    // Object.values() also compacts the holes of the sparse snapshot array
    snapshots: parsed.snapshots ? Object.values(parsed.snapshots) : [],
  }
}

View File

@@ -0,0 +1,48 @@
// Stores `val` at the nested position described by `keyPath`.
// A "name:index" component addresses an element of an array property:
// scsi0:1.fileName → obj.scsi0[1].fileName.
function set(obj, keyPath, val) {
  const [first, ...descendants] = keyPath
  if (!first.includes(':')) {
    // plain object property
    if (descendants.length === 0) {
      // leaf: store the scalar
      obj[first] = val
      return
    }
    if (obj[first] === undefined) {
      // first visit of this branch
      obj[first] = {}
    }
    set(obj[first], descendants, val)
    return
  }
  // array property
  let [name, index] = first.split(':')
  index = parseInt(index)
  if (!obj[name]) {
    // first visit of this array
    obj[name] = []
  }
  if (descendants.length === 0) {
    obj[name][index] = val
  } else {
    if (!obj[name][index]) {
      // first visit of this element
      obj[name][index] = {}
    }
    set(obj[name][index], descendants, val)
  }
}
// this file contains the vm configuration
// Parses a .vmx file (the VM configuration) into a nested object: dotted
// keys become nested properties, "name:index" segments become array entries,
// and the surrounding quotes are stripped from values.
export default function parseVmx(text) {
  const config = {}
  for (const line of text.split('\n')) {
    const [key, val] = line.split(' = ')
    set(config, key.split('.'), val?.substring(1, val.length - 1))
  }
  return config
}

View File

@@ -175,7 +175,7 @@ poolMarkingPrefix = 'xo:clientInfo:'
[xo-proxy]
callTimeout = '1 min'
channel = 'xo-proxy-appliance-{xoChannel}'
channel = 'xo-proxy-appliance-latest'
namespace = 'xoProxyAppliance'

View File

@@ -35,8 +35,8 @@
"@vates/decorate-with": "^2.0.0",
"@vates/disposable": "^0.1.3",
"@vates/event-listeners-manager": "^1.0.1",
"@vates/otp": "^1.0.0",
"@vates/multi-key-map": "^0.1.0",
"@vates/otp": "^1.0.0",
"@vates/parse-duration": "^0.1.1",
"@vates/predicates": "^1.1.0",
"@vates/read-chunk": "^1.0.1",
@@ -51,6 +51,7 @@
"@xen-orchestra/mixins": "^0.8.2",
"@xen-orchestra/self-signed": "^0.1.3",
"@xen-orchestra/template": "^0.1.0",
"@xen-orchestra/vmware-explorer": "^0.0.2",
"@xen-orchestra/xapi": "^1.6.0",
"ajv": "^8.0.3",
"app-conf": "^2.3.0",

View File

@@ -1298,6 +1298,25 @@ import_.resolve = {
export { import_ as import }
// API endpoint: imports a VM from an ESXi host by delegating to
// MigrateVm#migrationfromEsxi. Resolves with the UUID of the new VM.
export async function importFomEsxi({ host, user, password, sslVerify = true, sr, network, vm, thin = false }) {
  return this.migrationfromEsxi({ host, user, password, sslVerify, thin, vm, sr, network })
}

importFomEsxi.params = {
  host: { type: 'string' },
  network: { type: 'string' },
  password: { type: 'string' },
  user: { type: 'string' },
  sr: { type: 'string' },
  sslVerify: { type: 'boolean', optional: true },
  vm: { type: 'string' },
  thin: { type: 'boolean', optional: true },
}
// -------------------------------------------------------------------
// FIXME: if position is used, all other disks after this position

View File

@@ -1,5 +1,10 @@
import { Backup } from '@xen-orchestra/backups/Backup.js'
import { v4 as generateUuid } from 'uuid'
import Esxi from '@xen-orchestra/vmware-explorer/esxi.mjs'
import OTHER_CONFIG_TEMPLATE from '../xapi/other-config-template.mjs'
import asyncMapSettled from '@xen-orchestra/async-map/legacy.js'
import { fromEvent } from 'promise-toolbox'
import { VDI_FORMAT_RAW, VDI_FORMAT_VHD } from '@xen-orchestra/xapi'
export default class MigrateVm {
constructor(app) {
@@ -106,4 +111,116 @@ export default class MigrateVm {
}
}
}
// Migrates a VM from an ESXi host into XCP-ng/XenServer: creates the VM
// shell, its VIFs, then one VDI per disk node and imports the disk content.
// Resolves with the UUID of the created VM.
async migrationfromEsxi({ host, user, password, sslVerify, sr: srId, network: networkId, vm: vmId, thin }) {
  const esxi = new Esxi(host, user, password, sslVerify)
  const app = this._app
  const sr = app.getXapiObject(srId)
  const xapi = sr.$xapi
  // wait for the SOAP session to be established before querying
  await fromEvent(esxi, 'ready')
  const esxiVmMetadata = await esxi.getTransferableVmMetadata(vmId)
  const { memory, name_label, networks, numCpu } = esxiVmMetadata
  // create the target VM with the same memory/CPU topology
  const vm = await xapi._getOrWaitObject(
    await xapi.VM_create({
      ...OTHER_CONFIG_TEMPLATE,
      memory_dynamic_max: memory,
      memory_dynamic_min: memory,
      memory_static_max: memory,
      memory_static_min: memory,
      name_description: 'from esxi',
      name_label,
      VCPUs_at_startup: numCpu,
      VCPUs_max: numCpu,
    })
  )
  // block start until the import completes, and flag the VM as importing
  // NOTE(review): the message says "OVA import" although this is an ESXi
  // import — looks copy-pasted from the OVA path; confirm and fix upstream.
  await Promise.all([
    asyncMapSettled(['start', 'start_on'], op => vm.update_blocked_operations(op, 'OVA import in progress...')),
    vm.set_name_label(`[Importing...] ${name_label}`),
  ])
  const vifDevices = await xapi.call('VM.get_allowed_VIF_devices', vm.$ref)
  // every source NIC is attached to the single target network `networkId`
  await Promise.all(
    networks.map((network, i) =>
      xapi.VIF_create({
        device: vifDevices[i],
        network: xapi.getObject(networkId).$ref,
        VM: vm.$ref,
      })
    )
  )
  // get the snapshot to migrate: walk the snapshot tree from the current
  // snapshot up to the root, then reverse so the chain is oldest → newest
  const snapshots = esxiVmMetadata.snapshots
  let chain = []
  if (snapshots && snapshots.current) {
    const currentSnapshotId = snapshots.current
    let currentSnapshot = snapshots.snapshots.find(({ uid }) => uid === currentSnapshotId)
    chain = [currentSnapshot.disks]
    while ((currentSnapshot = snapshots.snapshots.find(({ uid }) => uid === currentSnapshot.parent))) {
      chain.push(currentSnapshot.disks)
    }
    chain.reverse()
  }
  // the live disks come after the whole snapshot chain
  chain.push(esxiVmMetadata.disks)
  // group the chain per disk node (e.g. 'scsi0:0') so each node becomes one VDI
  const chainsByNodes = {}
  chain.forEach(disks => {
    disks.forEach(disk => {
      chainsByNodes[disk.node] = chainsByNodes[disk.node] || []
      chainsByNodes[disk.node].push(disk)
    })
  })
  let userdevice = 0
  for (const node in chainsByNodes) {
    const chainByNode = chainsByNodes[node]
    const vdi = await xapi._getOrWaitObject(
      await xapi.VDI_create({
        name_description: 'fromESXI' + chainByNode[0].descriptionLabel,
        name_label: '[ESXI]' + chainByNode[0].nameLabel,
        SR: sr.$ref,
        virtual_size: chainByNode[0].capacity,
      })
    )
    // NOTE(review): console.log debug statements left in — route through the
    // application logger or remove before release
    console.log('vdi created')
    await xapi.VBD_create({
      userdevice: String(userdevice),
      VDI: vdi.$ref,
      VM: vm.$ref,
    })
    console.log('vbd created')
    for (const disk of chainByNode) {
      // the first one is a RAW disk ( full )
      console.log('will import ', { disk })
      let format = VDI_FORMAT_VHD
      let stream
      if (!thin) {
        // prefer the raw stream when available (thick import)
        stream = await disk.rawStream()
        format = VDI_FORMAT_RAW
      }
      if (!stream) {
        stream = await disk.vhd()
      }
      console.log('will import in format ', { disk, format })
      await vdi.$importContent(stream, { format })
      // for now we don't handle snapshots: only the first disk of each
      // chain is imported, the deltas are skipped
      break
    }
    userdevice++
  }
  console.log('disks created')
  // remove the importing in label
  await vm.set_name_label(esxiVmMetadata.name_label)
  // remove lock on start
  await asyncMapSettled(['start', 'start_on'], op => vm.update_blocked_operations(op, null))
  return vm.uuid
}
}

View File

@@ -7128,6 +7128,11 @@ data-uri-to-buffer@3:
resolved "https://registry.yarnpkg.com/data-uri-to-buffer/-/data-uri-to-buffer-3.0.1.tgz#594b8973938c5bc2c33046535785341abc4f3636"
integrity sha512-WboRycPNsVw3B3TL559F7kuBUM4d8CgMEvk6xEJlOp7OBPjt6G7z8WMWlD2rOFZLk6OYfFIUGsCOWzcQH9K2og==
data-uri-to-buffer@^4.0.0:
version "4.0.0"
resolved "https://registry.yarnpkg.com/data-uri-to-buffer/-/data-uri-to-buffer-4.0.0.tgz#b5db46aea50f6176428ac05b73be39a57701a64b"
integrity sha512-Vr3mLBA8qWmcuschSLAOogKgQ/Jwxulv3RNE4FXnYWRGujzrRWQI4m12fQqRkwX06C0KanhLr4hK+GydchZsaA==
dateformat@^2.0.0:
version "2.2.0"
resolved "https://registry.yarnpkg.com/dateformat/-/dateformat-2.2.0.tgz#4065e2013cf9fb916ddfd82efb506ad4c6769062"
@@ -9123,6 +9128,14 @@ feature-policy@0.3.0:
resolved "https://registry.yarnpkg.com/feature-policy/-/feature-policy-0.3.0.tgz#7430e8e54a40da01156ca30aaec1a381ce536069"
integrity sha512-ZtijOTFN7TzCujt1fnNhfWPFPSHeZkesff9AXZj+UEjYBynWNUIYpC87Ve4wHzyexQsImicLu7WsC2LHq7/xrQ==
fetch-blob@^3.1.2, fetch-blob@^3.1.4:
version "3.2.0"
resolved "https://registry.yarnpkg.com/fetch-blob/-/fetch-blob-3.2.0.tgz#f09b8d4bbd45adc6f0c20b7e787e793e309dcce9"
integrity sha512-7yAQpD2UMJzLi1Dqv7qFYnPbaPx7ZfFK6PiIxQ4PfkGPyNyl2Ugx+a/umUonmKqjhM4DnfbMvdX6otXq83soQQ==
dependencies:
node-domexception "^1.0.0"
web-streams-polyfill "^3.0.3"
fifolock@^1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/fifolock/-/fifolock-1.0.0.tgz#a37e54f3ebe69d13480d95a82abc42b7a5c1792d"
@@ -9310,6 +9323,11 @@ fined@^1.0.1:
object.pick "^1.2.0"
parse-filepath "^1.0.1"
first-chunk-stream@^0.1.0:
version "0.1.0"
resolved "https://registry.yarnpkg.com/first-chunk-stream/-/first-chunk-stream-0.1.0.tgz#755d3ec14d49a86e3d2fcc08beead5c0ca2b9c0a"
integrity sha512-o7kVqimu9cl+XNeEGqDPI8Ms4IViicBnjIDZ5uU+7aegfDhJJiU1Da9y52Qt0TfBO3rpKA5hW2cqwp4EkCfl9w==
first-chunk-stream@^2.0.0:
version "2.0.0"
resolved "https://registry.yarnpkg.com/first-chunk-stream/-/first-chunk-stream-2.0.0.tgz#1bdecdb8e083c0664b91945581577a43a9f31d70"
@@ -9413,6 +9431,13 @@ form-data@~2.3.2:
combined-stream "^1.0.6"
mime-types "^2.1.12"
formdata-polyfill@^4.0.10:
version "4.0.10"
resolved "https://registry.yarnpkg.com/formdata-polyfill/-/formdata-polyfill-4.0.10.tgz#24807c31c9d402e002ab3d8c720144ceb8848423"
integrity sha512-buewHzMvYL29jdeQTVILecSaZKnt/RJWjoZCF5OW60Z67/GmSLBkOFM7qh1PI3zFNtJbaZL5eQu1vLfazOwj4g==
dependencies:
fetch-blob "^3.1.2"
forwarded@0.2.0:
version "0.2.0"
resolved "https://registry.yarnpkg.com/forwarded/-/forwarded-0.2.0.tgz#2269936428aad4c15c7ebe9779a84bf0b2a81811"
@@ -13036,11 +13061,21 @@ lodash.uniq@^4.5.0:
resolved "https://registry.yarnpkg.com/lodash.uniq/-/lodash.uniq-4.5.0.tgz#d0225373aeb652adc1bc82e4945339a842754773"
integrity sha512-xfBaXQd9ryd9dlSDvnvI0lvxfLJlYAZzXomUYzLKtUeOQvOP5piqAWuGtrhWeqaXK9hhoM/iyJc5AV+XfsX3HQ==
lodash@3.x.x:
version "3.10.1"
resolved "https://registry.yarnpkg.com/lodash/-/lodash-3.10.1.tgz#5bf45e8e49ba4189e17d482789dfd15bd140b7b6"
integrity sha512-9mDDwqVIma6OZX79ZlDACZl8sBm0TEnkf99zV3iMA4GzkIT/9hiqP5mY0HoT1iNLCrKc/R1HByV+yJfRWVJryQ==
lodash@^4.13.1, lodash@^4.15.0, lodash@^4.16.2, lodash@^4.16.6, lodash@^4.17.10, lodash@^4.17.11, lodash@^4.17.14, lodash@^4.17.15, lodash@^4.17.2, lodash@^4.17.20, lodash@^4.17.21, lodash@^4.17.3, lodash@^4.17.4, lodash@^4.17.5, lodash@^4.2.0, lodash@^4.2.1, lodash@^4.6.1:
version "4.17.21"
resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.21.tgz#679591c564c3bffaae8454cf0b3df370c3d6911c"
integrity sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==
lodash@~2.4.1:
version "2.4.2"
resolved "https://registry.yarnpkg.com/lodash/-/lodash-2.4.2.tgz#fadd834b9683073da179b3eae6d9c0d15053f73e"
integrity sha512-Kak1hi6/hYHGVPmdyiZijoQyz5x2iGVzs6w9GYB/HiXEtylY7tIoYEROMjvM1d9nXJqPOrG2MNPMn01bJ+S0Rw==
log-symbols@^1.0.2:
version "1.0.2"
resolved "https://registry.yarnpkg.com/log-symbols/-/log-symbols-1.0.2.tgz#376ff7b58ea3086a0f09facc74617eca501e1a18"
@@ -13877,6 +13912,11 @@ node-addon-api@^5.0.0:
resolved "https://registry.yarnpkg.com/node-addon-api/-/node-addon-api-5.0.0.tgz#7d7e6f9ef89043befdb20c1989c905ebde18c501"
integrity sha512-CvkDw2OEnme7ybCykJpVcKH+uAOLV2qLqiyla128dN9TkEWfrYmxG6C2boDe5KcNQqZF3orkqzGgOMvZ/JNekA==
node-domexception@^1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/node-domexception/-/node-domexception-1.0.0.tgz#6888db46a1f71c0b76b3f7555016b63fe64766e5"
integrity sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==
node-fetch@^1.0.1:
version "1.7.3"
resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-1.7.3.tgz#980f6f72d85211a5347c6b2bc18c5b84c3eb47ef"
@@ -13892,6 +13932,15 @@ node-fetch@^2.6.7:
dependencies:
whatwg-url "^5.0.0"
node-fetch@^3.3.0:
version "3.3.0"
resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-3.3.0.tgz#37e71db4ecc257057af828d523a7243d651d91e4"
integrity sha512-BKwRP/O0UvoMKp7GNdwPlObhYGB5DQqwhEDQlNKuoqwVYSxkSZCSbHjnFFmUEtwSKRPU4kNK8PbDYYitwaE3QA==
dependencies:
data-uri-to-buffer "^4.0.0"
fetch-blob "^3.1.4"
formdata-polyfill "^4.0.10"
node-forge@^0.10.0:
version "0.10.0"
resolved "https://registry.yarnpkg.com/node-forge/-/node-forge-0.10.0.tgz#32dea2afb3e9926f02ee5ce8794902691a676bf3"
@@ -13967,6 +14016,15 @@ node-version@^1.0.0:
resolved "https://registry.yarnpkg.com/node-version/-/node-version-1.2.0.tgz#34fde3ffa8e1149bd323983479dda620e1b5060d"
integrity sha512-ma6oU4Sk0qOoKEAymVoTvk8EdXEobdS7m/mAGhDJ8Rouugho48crHBORAmy5BoOcv8wraPM6xumapQp5hl4iIQ==
node-vsphere-soap@^0.0.2-5:
version "0.0.2-5"
resolved "https://registry.yarnpkg.com/node-vsphere-soap/-/node-vsphere-soap-0.0.2-5.tgz#e055a17d23452276b0755949b163e16b4214755c"
integrity sha512-FC0QHZMV1QWuCPKdUmYWAX2yVnHNybEGblKOwkFow6DS6xAejIAqRn+hd5imK+VLt2yBT+0XprP44zl2+eTfzw==
dependencies:
lodash "3.x.x"
soap "0.8.0"
soap-cookie "0.10.x"
node-xmpp-client@^3.0.0:
version "3.2.0"
resolved "https://registry.yarnpkg.com/node-xmpp-client/-/node-xmpp-client-3.2.0.tgz#af4527df0cc5abd2690cba2139cc1ecdc81ea189"
@@ -16881,7 +16939,7 @@ replace-homedir@^1.0.0:
is-absolute "^1.0.0"
remove-trailing-separator "^1.1.0"
request@^2.65.0, request@^2.74.0, request@^2.87.0:
request@>=2.9.0, request@^2.65.0, request@^2.74.0, request@^2.87.0:
version "2.88.2"
resolved "https://registry.yarnpkg.com/request/-/request-2.88.2.tgz#d73c918731cb5a87da047e207234146f664d12b3"
integrity sha512-MsvtOrfG9ZcrOwAW+Qi+F6HbD0CWXEh9ou77uOb7FM2WPhwT7smM833PzanhJLsgXjN89Ir6V2PczXNnMpwKhw==
@@ -17201,7 +17259,7 @@ sass@^1.38.1:
immutable "^4.0.0"
source-map-js ">=0.6.2 <2.0.0"
sax@1.2.x, sax@>=0.6.0, sax@~1.2.4:
sax@1.2.x, sax@>=0.6, sax@>=0.6.0, sax@~1.2.4:
version "1.2.4"
resolved "https://registry.yarnpkg.com/sax/-/sax-1.2.4.tgz#2816234e2378bddc4e5354fab5caa895df7100d9"
integrity sha512-NqVDv9TpANUjFm0N8uM5GxL36UgKi9/atZw+x7YFnQ8ckwFGKrl4xX4yWtrey3UJm5nP1kUbnYgLopqWNSRhWw==
@@ -17594,6 +17652,21 @@ snapdragon@^0.8.1:
source-map-resolve "^0.5.0"
use "^3.1.0"
soap-cookie@0.10.x:
version "0.10.1"
resolved "https://registry.yarnpkg.com/soap-cookie/-/soap-cookie-0.10.1.tgz#7c7e62f3779b3e42e6b584d35ecabee92121a23f"
integrity sha512-lG3/Vozl7otPEFbEWLIDOyArDMAUZBJhMQBXW/L5cfGh88GMBtBItOw28zcMLO0o6Y7RC2vkUtZ7CWauTv0a7w==
soap@0.8.0:
version "0.8.0"
resolved "https://registry.yarnpkg.com/soap/-/soap-0.8.0.tgz#ab2766a7515fa5069f264a094e087e3fe74e2a78"
integrity sha512-rQpzOrol1pQpvhn9CdxDJg/D0scOwlr+DcdMFMAm/Q1cWbjaYKEMCl1dcfW5JjMFdf5esKUqGplDPEEB0Z2RZA==
dependencies:
lodash "~2.4.1"
request ">=2.9.0"
sax ">=0.6"
strip-bom "~0.3.1"
sockjs-client@^1.5.0:
version "1.6.1"
resolved "https://registry.yarnpkg.com/sockjs-client/-/sockjs-client-1.6.1.tgz#350b8eda42d6d52ddc030c39943364c11dcad806"
@@ -18177,6 +18250,14 @@ strip-bom@^4.0.0:
resolved "https://registry.yarnpkg.com/strip-bom/-/strip-bom-4.0.0.tgz#9c3505c1db45bcedca3d9cf7a16f5c5aa3901878"
integrity sha512-3xurFv5tEgii33Zi8Jtp55wEIILR9eh34FAW00PZf+JnSsTmV/ioewSgQl97JHvgjoRGwPShsWm+IdrxB35d0w==
strip-bom@~0.3.1:
version "0.3.1"
resolved "https://registry.yarnpkg.com/strip-bom/-/strip-bom-0.3.1.tgz#9e8a39eff456ff9abc2f059f5f2225bb0f3f7ca5"
integrity sha512-8m24eJUyKXllSCydAwFVbr4QRZrRb82T2QfwtbO9gTLWhWIOxoDEZESzCGMgperFNyLhly6SDOs+LPH6/seBfw==
dependencies:
first-chunk-stream "^0.1.0"
is-utf8 "^0.2.0"
strip-eof@^1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/strip-eof/-/strip-eof-1.0.0.tgz#bb43ff5598a6eb05d89b59fcd129c983313606bf"
@@ -19773,6 +19854,11 @@ wcwidth@^1.0.1:
dependencies:
defaults "^1.0.3"
web-streams-polyfill@^3.0.3:
version "3.2.1"
resolved "https://registry.yarnpkg.com/web-streams-polyfill/-/web-streams-polyfill-3.2.1.tgz#71c2718c52b45fd49dbeee88634b3a60ceab42a6"
integrity sha512-e0MO3wdXWKrLbL0DgGnUV7WHVuw9OUvL4hjgnPkIeEvESk74gAITi5G606JtZPp39cd8HA9VQzCIvA49LpPN5Q==
webidl-conversions@^3.0.0:
version "3.0.1"
resolved "https://registry.yarnpkg.com/webidl-conversions/-/webidl-conversions-3.0.1.tgz#24534275e2a7bc6be7bc86611cc16ae0a5654871"